From f4c8aa1107969c26b1984eb2996a58f816dea71f Mon Sep 17 00:00:00 2001 From: "brking@charter.net" Date: Wed, 5 Jul 2006 17:00:01 -0500 Subject: [SCSI] megaraid: Add support for change_queue_depth Adds support for change_queue_depth so that device queue depth can be changed at runtime through sysfs. Signed-off-by: Acked-by: Seokmann Ju Signed-off-by: James Bottomley diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 9271513..7ae580f 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -330,6 +330,21 @@ static struct device_attribute *megaraid_sdev_attrs[] = { NULL, }; +/** + * megaraid_change_queue_depth - Change the device's queue depth + * @sdev: scsi device struct + * @qdepth: depth to set + * + * Return value: + * actual depth set + **/ +static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + if (qdepth > MBOX_MAX_SCSI_CMDS) + qdepth = MBOX_MAX_SCSI_CMDS; + scsi_adjust_queue_depth(sdev, 0, qdepth); + return sdev->queue_depth; +} /* * Scsi host template for megaraid unified driver @@ -343,6 +358,7 @@ static struct scsi_host_template megaraid_template_g = { .eh_device_reset_handler = megaraid_reset_handler, .eh_bus_reset_handler = megaraid_reset_handler, .eh_host_reset_handler = megaraid_reset_handler, + .change_queue_depth = megaraid_change_queue_depth, .use_clustering = ENABLE_CLUSTERING, .sdev_attrs = megaraid_sdev_attrs, .shost_attrs = megaraid_shost_attrs, -- cgit v0.10.2 From 0c269e6d3c615403a6e0acbe6e88f1c0da9c2396 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 12 Jul 2006 09:51:04 -0400 Subject: [SCSI] mptsas: add parent port backlink This takes advantage of the sas class backlink function to show which port on an expander is used to communicate with the parent. Signed-off-by: James Bottomley diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index f66f220..dfdd1e4 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1642,14 +1642,18 @@ static int mptsas_probe_one_phy(struct device *dev, for (i = 0; i < port_info->num_phys; i++) if (port_info->phy_info[i].identify.sas_address == - identify.sas_address) + identify.sas_address) { + sas_port_mark_backlink(port); goto out; + } } else if (scsi_is_sas_rphy(parent)) { struct sas_rphy *parent_rphy = dev_to_rphy(parent); if (identify.sas_address == - parent_rphy->identify.sas_address) + parent_rphy->identify.sas_address) { + sas_port_mark_backlink(port); goto out; + } } switch (identify.device_type) { -- cgit v0.10.2 From 1c57e86d75cf162bdadb3a5fe0cd3f65aa1a9ca3 Mon Sep 17 00:00:00 2001 From: Erich Chen Date: Wed, 12 Jul 2006 08:59:32 -0700 Subject: [SCSI] arcmsr: initial driver, version 1.20.00.13 arcmsr is a driver for the Areca Raid controller, a host based RAID subsystem that speaks SCSI at the firmware level. 
This patch is quite a clean up over the initial submission with contributions from: Randy Dunlap Christoph Hellwig Matthew Wilcox Adrian Bunk Signed-off-by: Erich Chen Signed-off-by: Andrew Morton Signed-off-by: James Bottomley diff --git a/Documentation/scsi/ChangeLog.arcmsr b/Documentation/scsi/ChangeLog.arcmsr new file mode 100644 index 0000000..162c47f --- /dev/null +++ b/Documentation/scsi/ChangeLog.arcmsr @@ -0,0 +1,56 @@ +************************************************************************** +** History +** +** REV# DATE NAME DESCRIPTION +** 1.00.00.00 3/31/2004 Erich Chen First release +** 1.10.00.04 7/28/2004 Erich Chen modify for ioctl +** 1.10.00.06 8/28/2004 Erich Chen modify for 2.6.x +** 1.10.00.08 9/28/2004 Erich Chen modify for x86_64 +** 1.10.00.10 10/10/2004 Erich Chen bug fix for SMP & ioctl +** 1.20.00.00 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error +** 1.20.00.02 12/09/2004 Erich Chen bug fix with over 2T bytes RAID Volume +** 1.20.00.04 1/09/2005 Erich Chen fits for Debian linux kernel version 2.2.xx +** 1.20.00.05 2/20/2005 Erich Chen cleanly as look like a Linux driver at 2.6.x +** thanks for peoples kindness comment +** Kornel Wieliczek +** Christoph Hellwig +** Adrian Bunk +** Andrew Morton +** Christoph Hellwig +** James Bottomley +** Arjan van de Ven +** 1.20.00.06 3/12/2005 Erich Chen fix with arcmsr_pci_unmap_dma "unsigned long" cast, +** modify PCCB POOL allocated by "dma_alloc_coherent" +** (Kornel Wieliczek's comment) +** 1.20.00.07 3/23/2005 Erich Chen bug fix with arcmsr_scsi_host_template_init +** occur segmentation fault, +** if RAID adapter does not on PCI slot +** and modprobe/rmmod this driver twice. +** bug fix enormous stack usage (Adrian Bunk's comment) +** 1.20.00.08 6/23/2005 Erich Chen bug fix with abort command, +** in case of heavy loading when sata cable +** working on low quality connection +** 1.20.00.09 9/12/2005 Erich Chen bug fix with abort command handling, firmware version check +** and firmware update notify for hardware bug fix +** 1.20.00.10 9/23/2005 Erich Chen enhance sysfs function for change driver's max tag Q number. +** add DMA_64BIT_MASK for backward compatible with all 2.6.x +** add some useful message for abort command +** add ioctl code 'ARCMSR_IOCTL_FLUSH_ADAPTER_CACHE' +** customer can send this command for sync raid volume data +** 1.20.00.11 9/29/2005 Erich Chen by comment of Arjan van de Ven fix incorrect msleep redefine +** cast off sizeof(dma_addr_t) condition for 64bit pci_set_dma_mask +** 1.20.00.12 9/30/2005 Erich Chen bug fix with 64bit platform's ccbs using if over 4G system memory +** change 64bit pci_set_consistent_dma_mask into 32bit +** increcct adapter count if adapter initialize fail. +** miss edit at arcmsr_build_ccb.... 
+** psge += sizeof(struct _SG64ENTRY *) => +** psge += sizeof(struct _SG64ENTRY) +** 64 bits sg entry would be incorrectly calculated +** thanks Kornel Wieliczek give me kindly notify +** and detail description +** 1.20.00.13 11/15/2005 Erich Chen scheduling pending ccb with FIFO +** change the architecture of arcmsr command queue list +** for linux standard list +** enable usage of pci message signal interrupt +** follow Randy.Danlup kindness suggestion cleanup this code +************************************************************************** \ No newline at end of file diff --git a/Documentation/scsi/arcmsr_spec.txt b/Documentation/scsi/arcmsr_spec.txt new file mode 100644 index 0000000..5e00423 --- /dev/null +++ b/Documentation/scsi/arcmsr_spec.txt @@ -0,0 +1,574 @@ +******************************************************************************* +** ARECA FIRMWARE SPEC +******************************************************************************* +** Usage of IOP331 adapter +** (All In/Out is in IOP331's view) +** 1. Message 0 --> InitThread message and retrun code +** 2. Doorbell is used for RS-232 emulation +** inDoorBell : bit0 -- data in ready +** (DRIVER DATA WRITE OK) +** bit1 -- data out has been read +** (DRIVER DATA READ OK) +** outDooeBell: bit0 -- data out ready +** (IOP331 DATA WRITE OK) +** bit1 -- data in has been read +** (IOP331 DATA READ OK) +** 3. Index Memory Usage +** offset 0xf00 : for RS232 out (request buffer) +** offset 0xe00 : for RS232 in (scratch buffer) +** offset 0xa00 : for inbound message code message_rwbuffer +** (driver send to IOP331) +** offset 0xa00 : for outbound message code message_rwbuffer +** (IOP331 send to driver) +** 4. RS-232 emulation +** Currently 128 byte buffer is used +** 1st uint32_t : Data length (1--124) +** Byte 4--127 : Max 124 bytes of data +** 5. PostQ +** All SCSI Command must be sent through postQ: +** (inbound queue port) Request frame must be 32 bytes aligned +** #bit27--bit31 => flag for post ccb +** #bit0--bit26 => real address (bit27--bit31) of post arcmsr_cdb +** bit31 : +** 0 : 256 bytes frame +** 1 : 512 bytes frame +** bit30 : +** 0 : normal request +** 1 : BIOS request +** bit29 : reserved +** bit28 : reserved +** bit27 : reserved +** --------------------------------------------------------------------------- +** (outbount queue port) Request reply +** #bit27--bit31 +** => flag for reply +** #bit0--bit26 +** => real address (bit27--bit31) of reply arcmsr_cdb +** bit31 : must be 0 (for this type of reply) +** bit30 : reserved for BIOS handshake +** bit29 : reserved +** bit28 : +** 0 : no error, ignore AdapStatus/DevStatus/SenseData +** 1 : Error, error code in AdapStatus/DevStatus/SenseData +** bit27 : reserved +** 6. BIOS request +** All BIOS request is the same with request from PostQ +** Except : +** Request frame is sent from configuration space +** offset: 0x78 : Request Frame (bit30 == 1) +** offset: 0x18 : writeonly to generate +** IRQ to IOP331 +** Completion of request: +** (bit30 == 0, bit28==err flag) +** 7. Definition of SGL entry (structure) +** 8. Message1 Out - Diag Status Code (????) +** 9. 
Message0 message code : +** 0x00 : NOP +** 0x01 : Get Config +** ->offset 0xa00 :for outbound message code message_rwbuffer +** (IOP331 send to driver) +** Signature 0x87974060(4) +** Request len 0x00000200(4) +** numbers of queue 0x00000100(4) +** SDRAM Size 0x00000100(4)-->256 MB +** IDE Channels 0x00000008(4) +** vendor 40 bytes char +** model 8 bytes char +** FirmVer 16 bytes char +** Device Map 16 bytes char +** FirmwareVersion DWORD <== Added for checking of +** new firmware capability +** 0x02 : Set Config +** ->offset 0xa00 :for inbound message code message_rwbuffer +** (driver send to IOP331) +** Signature 0x87974063(4) +** UPPER32 of Request Frame (4)-->Driver Only +** 0x03 : Reset (Abort all queued Command) +** 0x04 : Stop Background Activity +** 0x05 : Flush Cache +** 0x06 : Start Background Activity +** (re-start if background is halted) +** 0x07 : Check If Host Command Pending +** (Novell May Need This Function) +** 0x08 : Set controller time +** ->offset 0xa00 : for inbound message code message_rwbuffer +** (driver to IOP331) +** byte 0 : 0xaa <-- signature +** byte 1 : 0x55 <-- signature +** byte 2 : year (04) +** byte 3 : month (1..12) +** byte 4 : date (1..31) +** byte 5 : hour (0..23) +** byte 6 : minute (0..59) +** byte 7 : second (0..59) +******************************************************************************* +******************************************************************************* +** RS-232 Interface for Areca Raid Controller +** The low level command interface is exclusive with VT100 terminal +** -------------------------------------------------------------------- +** 1. Sequence of command execution +** -------------------------------------------------------------------- +** (A) Header : 3 bytes sequence (0x5E, 0x01, 0x61) +** (B) Command block : variable length of data including length, +** command code, data and checksum byte +** (C) Return data : variable length of data +** -------------------------------------------------------------------- +** 2. Command block +** -------------------------------------------------------------------- +** (A) 1st byte : command block length (low byte) +** (B) 2nd byte : command block length (high byte) +** note ..command block length shouldn't > 2040 bytes, +** length excludes these two bytes +** (C) 3rd byte : command code +** (D) 4th and following bytes : variable length data bytes +** depends on command code +** (E) last byte : checksum byte (sum of 1st byte until last data byte) +** -------------------------------------------------------------------- +** 3. Command code and associated data +** -------------------------------------------------------------------- +** The following are command code defined in raid controller Command +** code 0x10--0x1? are used for system level management, +** no password checking is needed and should be implemented in separate +** well controlled utility and not for end user access. +** Command code 0x20--0x?? always check the password, +** password must be entered to enable these command. 
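[Editorial note, not part of the patch text: the command-block framing in sections 1 and 2 above lends itself to a short worked example. The sketch below shows how a host-side tool could frame one RS-232 command block: the 3-byte header, the 2-byte length (low byte first, excluding the two length bytes themselves), the command code, the data bytes, and a trailing checksum summed from the first length byte through the last data byte, as the spec describes. Whether the length field also counts the checksum byte is not spelled out above, so that detail is an assumption, and the helper name is illustrative only.]

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sketch: frame one Areca RS-232 command block per sections 1-2 above. */
static size_t areca_frame_command(uint8_t *out, uint8_t code,
                                  const uint8_t *data, uint16_t data_len)
{
        /* assumption: block length = command code + data + checksum byte,
         * excluding the two length bytes themselves */
        uint16_t blk_len = 1 + data_len + 1;
        uint8_t sum = 0;
        size_t i, pos = 0;

        /* (A) 3-byte header */
        out[pos++] = 0x5E;
        out[pos++] = 0x01;
        out[pos++] = 0x61;
        /* (B) command block: length low, length high, code, data */
        out[pos++] = blk_len & 0xFF;
        out[pos++] = blk_len >> 8;
        out[pos++] = code;
        memcpy(out + pos, data, data_len);
        pos += data_len;
        /* (E) checksum: sum of 1st byte (length low) through last data byte */
        for (i = 3; i < pos; i++)
                sum += out[i];
        out[pos++] = sum;
        return pos;     /* total bytes to send over the serial port */
}

[End of editorial note; the spec's command code listing continues below.]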
+** enum +** { +** GUI_SET_SERIAL=0x10, +** GUI_SET_VENDOR, +** GUI_SET_MODEL, +** GUI_IDENTIFY, +** GUI_CHECK_PASSWORD, +** GUI_LOGOUT, +** GUI_HTTP, +** GUI_SET_ETHERNET_ADDR, +** GUI_SET_LOGO, +** GUI_POLL_EVENT, +** GUI_GET_EVENT, +** GUI_GET_HW_MONITOR, +** // GUI_QUICK_CREATE=0x20, (function removed) +** GUI_GET_INFO_R=0x20, +** GUI_GET_INFO_V, +** GUI_GET_INFO_P, +** GUI_GET_INFO_S, +** GUI_CLEAR_EVENT, +** GUI_MUTE_BEEPER=0x30, +** GUI_BEEPER_SETTING, +** GUI_SET_PASSWORD, +** GUI_HOST_INTERFACE_MODE, +** GUI_REBUILD_PRIORITY, +** GUI_MAX_ATA_MODE, +** GUI_RESET_CONTROLLER, +** GUI_COM_PORT_SETTING, +** GUI_NO_OPERATION, +** GUI_DHCP_IP, +** GUI_CREATE_PASS_THROUGH=0x40, +** GUI_MODIFY_PASS_THROUGH, +** GUI_DELETE_PASS_THROUGH, +** GUI_IDENTIFY_DEVICE, +** GUI_CREATE_RAIDSET=0x50, +** GUI_DELETE_RAIDSET, +** GUI_EXPAND_RAIDSET, +** GUI_ACTIVATE_RAIDSET, +** GUI_CREATE_HOT_SPARE, +** GUI_DELETE_HOT_SPARE, +** GUI_CREATE_VOLUME=0x60, +** GUI_MODIFY_VOLUME, +** GUI_DELETE_VOLUME, +** GUI_START_CHECK_VOLUME, +** GUI_STOP_CHECK_VOLUME +** }; +** Command description : +** GUI_SET_SERIAL : Set the controller serial# +** byte 0,1 : length +** byte 2 : command code 0x10 +** byte 3 : password length (should be 0x0f) +** byte 4-0x13 : should be "ArEcATecHnoLogY" +** byte 0x14--0x23 : Serial number string (must be 16 bytes) +** GUI_SET_VENDOR : Set vendor string for the controller +** byte 0,1 : length +** byte 2 : command code 0x11 +** byte 3 : password length (should be 0x08) +** byte 4-0x13 : should be "ArEcAvAr" +** byte 0x14--0x3B : vendor string (must be 40 bytes) +** GUI_SET_MODEL : Set the model name of the controller +** byte 0,1 : length +** byte 2 : command code 0x12 +** byte 3 : password length (should be 0x08) +** byte 4-0x13 : should be "ArEcAvAr" +** byte 0x14--0x1B : model string (must be 8 bytes) +** GUI_IDENTIFY : Identify device +** byte 0,1 : length +** byte 2 : command code 0x13 +** return "Areca RAID Subsystem " +** GUI_CHECK_PASSWORD : Verify password +** byte 0,1 : length +** byte 2 : command code 0x14 +** byte 3 : password length +** byte 4-0x?? 
: user password to be checked +** GUI_LOGOUT : Logout GUI (force password checking on next command) +** byte 0,1 : length +** byte 2 : command code 0x15 +** GUI_HTTP : HTTP interface (reserved for Http proxy service)(0x16) +** +** GUI_SET_ETHERNET_ADDR : Set the ethernet MAC address +** byte 0,1 : length +** byte 2 : command code 0x17 +** byte 3 : password length (should be 0x08) +** byte 4-0x13 : should be "ArEcAvAr" +** byte 0x14--0x19 : Ethernet MAC address (must be 6 bytes) +** GUI_SET_LOGO : Set logo in HTTP +** byte 0,1 : length +** byte 2 : command code 0x18 +** byte 3 : Page# (0/1/2/3) (0xff --> clear OEM logo) +** byte 4/5/6/7 : 0x55/0xaa/0xa5/0x5a +** byte 8 : TITLE.JPG data (each page must be 2000 bytes) +** note page0 1st 2 byte must be +** actual length of the JPG file +** GUI_POLL_EVENT : Poll If Event Log Changed +** byte 0,1 : length +** byte 2 : command code 0x19 +** GUI_GET_EVENT : Read Event +** byte 0,1 : length +** byte 2 : command code 0x1a +** byte 3 : Event Page (0:1st page/1/2/3:last page) +** GUI_GET_HW_MONITOR : Get HW monitor data +** byte 0,1 : length +** byte 2 : command code 0x1b +** byte 3 : # of FANs(example 2) +** byte 4 : # of Voltage sensor(example 3) +** byte 5 : # of temperature sensor(example 2) +** byte 6 : # of power +** byte 7/8 : Fan#0 (RPM) +** byte 9/10 : Fan#1 +** byte 11/12 : Voltage#0 original value in *1000 +** byte 13/14 : Voltage#0 value +** byte 15/16 : Voltage#1 org +** byte 17/18 : Voltage#1 +** byte 19/20 : Voltage#2 org +** byte 21/22 : Voltage#2 +** byte 23 : Temp#0 +** byte 24 : Temp#1 +** byte 25 : Power indicator (bit0 : power#0, +** bit1 : power#1) +** byte 26 : UPS indicator +** GUI_QUICK_CREATE : Quick create raid/volume set +** byte 0,1 : length +** byte 2 : command code 0x20 +** byte 3/4/5/6 : raw capacity +** byte 7 : raid level +** byte 8 : stripe size +** byte 9 : spare +** byte 10/11/12/13: device mask (the devices to create raid/volume) +** This function is removed, application like +** to implement quick create function +** need to use GUI_CREATE_RAIDSET and GUI_CREATE_VOLUMESET function. 
+** GUI_GET_INFO_R : Get Raid Set Information +** byte 0,1 : length +** byte 2 : command code 0x20 +** byte 3 : raidset# +** typedef struct sGUI_RAIDSET +** { +** BYTE grsRaidSetName[16]; +** DWORD grsCapacity; +** DWORD grsCapacityX; +** DWORD grsFailMask; +** BYTE grsDevArray[32]; +** BYTE grsMemberDevices; +** BYTE grsNewMemberDevices; +** BYTE grsRaidState; +** BYTE grsVolumes; +** BYTE grsVolumeList[16]; +** BYTE grsRes1; +** BYTE grsRes2; +** BYTE grsRes3; +** BYTE grsFreeSegments; +** DWORD grsRawStripes[8]; +** DWORD grsRes4; +** DWORD grsRes5; // Total to 128 bytes +** DWORD grsRes6; // Total to 128 bytes +** } sGUI_RAIDSET, *pGUI_RAIDSET; +** GUI_GET_INFO_V : Get Volume Set Information +** byte 0,1 : length +** byte 2 : command code 0x21 +** byte 3 : volumeset# +** typedef struct sGUI_VOLUMESET +** { +** BYTE gvsVolumeName[16]; // 16 +** DWORD gvsCapacity; +** DWORD gvsCapacityX; +** DWORD gvsFailMask; +** DWORD gvsStripeSize; +** DWORD gvsNewFailMask; +** DWORD gvsNewStripeSize; +** DWORD gvsVolumeStatus; +** DWORD gvsProgress; // 32 +** sSCSI_ATTR gvsScsi; +** BYTE gvsMemberDisks; +** BYTE gvsRaidLevel; // 8 +** BYTE gvsNewMemberDisks; +** BYTE gvsNewRaidLevel; +** BYTE gvsRaidSetNumber; +** BYTE gvsRes0; // 4 +** BYTE gvsRes1[4]; // 64 bytes +** } sGUI_VOLUMESET, *pGUI_VOLUMESET; +** GUI_GET_INFO_P : Get Physical Drive Information +** byte 0,1 : length +** byte 2 : command code 0x22 +** byte 3 : drive # (from 0 to max-channels - 1) +** typedef struct sGUI_PHY_DRV +** { +** BYTE gpdModelName[40]; +** BYTE gpdSerialNumber[20]; +** BYTE gpdFirmRev[8]; +** DWORD gpdCapacity; +** DWORD gpdCapacityX; // Reserved for expansion +** BYTE gpdDeviceState; +** BYTE gpdPioMode; +** BYTE gpdCurrentUdmaMode; +** BYTE gpdUdmaMode; +** BYTE gpdDriveSelect; +** BYTE gpdRaidNumber; // 0xff if not belongs to a raid set +** sSCSI_ATTR gpdScsi; +** BYTE gpdReserved[40]; // Total to 128 bytes +** } sGUI_PHY_DRV, *pGUI_PHY_DRV; +** GUI_GET_INFO_S : Get System Information +** byte 0,1 : length +** byte 2 : command code 0x23 +** typedef struct sCOM_ATTR +** { +** BYTE comBaudRate; +** BYTE comDataBits; +** BYTE comStopBits; +** BYTE comParity; +** BYTE comFlowControl; +** } sCOM_ATTR, *pCOM_ATTR; +** typedef struct sSYSTEM_INFO +** { +** BYTE gsiVendorName[40]; +** BYTE gsiSerialNumber[16]; +** BYTE gsiFirmVersion[16]; +** BYTE gsiBootVersion[16]; +** BYTE gsiMbVersion[16]; +** BYTE gsiModelName[8]; +** BYTE gsiLocalIp[4]; +** BYTE gsiCurrentIp[4]; +** DWORD gsiTimeTick; +** DWORD gsiCpuSpeed; +** DWORD gsiICache; +** DWORD gsiDCache; +** DWORD gsiScache; +** DWORD gsiMemorySize; +** DWORD gsiMemorySpeed; +** DWORD gsiEvents; +** BYTE gsiMacAddress[6]; +** BYTE gsiDhcp; +** BYTE gsiBeeper; +** BYTE gsiChannelUsage; +** BYTE gsiMaxAtaMode; +** BYTE gsiSdramEcc; // 1:if ECC enabled +** BYTE gsiRebuildPriority; +** sCOM_ATTR gsiComA; // 5 bytes +** sCOM_ATTR gsiComB; // 5 bytes +** BYTE gsiIdeChannels; +** BYTE gsiScsiHostChannels; +** BYTE gsiIdeHostChannels; +** BYTE gsiMaxVolumeSet; +** BYTE gsiMaxRaidSet; +** BYTE gsiEtherPort; // 1:if ether net port supported +** BYTE gsiRaid6Engine; // 1:Raid6 engine supported +** BYTE gsiRes[75]; +** } sSYSTEM_INFO, *pSYSTEM_INFO; +** GUI_CLEAR_EVENT : Clear System Event +** byte 0,1 : length +** byte 2 : command code 0x24 +** GUI_MUTE_BEEPER : Mute current beeper +** byte 0,1 : length +** byte 2 : command code 0x30 +** GUI_BEEPER_SETTING : Disable beeper +** byte 0,1 : length +** byte 2 : command code 0x31 +** byte 3 : 0->disable, 1->enable +** GUI_SET_PASSWORD : 
Change password +** byte 0,1 : length +** byte 2 : command code 0x32 +** byte 3 : pass word length ( must <= 15 ) +** byte 4 : password (must be alpha-numerical) +** GUI_HOST_INTERFACE_MODE : Set host interface mode +** byte 0,1 : length +** byte 2 : command code 0x33 +** byte 3 : 0->Independent, 1->cluster +** GUI_REBUILD_PRIORITY : Set rebuild priority +** byte 0,1 : length +** byte 2 : command code 0x34 +** byte 3 : 0/1/2/3 (low->high) +** GUI_MAX_ATA_MODE : Set maximum ATA mode to be used +** byte 0,1 : length +** byte 2 : command code 0x35 +** byte 3 : 0/1/2/3 (133/100/66/33) +** GUI_RESET_CONTROLLER : Reset Controller +** byte 0,1 : length +** byte 2 : command code 0x36 +** *Response with VT100 screen (discard it) +** GUI_COM_PORT_SETTING : COM port setting +** byte 0,1 : length +** byte 2 : command code 0x37 +** byte 3 : 0->COMA (term port), +** 1->COMB (debug port) +** byte 4 : 0/1/2/3/4/5/6/7 +** (1200/2400/4800/9600/19200/38400/57600/115200) +** byte 5 : data bit +** (0:7 bit, 1:8 bit : must be 8 bit) +** byte 6 : stop bit (0:1, 1:2 stop bits) +** byte 7 : parity (0:none, 1:off, 2:even) +** byte 8 : flow control +** (0:none, 1:xon/xoff, 2:hardware => must use none) +** GUI_NO_OPERATION : No operation +** byte 0,1 : length +** byte 2 : command code 0x38 +** GUI_DHCP_IP : Set DHCP option and local IP address +** byte 0,1 : length +** byte 2 : command code 0x39 +** byte 3 : 0:dhcp disabled, 1:dhcp enabled +** byte 4/5/6/7 : IP address +** GUI_CREATE_PASS_THROUGH : Create pass through disk +** byte 0,1 : length +** byte 2 : command code 0x40 +** byte 3 : device # +** byte 4 : scsi channel (0/1) +** byte 5 : scsi id (0-->15) +** byte 6 : scsi lun (0-->7) +** byte 7 : tagged queue (1 : enabled) +** byte 8 : cache mode (1 : enabled) +** byte 9 : max speed (0/1/2/3/4, +** async/20/40/80/160 for scsi) +** (0/1/2/3/4, 33/66/100/133/150 for ide ) +** GUI_MODIFY_PASS_THROUGH : Modify pass through disk +** byte 0,1 : length +** byte 2 : command code 0x41 +** byte 3 : device # +** byte 4 : scsi channel (0/1) +** byte 5 : scsi id (0-->15) +** byte 6 : scsi lun (0-->7) +** byte 7 : tagged queue (1 : enabled) +** byte 8 : cache mode (1 : enabled) +** byte 9 : max speed (0/1/2/3/4, +** async/20/40/80/160 for scsi) +** (0/1/2/3/4, 33/66/100/133/150 for ide ) +** GUI_DELETE_PASS_THROUGH : Delete pass through disk +** byte 0,1 : length +** byte 2 : command code 0x42 +** byte 3 : device# to be deleted +** GUI_IDENTIFY_DEVICE : Identify Device +** byte 0,1 : length +** byte 2 : command code 0x43 +** byte 3 : Flash Method +** (0:flash selected, 1:flash not selected) +** byte 4/5/6/7 : IDE device mask to be flashed +** note .... 
no response data available +** GUI_CREATE_RAIDSET : Create Raid Set +** byte 0,1 : length +** byte 2 : command code 0x50 +** byte 3/4/5/6 : device mask +** byte 7-22 : raidset name (if byte 7 == 0:use default) +** GUI_DELETE_RAIDSET : Delete Raid Set +** byte 0,1 : length +** byte 2 : command code 0x51 +** byte 3 : raidset# +** GUI_EXPAND_RAIDSET : Expand Raid Set +** byte 0,1 : length +** byte 2 : command code 0x52 +** byte 3 : raidset# +** byte 4/5/6/7 : device mask for expansion +** byte 8/9/10 : (8:0 no change, 1 change, 0xff:terminate, +** 9:new raid level, +** 10:new stripe size +** 0/1/2/3/4/5->4/8/16/32/64/128K ) +** byte 11/12/13 : repeat for each volume in the raidset +** GUI_ACTIVATE_RAIDSET : Activate incomplete raid set +** byte 0,1 : length +** byte 2 : command code 0x53 +** byte 3 : raidset# +** GUI_CREATE_HOT_SPARE : Create hot spare disk +** byte 0,1 : length +** byte 2 : command code 0x54 +** byte 3/4/5/6 : device mask for hot spare creation +** GUI_DELETE_HOT_SPARE : Delete hot spare disk +** byte 0,1 : length +** byte 2 : command code 0x55 +** byte 3/4/5/6 : device mask for hot spare deletion +** GUI_CREATE_VOLUME : Create volume set +** byte 0,1 : length +** byte 2 : command code 0x60 +** byte 3 : raidset# +** byte 4-19 : volume set name +** (if byte4 == 0, use default) +** byte 20-27 : volume capacity (blocks) +** byte 28 : raid level +** byte 29 : stripe size +** (0/1/2/3/4/5->4/8/16/32/64/128K) +** byte 30 : channel +** byte 31 : ID +** byte 32 : LUN +** byte 33 : 1 enable tag +** byte 34 : 1 enable cache +** byte 35 : speed +** (0/1/2/3/4->async/20/40/80/160 for scsi) +** (0/1/2/3/4->33/66/100/133/150 for IDE ) +** byte 36 : 1 to select quick init +** +** GUI_MODIFY_VOLUME : Modify volume Set +** byte 0,1 : length +** byte 2 : command code 0x61 +** byte 3 : volumeset# +** byte 4-19 : new volume set name +** (if byte4 == 0, not change) +** byte 20-27 : new volume capacity (reserved) +** byte 28 : new raid level +** byte 29 : new stripe size +** (0/1/2/3/4/5->4/8/16/32/64/128K) +** byte 30 : new channel +** byte 31 : new ID +** byte 32 : new LUN +** byte 33 : 1 enable tag +** byte 34 : 1 enable cache +** byte 35 : speed +** (0/1/2/3/4->async/20/40/80/160 for scsi) +** (0/1/2/3/4->33/66/100/133/150 for IDE ) +** GUI_DELETE_VOLUME : Delete volume set +** byte 0,1 : length +** byte 2 : command code 0x62 +** byte 3 : volumeset# +** GUI_START_CHECK_VOLUME : Start volume consistency check +** byte 0,1 : length +** byte 2 : command code 0x63 +** byte 3 : volumeset# +** GUI_STOP_CHECK_VOLUME : Stop volume consistency check +** byte 0,1 : length +** byte 2 : command code 0x64 +** --------------------------------------------------------------------- +** 4. 
Returned data +** --------------------------------------------------------------------- +** (A) Header : 3 bytes sequence (0x5E, 0x01, 0x61) +** (B) Length : 2 bytes +** (low byte 1st, excludes length and checksum byte) +** (C) status or data : +** <1> If length == 1 ==> 1 byte status code +** #define GUI_OK 0x41 +** #define GUI_RAIDSET_NOT_NORMAL 0x42 +** #define GUI_VOLUMESET_NOT_NORMAL 0x43 +** #define GUI_NO_RAIDSET 0x44 +** #define GUI_NO_VOLUMESET 0x45 +** #define GUI_NO_PHYSICAL_DRIVE 0x46 +** #define GUI_PARAMETER_ERROR 0x47 +** #define GUI_UNSUPPORTED_COMMAND 0x48 +** #define GUI_DISK_CONFIG_CHANGED 0x49 +** #define GUI_INVALID_PASSWORD 0x4a +** #define GUI_NO_DISK_SPACE 0x4b +** #define GUI_CHECKSUM_ERROR 0x4c +** #define GUI_PASSWORD_REQUIRED 0x4d +** <2> If length > 1 ==> +** data block returned from controller +** and the contents depends on the command code +** (E) Checksum : checksum of length and status or data byte +************************************************************************** diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 96a81cd..d61662c 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -469,6 +469,20 @@ config SCSI_IN2000 To compile this driver as a module, choose M here: the module will be called in2000. +config SCSI_ARCMSR + tristate "ARECA ARC11X0[PCI-X]/ARC12X0[PCI-EXPRESS] SATA-RAID support" + depends on PCI && SCSI + help + This driver supports all of ARECA's SATA RAID controller cards. + This is an ARECA-maintained driver by Erich Chen. + If you have any problems, please mail to: < erich@areca.com.tw > + Areca supports Linux RAID config tools. + + < http://www.areca.com.tw > + + To compile this driver as a module, choose M here: the + module will be called arcmsr (modprobe arcmsr). + source "drivers/scsi/megaraid/Kconfig.megaraid" config SCSI_SATA diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index ebd0cf0..b2de9bf 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_SCSI_PSI240I) += psi240i.o obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o +obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/ obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o obj-$(CONFIG_SCSI_AHA152X) += aha152x.o obj-$(CONFIG_SCSI_AHA1542) += aha1542.o diff --git a/drivers/scsi/arcmsr/Makefile b/drivers/scsi/arcmsr/Makefile new file mode 100644 index 0000000..721aced --- /dev/null +++ b/drivers/scsi/arcmsr/Makefile @@ -0,0 +1,6 @@ +# File: drivers/arcmsr/Makefile +# Makefile for the ARECA PCI-X PCI-EXPRESS SATA RAID controllers SCSI driver. + +arcmsr-objs := arcmsr_attr.o arcmsr_hba.o + +obj-$(CONFIG_SCSI_ARCMSR) := arcmsr.o diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h new file mode 100644 index 0000000..aff96db --- /dev/null +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -0,0 +1,472 @@ +/* +******************************************************************************* +** O.S : Linux +** FILE NAME : arcmsr.h +** BY : Erich Chen +** Description: SCSI RAID Device Driver for +** ARECA RAID Host adapter +******************************************************************************* +** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved. +** +** Web site: www.areca.com.tw +** E-mail: erich@areca.com.tw +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License version 2 as +** published by the Free Software Foundation. 
+** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +******************************************************************************* +** Redistribution and use in source and binary forms, with or without +** modification, are permitted provided that the following conditions +** are met: +** 1. Redistributions of source code must retain the above copyright +** notice, this list of conditions and the following disclaimer. +** 2. Redistributions in binary form must reproduce the above copyright +** notice, this list of conditions and the following disclaimer in the +** documentation and/or other materials provided with the distribution. +** 3. The name of the author may not be used to endorse or promote products +** derived from this software without specific prior written permission. +** +** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT +** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY +** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +**(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF +** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +******************************************************************************* +*/ +#include + +struct class_device_attribute; + +#define ARCMSR_MAX_OUTSTANDING_CMD 256 +#define ARCMSR_MAX_FREECCB_NUM 288 +#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.13" +#define ARCMSR_SCSI_INITIATOR_ID 255 +#define ARCMSR_MAX_XFER_SECTORS 512 +#define ARCMSR_MAX_TARGETID 17 +#define ARCMSR_MAX_TARGETLUN 8 +#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD +#define ARCMSR_MAX_QBUFFER 4096 +#define ARCMSR_MAX_SG_ENTRIES 38 + +/* +******************************************************************************* +** split 64bits dma addressing +******************************************************************************* +*/ +#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16) +#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff) +/* +******************************************************************************* +** MESSAGE CONTROL CODE +******************************************************************************* +*/ +struct CMD_MESSAGE +{ + uint32_t HeaderLength; + uint8_t Signature[8]; + uint32_t Timeout; + uint32_t ControlCode; + uint32_t ReturnCode; + uint32_t Length; +}; +/* +******************************************************************************* +** IOP Message Transfer Data for user space +******************************************************************************* +*/ +struct CMD_MESSAGE_FIELD +{ + struct CMD_MESSAGE cmdmessage; + uint8_t messagedatabuffer[1032]; +}; +/* IOP message transfer */ +#define ARCMSR_MESSAGE_FAIL 0x0001 +/* DeviceType */ +#define ARECA_SATA_RAID 0x90000000 +/* FunctionCode */ +#define FUNCTION_READ_RQBUFFER 0x0801 +#define FUNCTION_WRITE_WQBUFFER 0x0802 +#define FUNCTION_CLEAR_RQBUFFER 0x0803 +#define FUNCTION_CLEAR_WQBUFFER 0x0804 
+#define FUNCTION_CLEAR_ALLQBUFFER 0x0805 +#define FUNCTION_RETURN_CODE_3F 0x0806 +#define FUNCTION_SAY_HELLO 0x0807 +#define FUNCTION_SAY_GOODBYE 0x0808 +#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 +/* ARECA IO CONTROL CODE*/ +#define ARCMSR_MESSAGE_READ_RQBUFFER \ + ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER +#define ARCMSR_MESSAGE_WRITE_WQBUFFER \ + ARECA_SATA_RAID | FUNCTION_WRITE_WQBUFFER +#define ARCMSR_MESSAGE_CLEAR_RQBUFFER \ + ARECA_SATA_RAID | FUNCTION_CLEAR_RQBUFFER +#define ARCMSR_MESSAGE_CLEAR_WQBUFFER \ + ARECA_SATA_RAID | FUNCTION_CLEAR_WQBUFFER +#define ARCMSR_MESSAGE_CLEAR_ALLQBUFFER \ + ARECA_SATA_RAID | FUNCTION_CLEAR_ALLQBUFFER +#define ARCMSR_MESSAGE_RETURN_CODE_3F \ + ARECA_SATA_RAID | FUNCTION_RETURN_CODE_3F +#define ARCMSR_MESSAGE_SAY_HELLO \ + ARECA_SATA_RAID | FUNCTION_SAY_HELLO +#define ARCMSR_MESSAGE_SAY_GOODBYE \ + ARECA_SATA_RAID | FUNCTION_SAY_GOODBYE +#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \ + ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE +/* ARECA IOCTL ReturnCode */ +#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 +#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 +#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F +/* +************************************************************* +** structure for holding DMA address data +************************************************************* +*/ +#define IS_SG64_ADDR 0x01000000 /* bit24 */ +struct SG32ENTRY +{ + uint32_t length; + uint32_t address; +}; +struct SG64ENTRY +{ + uint32_t length; + uint32_t address; + uint32_t addresshigh; +}; +struct SGENTRY_UNION +{ + union + { + struct SG32ENTRY sg32entry; + struct SG64ENTRY sg64entry; + }u; +}; +/* +******************************************************************** +** Q Buffer of IOP Message Transfer +******************************************************************** +*/ +struct QBUFFER +{ + uint32_t data_len; + uint8_t data[124]; +}; +/* +******************************************************************************* +** FIRMWARE INFO +******************************************************************************* +*/ +struct FIRMWARE_INFO +{ + uint32_t signature; /*0, 00-03*/ + uint32_t request_len; /*1, 04-07*/ + uint32_t numbers_queue; /*2, 08-11*/ + uint32_t sdram_size; /*3, 12-15*/ + uint32_t ide_channels; /*4, 16-19*/ + char vendor[40]; /*5, 20-59*/ + char model[8]; /*15, 60-67*/ + char firmware_ver[16]; /*17, 68-83*/ + char device_map[16]; /*21, 84-99*/ +}; +/* signature of set and get firmware config */ +#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060 +#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063 +/* message code of inbound message register */ +#define ARCMSR_INBOUND_MESG0_NOP 0x00000000 +#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001 +#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002 +#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003 +#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004 +#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005 +#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006 +#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007 +#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008 +/* doorbell interrupt generator */ +#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001 +#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002 +#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001 +#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002 +/* ccb areca cdb flag */ +#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000 +#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000 +#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000 +#define 
ARCMSR_CCBREPLY_FLAG_ERROR 0x10000000 +/* outbound firmware ok */ +#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000 +/* +******************************************************************************* +** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504) +******************************************************************************* +*/ +struct ARCMSR_CDB +{ + uint8_t Bus; + uint8_t TargetID; + uint8_t LUN; + uint8_t Function; + + uint8_t CdbLength; + uint8_t sgcount; + uint8_t Flags; +#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01 +#define ARCMSR_CDB_FLAG_BIOS 0x02 +#define ARCMSR_CDB_FLAG_WRITE 0x04 +#define ARCMSR_CDB_FLAG_SIMPLEQ 0x00 +#define ARCMSR_CDB_FLAG_HEADQ 0x08 +#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10 + uint8_t Reserved1; + + uint32_t Context; + uint32_t DataLength; + + uint8_t Cdb[16]; + + uint8_t DeviceStatus; +#define ARCMSR_DEV_CHECK_CONDITION 0x02 +#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0 +#define ARCMSR_DEV_ABORTED 0xF1 +#define ARCMSR_DEV_INIT_FAIL 0xF2 + uint8_t SenseData[15]; + + union + { + struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES]; + struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES]; + } u; +}; +/* +******************************************************************************* +** Messaging Unit (MU) of the Intel R 80331 I/O processor (80331) +******************************************************************************* +*/ +struct MessageUnit +{ + uint32_t resrved0[4]; /*0000 000F*/ + uint32_t inbound_msgaddr0; /*0010 0013*/ + uint32_t inbound_msgaddr1; /*0014 0017*/ + uint32_t outbound_msgaddr0; /*0018 001B*/ + uint32_t outbound_msgaddr1; /*001C 001F*/ + uint32_t inbound_doorbell; /*0020 0023*/ + uint32_t inbound_intstatus; /*0024 0027*/ + uint32_t inbound_intmask; /*0028 002B*/ + uint32_t outbound_doorbell; /*002C 002F*/ + uint32_t outbound_intstatus; /*0030 0033*/ + uint32_t outbound_intmask; /*0034 0037*/ + uint32_t reserved1[2]; /*0038 003F*/ + uint32_t inbound_queueport; /*0040 0043*/ + uint32_t outbound_queueport; /*0044 0047*/ + uint32_t reserved2[2]; /*0048 004F*/ + uint32_t reserved3[492]; /*0050 07FF 492*/ + uint32_t reserved4[128]; /*0800 09FF 128*/ + uint32_t message_rwbuffer[256]; /*0a00 0DFF 256*/ + uint32_t message_wbuffer[32]; /*0E00 0E7F 32*/ + uint32_t reserved5[32]; /*0E80 0EFF 32*/ + uint32_t message_rbuffer[32]; /*0F00 0F7F 32*/ + uint32_t reserved6[32]; /*0F80 0FFF 32*/ +}; +/* +******************************************************************************* +** Adapter Control Block +******************************************************************************* +*/ +struct AdapterControlBlock +{ + struct pci_dev * pdev; + struct Scsi_Host * host; + unsigned long vir2phy_offset; + /* Offset is used in making arc cdb physical to virtual calculations */ + uint32_t outbound_int_enable; + + struct MessageUnit __iomem * pmu; + /* message unit ATU inbound base address0 */ + + uint32_t acb_flags; +#define ACB_F_SCSISTOPADAPTER 0x0001 +#define ACB_F_MSG_STOP_BGRB 0x0002 + /* stop RAID background rebuild */ +#define ACB_F_MSG_START_BGRB 0x0004 + /* stop RAID background rebuild */ +#define ACB_F_IOPDATA_OVERFLOW 0x0008 + /* iop message data rqbuffer overflow */ +#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010 + /* message clear wqbuffer */ +#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020 + /* message clear rqbuffer */ +#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040 +#define ACB_F_BUS_RESET 0x0080 +#define ACB_F_IOP_INITED 0x0100 + /* iop init */ + + struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; + /* used for memory free */ + struct 
list_head ccb_free_list; + /* head of free ccb list */ + atomic_t ccboutstandingcount; + + void * dma_coherent; + /* dma_coherent used for memory free */ + dma_addr_t dma_coherent_handle; + /* dma_coherent_handle used for memory free */ + + uint8_t rqbuffer[ARCMSR_MAX_QBUFFER]; + /* data collection buffer for read from 80331 */ + int32_t rqbuf_firstindex; + /* first of read buffer */ + int32_t rqbuf_lastindex; + /* last of read buffer */ + uint8_t wqbuffer[ARCMSR_MAX_QBUFFER]; + /* data collection buffer for write to 80331 */ + int32_t wqbuf_firstindex; + /* first of write buffer */ + int32_t wqbuf_lastindex; + /* last of write buffer */ + uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN]; + /* id0 ..... id15, lun0...lun7 */ +#define ARECA_RAID_GONE 0x55 +#define ARECA_RAID_GOOD 0xaa + uint32_t num_resets; + uint32_t num_aborts; + uint32_t firm_request_len; + uint32_t firm_numbers_queue; + uint32_t firm_sdram_size; + uint32_t firm_hd_channels; + char firm_model[12]; + char firm_version[20]; +};/* HW_DEVICE_EXTENSION */ +/* +******************************************************************************* +** Command Control Block +** this CCB length must be 32 bytes boundary +******************************************************************************* +*/ +struct CommandControlBlock +{ + struct ARCMSR_CDB arcmsr_cdb; + /* + ** 0-503 (size of CDB=504): + ** arcmsr messenger scsi command descriptor size 504 bytes + */ + uint32_t cdb_shifted_phyaddr; + /* 504-507 */ + uint32_t reserved1; + /* 508-511 */ +#if BITS_PER_LONG == 64 + /* ======================512+64 bytes======================== */ + struct list_head list; + /* 512-527 16 bytes next/prev ptrs for ccb lists */ + struct scsi_cmnd * pcmd; + /* 528-535 8 bytes pointer of linux scsi command */ + struct AdapterControlBlock * acb; + /* 536-543 8 bytes pointer of acb */ + + uint16_t ccb_flags; + /* 544-545 */ + #define CCB_FLAG_READ 0x0000 + #define CCB_FLAG_WRITE 0x0001 + #define CCB_FLAG_ERROR 0x0002 + #define CCB_FLAG_FLUSHCACHE 0x0004 + #define CCB_FLAG_MASTER_ABORTED 0x0008 + uint16_t startdone; + /* 546-547 */ + #define ARCMSR_CCB_DONE 0x0000 + #define ARCMSR_CCB_START 0x55AA + #define ARCMSR_CCB_ABORTED 0xAA55 + #define ARCMSR_CCB_ILLEGAL 0xFFFF + uint32_t reserved2[7]; + /* 548-551 552-555 556-559 560-563 564-567 568-571 572-575 */ +#else + /* ======================512+32 bytes======================== */ + struct list_head list; + /* 512-519 8 bytes next/prev ptrs for ccb lists */ + struct scsi_cmnd * pcmd; + /* 520-523 4 bytes pointer of linux scsi command */ + struct AdapterControlBlock * acb; + /* 524-527 4 bytes pointer of acb */ + + uint16_t ccb_flags; + /* 528-529 */ + #define CCB_FLAG_READ 0x0000 + #define CCB_FLAG_WRITE 0x0001 + #define CCB_FLAG_ERROR 0x0002 + #define CCB_FLAG_FLUSHCACHE 0x0004 + #define CCB_FLAG_MASTER_ABORTED 0x0008 + uint16_t startdone; + /* 530-531 */ + #define ARCMSR_CCB_DONE 0x0000 + #define ARCMSR_CCB_START 0x55AA + #define ARCMSR_CCB_ABORTED 0xAA55 + #define ARCMSR_CCB_ILLEGAL 0xFFFF + uint32_t reserved2[3]; + /* 532-535 536-539 540-543 */ +#endif + /* ========================================================== */ +}; +/* +******************************************************************************* +** ARECA SCSI sense data +******************************************************************************* +*/ +struct SENSE_DATA +{ + uint8_t ErrorCode:7; +#define SCSI_SENSE_CURRENT_ERRORS 0x70 +#define SCSI_SENSE_DEFERRED_ERRORS 0x71 + uint8_t Valid:1; + uint8_t SegmentNumber; + uint8_t 
SenseKey:4; + uint8_t Reserved:1; + uint8_t IncorrectLength:1; + uint8_t EndOfMedia:1; + uint8_t FileMark:1; + uint8_t Information[4]; + uint8_t AdditionalSenseLength; + uint8_t CommandSpecificInformation[4]; + uint8_t AdditionalSenseCode; + uint8_t AdditionalSenseCodeQualifier; + uint8_t FieldReplaceableUnitCode; + uint8_t SenseKeySpecific[3]; +}; +/* +******************************************************************************* +** Outbound Interrupt Status Register - OISR +******************************************************************************* +*/ +#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30 +#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10 +#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08 +#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04 +#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02 +#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01 +#define ARCMSR_MU_OUTBOUND_HANDLE_INT \ + (ARCMSR_MU_OUTBOUND_MESSAGE0_INT \ + |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \ + |ARCMSR_MU_OUTBOUND_DOORBELL_INT \ + |ARCMSR_MU_OUTBOUND_POSTQUEUE_INT \ + |ARCMSR_MU_OUTBOUND_PCI_INT) +/* +******************************************************************************* +** Outbound Interrupt Mask Register - OIMR +******************************************************************************* +*/ +#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34 +#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10 +#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08 +#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04 +#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02 +#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01 +#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F + +extern void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb); +extern struct class_device_attribute *arcmsr_host_attrs[]; +extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb); +void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb); + diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c new file mode 100644 index 0000000..0459f41 --- /dev/null +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -0,0 +1,392 @@ +/* +******************************************************************************* +** O.S : Linux +** FILE NAME : arcmsr_attr.c +** BY : Erich Chen +** Description: attributes exported to sysfs and device host +******************************************************************************* +** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved +** +** Web site: www.areca.com.tw +** E-mail: erich@areca.com.tw +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License version 2 as +** published by the Free Software Foundation. +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +******************************************************************************* +** Redistribution and use in source and binary forms, with or without +** modification, are permitted provided that the following conditions +** are met: +** 1. Redistributions of source code must retain the above copyright +** notice, this list of conditions and the following disclaimer. +** 2. 
Redistributions in binary form must reproduce the above copyright +** notice, this list of conditions and the following disclaimer in the +** documentation and/or other materials provided with the distribution. +** 3. The name of the author may not be used to endorse or promote products +** derived from this software without specific prior written permission. +** +** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT +** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY +** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF +** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +******************************************************************************* +** For history of changes, see Documentation/scsi/ChangeLog.arcmsr +** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt +******************************************************************************* +*/ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "arcmsr.h" + +struct class_device_attribute *arcmsr_host_attrs[]; + +static ssize_t +arcmsr_sysfs_iop_message_read(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + struct MessageUnit __iomem *reg = acb->pmu; + uint8_t *pQbuffer,*ptmpQbuffer; + int32_t allxfer_len = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* do message unit read. */ + ptmpQbuffer = (uint8_t *)buf; + while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) + && (allxfer_len < 1031)) { + pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; + memcpy(ptmpQbuffer, pQbuffer, 1); + acb->rqbuf_firstindex++; + acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + ptmpQbuffer++; + allxfer_len++; + } + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *) + ®->message_rbuffer; + uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data; + int32_t iop_len; + + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + iop_len = readl(&prbuffer->data_len); + while (iop_len > 0) { + acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); + acb->rqbuf_lastindex++; + acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + iop_data++; + iop_len--; + } + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, + ®->inbound_doorbell); + } + return (allxfer_len); +} + +static ssize_t +arcmsr_sysfs_iop_message_write(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; + uint8_t *pQbuffer, *ptmpuserbuffer; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (count > 1032) + return -EINVAL; + /* do message unit write. 
*/ + ptmpuserbuffer = (uint8_t *)buf; + user_len = (int32_t)count; + wqbuf_lastindex = acb->wqbuf_lastindex; + wqbuf_firstindex = acb->wqbuf_firstindex; + if (wqbuf_lastindex != wqbuf_firstindex) { + arcmsr_post_Qbuffer(acb); + return 0; /*need retry*/ + } else { + my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) + &(ARCMSR_MAX_QBUFFER - 1); + if (my_empty_len >= user_len) { + while (user_len > 0) { + pQbuffer = + &acb->wqbuffer[acb->wqbuf_lastindex]; + memcpy(pQbuffer, ptmpuserbuffer, 1); + acb->wqbuf_lastindex++; + acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + ptmpuserbuffer++; + user_len--; + } + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { + acb->acb_flags &= + ~ACB_F_MESSAGE_WQBUFFER_CLEARED; + arcmsr_post_Qbuffer(acb); + } + return count; + } else { + return 0; /*need retry*/ + } + } +} + +static ssize_t +arcmsr_sysfs_iop_message_clear(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + struct MessageUnit __iomem *reg = acb->pmu; + uint8_t *pQbuffer; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK + , ®->inbound_doorbell); + } + acb->acb_flags |= + (ACB_F_MESSAGE_WQBUFFER_CLEARED + | ACB_F_MESSAGE_RQBUFFER_CLEARED + | ACB_F_MESSAGE_WQBUFFER_READED); + acb->rqbuf_firstindex = 0; + acb->rqbuf_lastindex = 0; + acb->wqbuf_firstindex = 0; + acb->wqbuf_lastindex = 0; + pQbuffer = acb->rqbuffer; + memset(pQbuffer, 0, sizeof (struct QBUFFER)); + pQbuffer = acb->wqbuffer; + memset(pQbuffer, 0, sizeof (struct QBUFFER)); + return 1; +} + +static struct bin_attribute arcmsr_sysfs_message_read_attr = { + .attr = { + .name = "mu_read", + .mode = S_IRUSR , + .owner = THIS_MODULE, + }, + .size = 1032, + .read = arcmsr_sysfs_iop_message_read, +}; + +static struct bin_attribute arcmsr_sysfs_message_write_attr = { + .attr = { + .name = "mu_write", + .mode = S_IWUSR, + .owner = THIS_MODULE, + }, + .size = 1032, + .write = arcmsr_sysfs_iop_message_write, +}; + +static struct bin_attribute arcmsr_sysfs_message_clear_attr = { + .attr = { + .name = "mu_clear", + .mode = S_IWUSR, + .owner = THIS_MODULE, + }, + .size = 1, + .write = arcmsr_sysfs_iop_message_clear, +}; + +int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb) +{ + struct Scsi_Host *host = acb->host; + int error; + + error = sysfs_create_bin_file(&host->shost_classdev.kobj, + &arcmsr_sysfs_message_read_attr); + if (error) { + printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n"); + goto error_bin_file_message_read; + } + error = sysfs_create_bin_file(&host->shost_classdev.kobj, + &arcmsr_sysfs_message_write_attr); + if (error) { + printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n"); + goto error_bin_file_message_write; + } + error = sysfs_create_bin_file(&host->shost_classdev.kobj, + &arcmsr_sysfs_message_clear_attr); + if (error) { + printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n"); + goto error_bin_file_message_clear; + } + return 0; +error_bin_file_message_clear: + error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + &arcmsr_sysfs_message_write_attr); + if (error) + printk(KERN_ERR "arcmsr: sysfs_remove_bin_file mu_write failed\n"); +error_bin_file_message_write: + error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + 
&arcmsr_sysfs_message_read_attr); + if (error) + printk(KERN_ERR "arcmsr: sysfs_remove_bin_file mu_read failed\n"); +error_bin_file_message_read: + return error; +} + +void +arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) { + struct Scsi_Host *host = acb->host; + int error; + + error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + &arcmsr_sysfs_message_clear_attr); + if (error) + printk(KERN_ERR "arcmsr: free sysfs mu_clear failed\n"); + error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + &arcmsr_sysfs_message_write_attr); + if (error) + printk(KERN_ERR "arcmsr: free sysfs mu_write failed\n"); + error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + &arcmsr_sysfs_message_read_attr); + if (error) + printk(KERN_ERR "arcmsr: free sysfss mu_read failed\n"); +} + + +static ssize_t +arcmsr_attr_host_driver_version(struct class_device *cdev, char *buf) { + return snprintf(buf, PAGE_SIZE, + "ARCMSR: %s\n", + ARCMSR_DRIVER_VERSION); +} + +static ssize_t +arcmsr_attr_host_driver_posted_cmd(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "Current commands posted: %4d\n", + atomic_read(&acb->ccboutstandingcount)); +} + +static ssize_t +arcmsr_attr_host_driver_reset(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "SCSI Host Resets: %4d\n", + acb->num_resets); +} + +static ssize_t +arcmsr_attr_host_driver_abort(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "SCSI Aborts/Timeouts: %4d\n", + acb->num_aborts); +} + +static ssize_t +arcmsr_attr_host_fw_model(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + return snprintf(buf, PAGE_SIZE, + "Adapter Model: %s\n", + acb->firm_model); +} + +static ssize_t +arcmsr_attr_host_fw_version(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "Firmware Version: %s\n", + acb->firm_version); +} + +static ssize_t +arcmsr_attr_host_fw_request_len(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "Reguest Lenth: %4d\n", + acb->firm_request_len); +} + +static ssize_t +arcmsr_attr_host_fw_numbers_queue(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "Numbers of Queue: %4d\n", + acb->firm_numbers_queue); +} + +static ssize_t +arcmsr_attr_host_fw_sdram_size(struct class_device *cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "SDRAM Size: %4d\n", + acb->firm_sdram_size); +} + +static ssize_t +arcmsr_attr_host_fw_hd_channels(struct class_device 
*cdev, char *buf) { + struct Scsi_Host *host = class_to_shost(cdev); + struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; + + return snprintf(buf, PAGE_SIZE, + "Hard Disk Channels: %4d\n", + acb->firm_hd_channels); +} + +static CLASS_DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL); +static CLASS_DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL); +static CLASS_DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL); +static CLASS_DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL); +static CLASS_DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL); +static CLASS_DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL); +static CLASS_DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL); +static CLASS_DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL); +static CLASS_DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL); +static CLASS_DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL); + +struct class_device_attribute *arcmsr_host_attrs[] = { + &class_device_attr_host_driver_version, + &class_device_attr_host_driver_posted_cmd, + &class_device_attr_host_driver_reset, + &class_device_attr_host_driver_abort, + &class_device_attr_host_fw_model, + &class_device_attr_host_fw_version, + &class_device_attr_host_fw_request_len, + &class_device_attr_host_fw_numbers_queue, + &class_device_attr_host_fw_sdram_size, + &class_device_attr_host_fw_hd_channels, + NULL, +}; diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c new file mode 100644 index 0000000..475f978 --- /dev/null +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -0,0 +1,1496 @@ +/* +******************************************************************************* +** O.S : Linux +** FILE NAME : arcmsr_hba.c +** BY : Erich Chen +** Description: SCSI RAID Device Driver for +** ARECA RAID Host adapter +******************************************************************************* +** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved +** +** Web site: www.areca.com.tw +** E-mail: erich@areca.com.tw +** +** This program is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License version 2 as +** published by the Free Software Foundation. +** This program is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +******************************************************************************* +** Redistribution and use in source and binary forms, with or without +** modification, are permitted provided that the following conditions +** are met: +** 1. Redistributions of source code must retain the above copyright +** notice, this list of conditions and the following disclaimer. +** 2. Redistributions in binary form must reproduce the above copyright +** notice, this list of conditions and the following disclaimer in the +** documentation and/or other materials provided with the distribution. +** 3. The name of the author may not be used to endorse or promote products +** derived from this software without specific prior written permission. 
+** +** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT +** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY +** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF +** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +******************************************************************************* +** For history of changes, see Documentation/scsi/ChangeLog.arcmsr +** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt +******************************************************************************* +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "arcmsr.h" + +MODULE_AUTHOR("Erich Chen "); +MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(ARCMSR_DRIVER_VERSION); + +static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd); +static int arcmsr_abort(struct scsi_cmnd *); +static int arcmsr_bus_reset(struct scsi_cmnd *); +static int arcmsr_bios_param(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int *info); +static int arcmsr_queue_command(struct scsi_cmnd * cmd, + void (*done) (struct scsi_cmnd *)); +static int arcmsr_probe(struct pci_dev *pdev, + const struct pci_device_id *id); +static void arcmsr_remove(struct pci_dev *pdev); +static void arcmsr_shutdown(struct pci_dev *pdev); +static void arcmsr_iop_init(struct AdapterControlBlock *acb); +static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb); +static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); +static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); +static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb); +static const char *arcmsr_info(struct Scsi_Host *); +static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); + +static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) +{ + if (queue_depth > ARCMSR_MAX_CMD_PERLUN) + queue_depth = ARCMSR_MAX_CMD_PERLUN; + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); + return queue_depth; +} + +static struct scsi_host_template arcmsr_scsi_host_template = { + .module = THIS_MODULE, + .name = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION, + .info = arcmsr_info, + .queuecommand = arcmsr_queue_command, + .eh_abort_handler = arcmsr_abort, + .eh_bus_reset_handler = arcmsr_bus_reset, + .bios_param = arcmsr_bios_param, + .change_queue_depth = arcmsr_adjust_disk_queue_depth, + .can_queue = ARCMSR_MAX_OUTSTANDING_CMD, + .this_id = ARCMSR_SCSI_INITIATOR_ID, + .sg_tablesize = ARCMSR_MAX_SG_ENTRIES, + .max_sectors = ARCMSR_MAX_XFER_SECTORS, + .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = arcmsr_host_attrs, +}; + +static struct pci_device_id arcmsr_device_id_table[] = { + 
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)}, + {0, 0}, /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); +static struct pci_driver arcmsr_pci_driver = { + .name = "arcmsr", + .id_table = arcmsr_device_id_table, + .probe = arcmsr_probe, + .remove = arcmsr_remove, + .shutdown = arcmsr_shutdown +}; + +static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id, + struct pt_regs *regs) +{ + irqreturn_t handle_state; + struct AdapterControlBlock *acb; + unsigned long flags; + + acb = (struct AdapterControlBlock *)dev_id; + + spin_lock_irqsave(acb->host->host_lock, flags); + handle_state = arcmsr_interrupt(acb); + spin_unlock_irqrestore(acb->host->host_lock, flags); + return handle_state; +} + +static int arcmsr_bios_param(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int *geom) +{ + int ret, heads, sectors, cylinders, total_capacity; + unsigned char *buffer;/* return copy of block device's partition table */ + + buffer = scsi_bios_ptable(bdev); + if (buffer) { + ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]); + kfree(buffer); + if (ret != -1) + return ret; + } + total_capacity = capacity; + heads = 64; + sectors = 32; + cylinders = total_capacity / (heads * sectors); + if (cylinders > 1024) { + heads = 255; + sectors = 63; + cylinders = total_capacity / (heads * sectors); + } + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + return 0; +} + +static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) +{ + struct pci_dev *pdev = acb->pdev; + struct MessageUnit __iomem *reg = acb->pmu; + u32 ccb_phyaddr_hi32; + void *dma_coherent; + dma_addr_t dma_coherent_handle, dma_addr; + struct CommandControlBlock *ccb_tmp; + int i, j; + + dma_coherent = dma_alloc_coherent(&pdev->dev, + ARCMSR_MAX_FREECCB_NUM * + sizeof (struct CommandControlBlock) + 0x20, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent) + return -ENOMEM; + + acb->dma_coherent = dma_coherent; + acb->dma_coherent_handle = dma_coherent_handle; + + if (((unsigned long)dma_coherent & 0x1F)) { + dma_coherent = dma_coherent + + (0x20 - ((unsigned long)dma_coherent & 0x1F)); + dma_coherent_handle = dma_coherent_handle + + (0x20 - ((unsigned long)dma_coherent_handle & 0x1F)); + } + + dma_addr = dma_coherent_handle; + ccb_tmp = (struct CommandControlBlock *)dma_coherent; + for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { + ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5; + ccb_tmp->acb = acb; + acb->pccb_pool[i] = ccb_tmp; + list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); + dma_addr = dma_addr + sizeof (struct CommandControlBlock); + ccb_tmp++; + } + + 
acb->vir2phy_offset = (unsigned long)ccb_tmp - + (unsigned long)dma_addr; + for (i = 0; i < ARCMSR_MAX_TARGETID; i++) + for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) + acb->devstate[i][j] = ARECA_RAID_GOOD; + + /* + ** here we need to tell iop 331 our ccb_tmp.HighPart + ** if ccb_tmp.HighPart is not zero + */ + ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16); + if (ccb_phyaddr_hi32 != 0) { + writel(ARCMSR_SIGNATURE_SET_CONFIG, ®->message_rwbuffer[0]); + writel(ccb_phyaddr_hi32, ®->message_rwbuffer[1]); + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); + if (arcmsr_wait_msgint_ready(acb)) + printk(KERN_NOTICE "arcmsr%d: " + "'set ccb high part physical address' timeout\n", + acb->host->host_no); + } + + writel(readl(®->outbound_intmask) | + ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, + ®->outbound_intmask); + return 0; +} + +static int arcmsr_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct Scsi_Host *host; + struct AdapterControlBlock *acb; + uint8_t bus, dev_fun; + int error; + + error = pci_enable_device(pdev); + if (error) + goto out; + pci_set_master(pdev); + + host = scsi_host_alloc(&arcmsr_scsi_host_template, + sizeof(struct AdapterControlBlock)); + if (!host) { + error = -ENOMEM; + goto out_disable_device; + } + acb = (struct AdapterControlBlock *)host->hostdata; + memset(acb, 0, sizeof (struct AdapterControlBlock)); + + error = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (error) { + error = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (error) { + printk(KERN_WARNING + "scsi%d: No suitable DMA mask available\n", + host->host_no); + goto out_host_put; + } + } + bus = pdev->bus->number; + dev_fun = pdev->devfn; + acb->host = host; + acb->pdev = pdev; + host->max_sectors = ARCMSR_MAX_XFER_SECTORS; + host->max_lun = ARCMSR_MAX_TARGETLUN; + host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/ + host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/ + host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES; + host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */ + host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN; + host->this_id = ARCMSR_SCSI_INITIATOR_ID; + host->unique_id = (bus << 8) | dev_fun; + host->irq = pdev->irq; + error = pci_request_regions(pdev, "arcmsr"); + if (error) + goto out_host_put; + + acb->pmu = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!acb->pmu) { + printk(KERN_NOTICE "arcmsr%d: memory" + " mapping region fail \n", acb->host->host_no); + goto out_release_regions; + } + acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | + ACB_F_MESSAGE_RQBUFFER_CLEARED | + ACB_F_MESSAGE_WQBUFFER_READED); + acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; + INIT_LIST_HEAD(&acb->ccb_free_list); + + error = arcmsr_alloc_ccb_pool(acb); + if (error) + goto out_iounmap; + + error = request_irq(pdev->irq, arcmsr_do_interrupt, + SA_INTERRUPT | SA_SHIRQ, "arcmsr", acb); + if (error) + goto out_free_ccb_pool; + + arcmsr_iop_init(acb); + pci_set_drvdata(pdev, host); + + error = scsi_add_host(host, &pdev->dev); + if (error) + goto out_free_irq; + + error = arcmsr_alloc_sysfs_attr(acb); + if (error) + goto out_free_sysfs; + + scsi_scan_host(host); + return 0; + out_free_sysfs: + out_free_irq: + free_irq(pdev->irq, acb); + out_free_ccb_pool: + arcmsr_free_ccb_pool(acb); + out_iounmap: + iounmap(acb->pmu); + out_release_regions: + pci_release_regions(pdev); + out_host_put: + scsi_host_put(host); + out_disable_device: + pci_disable_device(pdev); + out: + return error; +} + +static void arcmsr_abort_allcmd(struct 
AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + + writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); + if (arcmsr_wait_msgint_ready(acb)) + printk(KERN_NOTICE + "arcmsr%d: wait 'abort all outstanding command' timeout \n" + , acb->host->host_no); +} + +static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) +{ + struct AdapterControlBlock *acb = ccb->acb; + struct scsi_cmnd *pcmd = ccb->pcmd; + + if (pcmd->use_sg != 0) { + struct scatterlist *sl; + + sl = (struct scatterlist *)pcmd->request_buffer; + pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction); + } + else if (pcmd->request_bufflen != 0) + pci_unmap_single(acb->pdev, + pcmd->SCp.dma_handle, + pcmd->request_bufflen, pcmd->sc_data_direction); +} + +static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag) +{ + struct AdapterControlBlock *acb = ccb->acb; + struct scsi_cmnd *pcmd = ccb->pcmd; + + arcmsr_pci_unmap_dma(ccb); + if (stand_flag == 1) + atomic_dec(&acb->ccboutstandingcount); + ccb->startdone = ARCMSR_CCB_DONE; + ccb->ccb_flags = 0; + list_add_tail(&ccb->list, &acb->ccb_free_list); + pcmd->scsi_done(pcmd); +} + +static void arcmsr_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + struct MessageUnit __iomem *reg = acb->pmu; + int poll_count = 0; + + arcmsr_free_sysfs_attr(acb); + scsi_remove_host(host); + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); + writel(readl(®->outbound_intmask) | + ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, + ®->outbound_intmask); + acb->acb_flags |= ACB_F_SCSISTOPADAPTER; + acb->acb_flags &= ~ACB_F_IOP_INITED; + + for (poll_count = 0; poll_count < 256; poll_count++) { + if (!atomic_read(&acb->ccboutstandingcount)) + break; + arcmsr_interrupt(acb); + msleep(25); + } + + if (atomic_read(&acb->ccboutstandingcount)) { + int i; + + arcmsr_abort_allcmd(acb); + for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++) + readl(®->outbound_queueport); + for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { + struct CommandControlBlock *ccb = acb->pccb_pool[i]; + if (ccb->startdone == ARCMSR_CCB_START) { + ccb->startdone = ARCMSR_CCB_ABORTED; + ccb->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(ccb, 1); + } + } + } + + free_irq(pdev->irq, acb); + iounmap(acb->pmu); + arcmsr_free_ccb_pool(acb); + pci_release_regions(pdev); + + scsi_host_put(host); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static void arcmsr_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)host->hostdata; + + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); +} + +static int arcmsr_module_init(void) +{ + int error = 0; + + error = pci_register_driver(&arcmsr_pci_driver); + return error; +} + +static void arcmsr_module_exit(void) +{ + pci_unregister_driver(&arcmsr_pci_driver); +} +module_init(arcmsr_module_init); +module_exit(arcmsr_module_exit); + +static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + u32 orig_mask = readl(®->outbound_intmask); + + writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, + ®->outbound_intmask); + return orig_mask; +} + +static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, + u32 orig_mask) +{ + struct MessageUnit __iomem *reg = acb->pmu; + u32 mask; + + mask = orig_mask & 
~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | + ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); + writel(mask, ®->outbound_intmask); +} + +static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg=acb->pmu; + + writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); + if (arcmsr_wait_msgint_ready(acb)) + printk(KERN_NOTICE + "arcmsr%d: wait 'flush adapter cache' timeout \n" + , acb->host->host_no); +} + +static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) +{ + struct scsi_cmnd *pcmd = ccb->pcmd; + struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; + + pcmd->result = DID_OK << 16; + if (sensebuffer) { + int sense_data_length = + sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer) + ? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer); + memset(sensebuffer, 0, sizeof (pcmd->sense_buffer)); + memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length); + sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; + sensebuffer->Valid = 1; + } +} + +static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + uint32_t Index; + uint8_t Retries = 0x00; + + do { + for (Index = 0; Index < 100; Index++) { + if (readl(®->outbound_intstatus) + & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { + writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT + , ®->outbound_intstatus); + return 0x00; + } + msleep_interruptible(10); + }/*max 1 seconds*/ + } while (Retries++ < 20);/*max 20 sec*/ + return 0xff; +} + +static void arcmsr_build_ccb(struct AdapterControlBlock *acb, + struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd) +{ + struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; + int8_t *psge = (int8_t *)&arcmsr_cdb->u; + uint32_t address_lo, address_hi; + int arccdbsize = 0x30; + + ccb->pcmd = pcmd; + memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB)); + arcmsr_cdb->Bus = 0; + arcmsr_cdb->TargetID = pcmd->device->id; + arcmsr_cdb->LUN = pcmd->device->lun; + arcmsr_cdb->Function = 1; + arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len; + arcmsr_cdb->Context = (unsigned long)arcmsr_cdb; + memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); + if (pcmd->use_sg) { + int length, sgcount, i, cdb_sgcount = 0; + struct scatterlist *sl; + + /* Get Scatter Gather List from scsiport. */ + sl = (struct scatterlist *) pcmd->request_buffer; + sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg, + pcmd->sc_data_direction); + /* map stor port SG list to our iop SG List. 
*/ + for (i = 0; i < sgcount; i++) { + /* Get the physical address of the current data pointer */ + length = cpu_to_le32(sg_dma_len(sl)); + address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl))); + address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl))); + if (address_hi == 0) { + struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; + + pdma_sg->address = address_lo; + pdma_sg->length = length; + psge += sizeof (struct SG32ENTRY); + arccdbsize += sizeof (struct SG32ENTRY); + } else { + struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; + + pdma_sg->addresshigh = address_hi; + pdma_sg->address = address_lo; + pdma_sg->length = length|IS_SG64_ADDR; + psge += sizeof (struct SG64ENTRY); + arccdbsize += sizeof (struct SG64ENTRY); + } + sl++; + cdb_sgcount++; + } + arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount; + arcmsr_cdb->DataLength = pcmd->request_bufflen; + if ( arccdbsize > 256) + arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; + } else if (pcmd->request_bufflen) { + dma_addr_t dma_addr; + dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer, + pcmd->request_bufflen, pcmd->sc_data_direction); + pcmd->SCp.dma_handle = dma_addr; + address_lo = cpu_to_le32(dma_addr_lo32(dma_addr)); + address_hi = cpu_to_le32(dma_addr_hi32(dma_addr)); + if (address_hi == 0) { + struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; + pdma_sg->address = address_lo; + pdma_sg->length = pcmd->request_bufflen; + } else { + struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; + pdma_sg->addresshigh = address_hi; + pdma_sg->address = address_lo; + pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR; + } + arcmsr_cdb->sgcount = 1; + arcmsr_cdb->DataLength = pcmd->request_bufflen; + } + if (pcmd->sc_data_direction == DMA_TO_DEVICE ) { + arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; + ccb->ccb_flags |= CCB_FLAG_WRITE; + } +} + +static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr; + struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; + + atomic_inc(&acb->ccboutstandingcount); + ccb->startdone = ARCMSR_CCB_START; + if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) + writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, + ®->inbound_queueport); + else + writel(cdb_shifted_phyaddr, ®->inbound_queueport); +} + +void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer; + uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data; + int32_t allxfer_len = 0; + + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) { + acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); + while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) + && (allxfer_len < 124)) { + writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data); + acb->wqbuf_firstindex++; + acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + iop_data++; + allxfer_len++; + } + writel(allxfer_len, &pwbuffer->data_len); + writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK + , ®->inbound_doorbell); + } +} + +static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + + acb->acb_flags &= ~ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); + if (arcmsr_wait_msgint_ready(acb)) + printk(KERN_NOTICE + "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" + , acb->host->host_no); +} 
+ +static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) +{ + dma_free_coherent(&acb->pdev->dev, + ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, + acb->dma_coherent, + acb->dma_coherent_handle); +} + +static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + struct CommandControlBlock *ccb; + uint32_t flag_ccb, outbound_intstatus, outbound_doorbell; + + outbound_intstatus = readl(®->outbound_intstatus) + & acb->outbound_int_enable; + writel(outbound_intstatus, ®->outbound_intstatus); + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { + outbound_doorbell = readl(®->outbound_doorbell); + writel(outbound_doorbell, ®->outbound_doorbell); + if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { + struct QBUFFER __iomem * prbuffer = + (struct QBUFFER __iomem *) ®->message_rbuffer; + uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data; + int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; + + rqbuf_lastindex = acb->rqbuf_lastindex; + rqbuf_firstindex = acb->rqbuf_firstindex; + iop_len = readl(&prbuffer->data_len); + my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) + &(ARCMSR_MAX_QBUFFER - 1); + if (my_empty_len >= iop_len) { + while (iop_len > 0) { + acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); + acb->rqbuf_lastindex++; + acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + iop_data++; + iop_len--; + } + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, + ®->inbound_doorbell); + } else + acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; + } + if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { + acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; + if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { + struct QBUFFER __iomem * pwbuffer = + (struct QBUFFER __iomem *) ®->message_wbuffer; + uint8_t __iomem * iop_data = (uint8_t __iomem *) pwbuffer->data; + int32_t allxfer_len = 0; + + acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); + while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) + && (allxfer_len < 124)) { + writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data); + acb->wqbuf_firstindex++; + acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + iop_data++; + allxfer_len++; + } + writel(allxfer_len, &pwbuffer->data_len); + writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, + ®->inbound_doorbell); + } + if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) + acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; + } + } + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { + int id, lun; + /* + **************************************************************** + ** areca cdb command done + **************************************************************** + */ + while (1) { + if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) + break;/*chip FIFO no ccb for completion already*/ + /* check if command done with no error*/ + ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + + (flag_ccb << 5)); + if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { + if (ccb->startdone == ARCMSR_CCB_ABORTED) { + struct scsi_cmnd *abortcmd=ccb->pcmd; + if (abortcmd) { + abortcmd->result |= DID_ABORT >> 16; + arcmsr_ccb_complete(ccb, 1); + printk(KERN_NOTICE + "arcmsr%d: ccb='0x%p' isr got aborted command \n" + , acb->host->host_no, ccb); + } + continue; + } + printk(KERN_NOTICE + "arcmsr%d: isr get an illegal ccb command done acb='0x%p'" + "ccb='0x%p' ccbacb='0x%p' startdone = 0x%x" + " ccboutstandingcount=%d \n" + , acb->host->host_no + , acb + , ccb + , 
ccb->acb + , ccb->startdone + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + id = ccb->pcmd->device->id; + lun = ccb->pcmd->device->lun; + if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) { + if (acb->devstate[id][lun] == ARECA_RAID_GONE) + acb->devstate[id][lun] = ARECA_RAID_GOOD; + ccb->pcmd->result = DID_OK << 16; + arcmsr_ccb_complete(ccb, 1); + } else { + switch(ccb->arcmsr_cdb.DeviceStatus) { + case ARCMSR_DEV_SELECT_TIMEOUT: { + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_TIME_OUT << 16; + arcmsr_ccb_complete(ccb, 1); + } + break; + case ARCMSR_DEV_ABORTED: + case ARCMSR_DEV_INIT_FAIL: { + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_BAD_TARGET << 16; + arcmsr_ccb_complete(ccb, 1); + } + break; + case ARCMSR_DEV_CHECK_CONDITION: { + acb->devstate[id][lun] = ARECA_RAID_GOOD; + arcmsr_report_sense_info(ccb); + arcmsr_ccb_complete(ccb, 1); + } + break; + default: + printk(KERN_NOTICE + "arcmsr%d: scsi id=%d lun=%d" + " isr get command error done," + "but got unknown DeviceStatus = 0x%x \n" + , acb->host->host_no + , id + , lun + , ccb->arcmsr_cdb.DeviceStatus); + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_NO_CONNECT << 16; + arcmsr_ccb_complete(ccb, 1); + break; + } + } + }/*drain reply FIFO*/ + } + if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) + return IRQ_NONE; + return IRQ_HANDLED; +} + +static void arcmsr_iop_parking(struct AdapterControlBlock *acb) +{ + if (acb) { + /* stop adapter background rebuild */ + if (acb->acb_flags & ACB_F_MSG_START_BGRB) { + acb->acb_flags &= ~ACB_F_MSG_START_BGRB; + arcmsr_stop_adapter_bgrb(acb); + arcmsr_flush_adapter_cache(acb); + } + } +} + +static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd) +{ + struct MessageUnit __iomem *reg = acb->pmu; + struct CMD_MESSAGE_FIELD *pcmdmessagefld; + int retvalue = 0, transfer_len = 0; + char *buffer; + uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 | + (uint32_t ) cmd->cmnd[6] << 16 | + (uint32_t ) cmd->cmnd[7] << 8 | + (uint32_t ) cmd->cmnd[8]; + /* 4 bytes: Areca io control code */ + if (cmd->use_sg) { + struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer; + + buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; + if (cmd->use_sg > 1) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + transfer_len += sg->length; + } else { + buffer = cmd->request_buffer; + transfer_len = cmd->request_bufflen; + } + if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; + switch(controlcode) { + case ARCMSR_MESSAGE_READ_RQBUFFER: { + unsigned long *ver_addr; + dma_addr_t buf_handle; + uint8_t *pQbuffer, *ptmpQbuffer; + int32_t allxfer_len = 0; + + ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); + if (!ver_addr) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + ptmpQbuffer = (uint8_t *) ver_addr; + while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) + && (allxfer_len < 1031)) { + pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; + memcpy(ptmpQbuffer, pQbuffer, 1); + acb->rqbuf_firstindex++; + acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; + ptmpQbuffer++; + allxfer_len++; + } + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *) + ®->message_rbuffer; + uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data; + int32_t iop_len; + + acb->acb_flags &= 
~ACB_F_IOPDATA_OVERFLOW; + iop_len = readl(&prbuffer->data_len); + while (iop_len > 0) { + acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); + acb->rqbuf_lastindex++; + acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + iop_data++; + iop_len--; + } + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, + ®->inbound_doorbell); + } + memcpy(pcmdmessagefld->messagedatabuffer, + (uint8_t *)ver_addr, allxfer_len); + pcmdmessagefld->cmdmessage.Length = allxfer_len; + pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; + pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); + } + break; + case ARCMSR_MESSAGE_WRITE_WQBUFFER: { + unsigned long *ver_addr; + dma_addr_t buf_handle; + int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; + uint8_t *pQbuffer, *ptmpuserbuffer; + + ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); + if (!ver_addr) { + retvalue = ARCMSR_MESSAGE_FAIL; + goto message_out; + } + ptmpuserbuffer = (uint8_t *)ver_addr; + user_len = pcmdmessagefld->cmdmessage.Length; + memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); + wqbuf_lastindex = acb->wqbuf_lastindex; + wqbuf_firstindex = acb->wqbuf_firstindex; + if (wqbuf_lastindex != wqbuf_firstindex) { + struct SENSE_DATA *sensebuffer = + (struct SENSE_DATA *)cmd->sense_buffer; + arcmsr_post_Qbuffer(acb); + /* has error report sensedata */ + sensebuffer->ErrorCode = 0x70; + sensebuffer->SenseKey = ILLEGAL_REQUEST; + sensebuffer->AdditionalSenseLength = 0x0A; + sensebuffer->AdditionalSenseCode = 0x20; + sensebuffer->Valid = 1; + retvalue = ARCMSR_MESSAGE_FAIL; + } else { + my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) + &(ARCMSR_MAX_QBUFFER - 1); + if (my_empty_len >= user_len) { + while (user_len > 0) { + pQbuffer = + &acb->wqbuffer[acb->wqbuf_lastindex]; + memcpy(pQbuffer, ptmpuserbuffer, 1); + acb->wqbuf_lastindex++; + acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; + ptmpuserbuffer++; + user_len--; + } + if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { + acb->acb_flags &= + ~ACB_F_MESSAGE_WQBUFFER_CLEARED; + arcmsr_post_Qbuffer(acb); + } + } else { + /* has error report sensedata */ + struct SENSE_DATA *sensebuffer = + (struct SENSE_DATA *)cmd->sense_buffer; + sensebuffer->ErrorCode = 0x70; + sensebuffer->SenseKey = ILLEGAL_REQUEST; + sensebuffer->AdditionalSenseLength = 0x0A; + sensebuffer->AdditionalSenseCode = 0x20; + sensebuffer->Valid = 1; + retvalue = ARCMSR_MESSAGE_FAIL; + } + } + pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); + } + break; + case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { + uint8_t *pQbuffer = acb->rqbuffer; + + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, + ®->inbound_doorbell); + } + acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; + acb->rqbuf_firstindex = 0; + acb->rqbuf_lastindex = 0; + memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + } + break; + case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { + uint8_t *pQbuffer = acb->wqbuffer; + + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK + , ®->inbound_doorbell); + } + acb->acb_flags |= + (ACB_F_MESSAGE_WQBUFFER_CLEARED | + ACB_F_MESSAGE_WQBUFFER_READED); + acb->wqbuf_firstindex = 0; + acb->wqbuf_lastindex = 0; + memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_OK; + } + break; + case 
ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { + uint8_t *pQbuffer; + + if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { + acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK + , ®->inbound_doorbell); + } + acb->acb_flags |= + (ACB_F_MESSAGE_WQBUFFER_CLEARED + | ACB_F_MESSAGE_RQBUFFER_CLEARED + | ACB_F_MESSAGE_WQBUFFER_READED); + acb->rqbuf_firstindex = 0; + acb->rqbuf_lastindex = 0; + acb->wqbuf_firstindex = 0; + acb->wqbuf_lastindex = 0; + pQbuffer = acb->rqbuffer; + memset(pQbuffer, 0, sizeof (struct QBUFFER)); + pQbuffer = acb->wqbuffer; + memset(pQbuffer, 0, sizeof (struct QBUFFER)); + pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; + } + break; + case ARCMSR_MESSAGE_RETURN_CODE_3F: { + pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; + } + break; + case ARCMSR_MESSAGE_SAY_HELLO: { + int8_t * hello_string = "Hello! I am ARCMSR"; + + memcpy(pcmdmessagefld->messagedatabuffer, hello_string + , (int16_t)strlen(hello_string)); + pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; + } + break; + case ARCMSR_MESSAGE_SAY_GOODBYE: + arcmsr_iop_parking(acb); + break; + case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: + arcmsr_flush_adapter_cache(acb); + break; + default: + retvalue = ARCMSR_MESSAGE_FAIL; + } + message_out: + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + kunmap_atomic(buffer - sg->offset, KM_IRQ0); + } + return retvalue; +} + +static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb) +{ + struct list_head *head = &acb->ccb_free_list; + struct CommandControlBlock *ccb = NULL; + + if (!list_empty(head)) { + ccb = list_entry(head->next, struct CommandControlBlock, list); + list_del(head->next); + } + return ccb; +} + +static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, + struct scsi_cmnd *cmd) +{ + switch (cmd->cmnd[0]) { + case INQUIRY: { + unsigned char inqdata[36]; + char *buffer; + + if (cmd->device->lun) { + cmd->result = (DID_TIME_OUT << 16); + cmd->scsi_done(cmd); + return; + } + inqdata[0] = TYPE_PROCESSOR; + /* Periph Qualifier & Periph Dev Type */ + inqdata[1] = 0; + /* rem media bit & Dev Type Modifier */ + inqdata[2] = 0; + /* ISO,ECMA,& ANSI versions */ + inqdata[4] = 31; + /* length of additional data */ + strncpy(&inqdata[8], "Areca ", 8); + /* Vendor Identification */ + strncpy(&inqdata[16], "RAID controller ", 16); + /* Product Identification */ + strncpy(&inqdata[32], "R001", 4); /* Product Revision */ + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; + } else { + buffer = cmd->request_buffer; + } + memcpy(buffer, inqdata, sizeof(inqdata)); + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + kunmap_atomic(buffer - sg->offset, KM_IRQ0); + } + cmd->scsi_done(cmd); + } + break; + case WRITE_BUFFER: + case READ_BUFFER: { + if (arcmsr_iop_message_xfer(acb, cmd)) + cmd->result = (DID_ERROR << 16); + cmd->scsi_done(cmd); + } + break; + default: + cmd->scsi_done(cmd); + } +} + +static int arcmsr_queue_command(struct scsi_cmnd *cmd, + void (* done)(struct scsi_cmnd *)) +{ + struct Scsi_Host *host = cmd->device->host; + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + struct CommandControlBlock *ccb; + int target = cmd->device->id; + int lun = cmd->device->lun; + + cmd->scsi_done = done; + cmd->host_scribble 
= NULL; + cmd->result = 0; + if (acb->acb_flags & ACB_F_BUS_RESET) { + printk(KERN_NOTICE "arcmsr%d: bus reset" + " and return busy \n" + , acb->host->host_no); + return SCSI_MLQUEUE_HOST_BUSY; + } + if(target == 16) { + /* virtual device for iop message transfer */ + arcmsr_handle_virtual_command(acb, cmd); + return 0; + } + if (acb->devstate[target][lun] == ARECA_RAID_GONE) { + uint8_t block_cmd; + + block_cmd = cmd->cmnd[0] & 0x0f; + if (block_cmd == 0x08 || block_cmd == 0x0a) { + printk(KERN_NOTICE + "arcmsr%d: block 'read/write'" + "command with gone raid volume" + " Cmd=%2x, TargetId=%d, Lun=%d \n" + , acb->host->host_no + , cmd->cmnd[0] + , target, lun); + cmd->result = (DID_NO_CONNECT << 16); + cmd->scsi_done(cmd); + return 0; + } + } + if (atomic_read(&acb->ccboutstandingcount) >= + ARCMSR_MAX_OUTSTANDING_CMD) + return SCSI_MLQUEUE_HOST_BUSY; + + ccb = arcmsr_get_freeccb(acb); + if (!ccb) + return SCSI_MLQUEUE_HOST_BUSY; + arcmsr_build_ccb(acb, ccb, cmd); + arcmsr_post_ccb(acb, ccb); + return 0; +} + +static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + char *acb_firm_model = acb->firm_model; + char *acb_firm_version = acb->firm_version; + char __iomem *iop_firm_model = (char __iomem *) ®->message_rwbuffer[15]; + char __iomem *iop_firm_version = (char __iomem *) ®->message_rwbuffer[17]; + int count; + + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + if (arcmsr_wait_msgint_ready(acb)) + printk(KERN_NOTICE + "arcmsr%d: wait " + "'get adapter firmware miscellaneous data' timeout \n" + , acb->host->host_no); + count = 8; + while (count) { + *acb_firm_model = readb(iop_firm_model); + acb_firm_model++; + iop_firm_model++; + count--; + } + count = 16; + while (count) { + *acb_firm_version = readb(iop_firm_version); + acb_firm_version++; + iop_firm_version++; + count--; + } + printk(KERN_INFO + "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" + , acb->host->host_no + , acb->firm_version); + acb->firm_request_len = readl(®->message_rwbuffer[1]); + acb->firm_numbers_queue = readl(®->message_rwbuffer[2]); + acb->firm_sdram_size = readl(®->message_rwbuffer[3]); + acb->firm_hd_channels = readl(®->message_rwbuffer[4]); +} + +static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, + struct CommandControlBlock *poll_ccb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + struct CommandControlBlock *ccb; + uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; + int id, lun; + + polling_ccb_retry: + poll_count++; + outbound_intstatus = readl(®->outbound_intstatus) + & acb->outbound_int_enable; + writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/ + while (1) { + if ((flag_ccb = readl(®->outbound_queueport)) == 0xFFFFFFFF) { + if (poll_ccb_done) + break; + else { + msleep(25); + if (poll_count > 100) + break; + goto polling_ccb_retry; + } + } + ccb = (struct CommandControlBlock *) + (acb->vir2phy_offset + (flag_ccb << 5)); + if ((ccb->acb != acb) || + (ccb->startdone != ARCMSR_CCB_START)) { + if ((ccb->startdone == ARCMSR_CCB_ABORTED) || + (ccb == poll_ccb)) { + printk(KERN_NOTICE + "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'" + " poll command abort successfully \n" + , acb->host->host_no + , ccb->pcmd->device->id + , ccb->pcmd->device->lun + , ccb); + ccb->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(ccb, 1); + poll_ccb_done = 1; + continue; + } + printk(KERN_NOTICE + "arcmsr%d: polling get an illegal ccb" + " command done ccb='0x%p'" + "ccboutstandingcount=%d 
\n" + , acb->host->host_no + , ccb + , atomic_read(&acb->ccboutstandingcount)); + continue; + } + id = ccb->pcmd->device->id; + lun = ccb->pcmd->device->lun; + if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) { + if (acb->devstate[id][lun] == ARECA_RAID_GONE) + acb->devstate[id][lun] = ARECA_RAID_GOOD; + ccb->pcmd->result = DID_OK << 16; + arcmsr_ccb_complete(ccb, 1); + } else { + switch(ccb->arcmsr_cdb.DeviceStatus) { + case ARCMSR_DEV_SELECT_TIMEOUT: { + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_TIME_OUT << 16; + arcmsr_ccb_complete(ccb, 1); + } + break; + case ARCMSR_DEV_ABORTED: + case ARCMSR_DEV_INIT_FAIL: { + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_BAD_TARGET << 16; + arcmsr_ccb_complete(ccb, 1); + } + break; + case ARCMSR_DEV_CHECK_CONDITION: { + acb->devstate[id][lun] = ARECA_RAID_GOOD; + arcmsr_report_sense_info(ccb); + arcmsr_ccb_complete(ccb, 1); + } + break; + default: + printk(KERN_NOTICE + "arcmsr%d: scsi id=%d lun=%d" + " polling and getting command error done" + "but got unknown DeviceStatus = 0x%x \n" + , acb->host->host_no + , id + , lun + , ccb->arcmsr_cdb.DeviceStatus); + acb->devstate[id][lun] = ARECA_RAID_GONE; + ccb->pcmd->result = DID_BAD_TARGET << 16; + arcmsr_ccb_complete(ccb, 1); + break; + } + } + } +} + +static void arcmsr_iop_init(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + uint32_t intmask_org, mask, outbound_doorbell, firmware_state = 0; + + do { + firmware_state = readl(®->outbound_msgaddr1); + } while (!(firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)); + intmask_org = readl(®->outbound_intmask) + | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; + arcmsr_get_firmware_spec(acb); + + acb->acb_flags |= ACB_F_MSG_START_BGRB; + writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0); + if (arcmsr_wait_msgint_ready(acb)) { + printk(KERN_NOTICE "arcmsr%d: " + "wait 'start adapter background rebulid' timeout\n", + acb->host->host_no); + } + + outbound_doorbell = readl(®->outbound_doorbell); + writel(outbound_doorbell, ®->outbound_doorbell); + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); + mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE + | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); + writel(intmask_org & mask, ®->outbound_intmask); + acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; + acb->acb_flags |= ACB_F_IOP_INITED; +} + +static void arcmsr_iop_reset(struct AdapterControlBlock *acb) +{ + struct MessageUnit __iomem *reg = acb->pmu; + struct CommandControlBlock *ccb; + uint32_t intmask_org; + int i = 0; + + if (atomic_read(&acb->ccboutstandingcount) != 0) { + /* talk to iop 331 outstanding command aborted */ + arcmsr_abort_allcmd(acb); + /* wait for 3 sec for all command aborted*/ + msleep_interruptible(3000); + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); + /* clear all outbound posted Q */ + for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++) + readl(®->outbound_queueport); + for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { + ccb = acb->pccb_pool[i]; + if ((ccb->startdone == ARCMSR_CCB_START) || + (ccb->startdone == ARCMSR_CCB_ABORTED)) { + ccb->startdone = ARCMSR_CCB_ABORTED; + ccb->pcmd->result = DID_ABORT << 16; + arcmsr_ccb_complete(ccb, 1); + } + } + /* enable all outbound interrupt */ + arcmsr_enable_outbound_ints(acb, intmask_org); + } + atomic_set(&acb->ccboutstandingcount, 0); +} + +static int arcmsr_bus_reset(struct scsi_cmnd *cmd) +{ + struct AdapterControlBlock *acb = + (struct 
AdapterControlBlock *)cmd->device->host->hostdata; + int i; + + acb->num_resets++; + acb->acb_flags |= ACB_F_BUS_RESET; + for (i = 0; i < 400; i++) { + if (!atomic_read(&acb->ccboutstandingcount)) + break; + arcmsr_interrupt(acb); + msleep(25); + } + arcmsr_iop_reset(acb); + acb->acb_flags &= ~ACB_F_BUS_RESET; + return SUCCESS; +} + +static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb, + struct CommandControlBlock *ccb) +{ + u32 intmask; + + ccb->startdone = ARCMSR_CCB_ABORTED; + + /* + ** Wait for 3 sec for all command done. + */ + msleep_interruptible(3000); + + intmask = arcmsr_disable_outbound_ints(acb); + arcmsr_polling_ccbdone(acb, ccb); + arcmsr_enable_outbound_ints(acb, intmask); +} + +static int arcmsr_abort(struct scsi_cmnd *cmd) +{ + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *)cmd->device->host->hostdata; + int i = 0; + + printk(KERN_NOTICE + "arcmsr%d: abort device command of scsi id=%d lun=%d \n", + acb->host->host_no, cmd->device->id, cmd->device->lun); + acb->num_aborts++; + + /* + ************************************************ + ** the all interrupt service routine is locked + ** we need to handle it as soon as possible and exit + ************************************************ + */ + if (!atomic_read(&acb->ccboutstandingcount)) + return SUCCESS; + + for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { + struct CommandControlBlock *ccb = acb->pccb_pool[i]; + if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { + arcmsr_abort_one_cmd(acb, ccb); + break; + } + } + + return SUCCESS; +} + +static const char *arcmsr_info(struct Scsi_Host *host) +{ + struct AdapterControlBlock *acb = + (struct AdapterControlBlock *) host->hostdata; + static char buf[256]; + char *type; + int raid6 = 1; + + switch (acb->pdev->device) { + case PCI_DEVICE_ID_ARECA_1110: + case PCI_DEVICE_ID_ARECA_1210: + raid6 = 0; + /*FALLTHRU*/ + case PCI_DEVICE_ID_ARECA_1120: + case PCI_DEVICE_ID_ARECA_1130: + case PCI_DEVICE_ID_ARECA_1160: + case PCI_DEVICE_ID_ARECA_1170: + case PCI_DEVICE_ID_ARECA_1220: + case PCI_DEVICE_ID_ARECA_1230: + case PCI_DEVICE_ID_ARECA_1260: + case PCI_DEVICE_ID_ARECA_1270: + case PCI_DEVICE_ID_ARECA_1280: + type = "SATA"; + break; + case PCI_DEVICE_ID_ARECA_1380: + case PCI_DEVICE_ID_ARECA_1381: + case PCI_DEVICE_ID_ARECA_1680: + case PCI_DEVICE_ID_ARECA_1681: + type = "SAS"; + break; + default: + type = "X-TYPE"; + break; + } + sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s", + type, raid6 ? 
"( RAID6 capable)" : "", + ARCMSR_DRIVER_VERSION); + return buf; +} + + diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c09396d..df7b6267 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2004,6 +2004,23 @@ #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb +#define PCI_VENDOR_ID_ARECA 0x17d3 +#define PCI_DEVICE_ID_ARECA_1110 0x1110 +#define PCI_DEVICE_ID_ARECA_1120 0x1120 +#define PCI_DEVICE_ID_ARECA_1130 0x1130 +#define PCI_DEVICE_ID_ARECA_1160 0x1160 +#define PCI_DEVICE_ID_ARECA_1170 0x1170 +#define PCI_DEVICE_ID_ARECA_1210 0x1210 +#define PCI_DEVICE_ID_ARECA_1220 0x1220 +#define PCI_DEVICE_ID_ARECA_1230 0x1230 +#define PCI_DEVICE_ID_ARECA_1260 0x1260 +#define PCI_DEVICE_ID_ARECA_1270 0x1270 +#define PCI_DEVICE_ID_ARECA_1280 0x1280 +#define PCI_DEVICE_ID_ARECA_1380 0x1380 +#define PCI_DEVICE_ID_ARECA_1381 0x1381 +#define PCI_DEVICE_ID_ARECA_1680 0x1680 +#define PCI_DEVICE_ID_ARECA_1681 0x1681 + #define PCI_VENDOR_ID_S2IO 0x17d5 #define PCI_DEVICE_ID_S2IO_WIN 0x5731 #define PCI_DEVICE_ID_S2IO_UNI 0x5831 -- cgit v0.10.2 From 43d6b68dc38867e489995e21649bb82f6ee7b5d3 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 29 Jul 2006 11:14:08 -0700 Subject: [SCSI] areca sysfs fix Remove sysfs_remove_bin_file() return-value checking from the areca driver. There's nothing a driver can do if sysfs file removal fails, so we'll soon be changing sysfs_remove_bin_file() to internally print a diagnostic and to return void. Cc: Erich Chen Signed-off-by: Andrew Morton Signed-off-by: James Bottomley diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index 0459f41..c96f714 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -240,15 +240,11 @@ int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb) } return 0; error_bin_file_message_clear: - error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + sysfs_remove_bin_file(&host->shost_classdev.kobj, &arcmsr_sysfs_message_write_attr); - if (error) - printk(KERN_ERR "arcmsr: sysfs_remove_bin_file mu_write failed\n"); error_bin_file_message_write: - error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + sysfs_remove_bin_file(&host->shost_classdev.kobj, &arcmsr_sysfs_message_read_attr); - if (error) - printk(KERN_ERR "arcmsr: sysfs_remove_bin_file mu_read failed\n"); error_bin_file_message_read: return error; } @@ -256,20 +252,13 @@ error_bin_file_message_read: void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) { struct Scsi_Host *host = acb->host; - int error; - error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + sysfs_remove_bin_file(&host->shost_classdev.kobj, &arcmsr_sysfs_message_clear_attr); - if (error) - printk(KERN_ERR "arcmsr: free sysfs mu_clear failed\n"); - error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + sysfs_remove_bin_file(&host->shost_classdev.kobj, &arcmsr_sysfs_message_write_attr); - if (error) - printk(KERN_ERR "arcmsr: free sysfs mu_write failed\n"); - error = sysfs_remove_bin_file(&host->shost_classdev.kobj, + sysfs_remove_bin_file(&host->shost_classdev.kobj, &arcmsr_sysfs_message_read_attr); - if (error) - printk(KERN_ERR "arcmsr: free sysfss mu_read failed\n"); } -- cgit v0.10.2 From d67a70aca200f67be42428e74eb3353f20ad1130 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Fri, 28 Jul 2006 17:36:46 -0500 Subject: [SCSI] arcmsr: fix up sysfs values The sysfs files in arcmsr are non-standard in that they aren't simple filename value pairs, the 
values actually contain preceeding text which would have to be parsed. The idea of sysfs files is that the file name is the description and the contents is a simple value. Fix up arcmsr to conform to this standard. Acked-By: Erich Chen Signed-off-by: James Bottomley diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index c96f714..12497da 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -265,7 +265,7 @@ arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) { static ssize_t arcmsr_attr_host_driver_version(struct class_device *cdev, char *buf) { return snprintf(buf, PAGE_SIZE, - "ARCMSR: %s\n", + "%s\n", ARCMSR_DRIVER_VERSION); } @@ -274,7 +274,7 @@ arcmsr_attr_host_driver_posted_cmd(struct class_device *cdev, char *buf) { struct Scsi_Host *host = class_to_shost(cdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "Current commands posted: %4d\n", + "%4d\n", atomic_read(&acb->ccboutstandingcount)); } @@ -283,7 +283,7 @@ arcmsr_attr_host_driver_reset(struct class_device *cdev, char *buf) { struct Scsi_Host *host = class_to_shost(cdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "SCSI Host Resets: %4d\n", + "%4d\n", acb->num_resets); } @@ -292,7 +292,7 @@ arcmsr_attr_host_driver_abort(struct class_device *cdev, char *buf) { struct Scsi_Host *host = class_to_shost(cdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "SCSI Aborts/Timeouts: %4d\n", + "%4d\n", acb->num_aborts); } @@ -301,7 +301,7 @@ arcmsr_attr_host_fw_model(struct class_device *cdev, char *buf) { struct Scsi_Host *host = class_to_shost(cdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "Adapter Model: %s\n", + "%s\n", acb->firm_model); } @@ -311,7 +311,7 @@ arcmsr_attr_host_fw_version(struct class_device *cdev, char *buf) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "Firmware Version: %s\n", + "%s\n", acb->firm_version); } @@ -321,7 +321,7 @@ arcmsr_attr_host_fw_request_len(struct class_device *cdev, char *buf) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "Reguest Lenth: %4d\n", + "%4d\n", acb->firm_request_len); } @@ -331,7 +331,7 @@ arcmsr_attr_host_fw_numbers_queue(struct class_device *cdev, char *buf) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "Numbers of Queue: %4d\n", + "%4d\n", acb->firm_numbers_queue); } @@ -341,7 +341,7 @@ arcmsr_attr_host_fw_sdram_size(struct class_device *cdev, char *buf) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "SDRAM Size: %4d\n", + "%4d\n", acb->firm_sdram_size); } @@ -351,7 +351,7 @@ arcmsr_attr_host_fw_hd_channels(struct class_device *cdev, char *buf) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; return snprintf(buf, PAGE_SIZE, - "Hard Disk Channels: %4d\n", + "%4d\n", acb->firm_hd_channels); } -- cgit v0.10.2 From 2672ea86be26353108a72a28910df4dc61cdb5e2 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Wed, 2 Aug 2006 17:11:49 -0400 Subject: [SCSI] advansys pci tweaks. 
Remove a lot of duplicate #defines from the advansys driver, and make them look like PCI IDs as defined elsewhere in the kernel. Also add a module table so that it automatically gets picked up by tools relying on modinfo output (like say, distro installers). Signed-off-by: Dave Jones Signed-off-by: James Bottomley diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index e32b4ab..773f02e 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -888,10 +888,6 @@ typedef unsigned char uchar; #define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F) #define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7) #define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF)) -#define ASC_PCI_VENDORID 0x10CD -#define ASC_PCI_DEVICEID_1200A 0x1100 -#define ASC_PCI_DEVICEID_1200B 0x1200 -#define ASC_PCI_DEVICEID_ULTRA 0x1300 #define ASC_PCI_REVISION_3150 0x02 #define ASC_PCI_REVISION_3050 0x03 @@ -899,6 +895,14 @@ typedef unsigned char uchar; #define ASC_DVCLIB_CALL_FAILED (0) #define ASC_DVCLIB_CALL_ERROR (-1) +#define PCI_VENDOR_ID_ASP 0x10cd +#define PCI_DEVICE_ID_ASP_1200A 0x1100 +#define PCI_DEVICE_ID_ASP_ABP940 0x1200 +#define PCI_DEVICE_ID_ASP_ABP940U 0x1300 +#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 +#define PCI_DEVICE_ID_38C0800_REV1 0x2500 +#define PCI_DEVICE_ID_38C1600_REV1 0x2700 + /* * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() @@ -1492,8 +1496,6 @@ typedef struct asc_dvc_cfg { #define ASC_INIT_STATE_END_INQUIRY 0x0080 #define ASC_INIT_RESET_SCSI_DONE 0x0100 #define ASC_INIT_STATE_WITHOUT_EEP 0x8000 -#define ASC_PCI_DEVICE_ID_REV_A 0x1100 -#define ASC_PCI_DEVICE_ID_REV_B 0x1200 #define ASC_BUG_FIX_IF_NOT_DWB 0x0001 #define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 @@ -2100,12 +2102,6 @@ STATIC ASC_DCNT AscGetMaxDmaCount(ushort); #define ADV_NUM_PAGE_CROSSING \ ((ADV_SG_TOTAL_MEM_SIZE + (ADV_PAGE_SIZE - 1))/ADV_PAGE_SIZE) -/* a_condor.h */ -#define ADV_PCI_VENDOR_ID 0x10CD -#define ADV_PCI_DEVICE_ID_REV_A 0x2300 -#define ADV_PCI_DEVID_38C0800_REV1 0x2500 -#define ADV_PCI_DEVID_38C1600_REV1 0x2700 - #define ADV_EEP_DVC_CFG_BEGIN (0x00) #define ADV_EEP_DVC_CFG_END (0x15) #define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ @@ -3569,14 +3565,7 @@ typedef struct scsi_cmnd REQ, *REQP; #define PCI_MAX_SLOT 0x1F #define PCI_MAX_BUS 0xFF #define PCI_IOADDRESS_MASK 0xFFFE -#define ASC_PCI_VENDORID 0x10CD #define ASC_PCI_DEVICE_ID_CNT 6 /* PCI Device ID count. 
*/ -#define ASC_PCI_DEVICE_ID_1100 0x1100 -#define ASC_PCI_DEVICE_ID_1200 0x1200 -#define ASC_PCI_DEVICE_ID_1300 0x1300 -#define ASC_PCI_DEVICE_ID_2300 0x2300 /* ASC-3550 */ -#define ASC_PCI_DEVICE_ID_2500 0x2500 /* ASC-38C0800 */ -#define ASC_PCI_DEVICE_ID_2700 0x2700 /* ASC-38C1600 */ #ifndef ADVANSYS_STATS #define ASC_STATS(shp, counter) @@ -4330,12 +4319,12 @@ advansys_detect(struct scsi_host_template *tpnt) struct pci_dev *pci_devp = NULL; int pci_device_id_cnt = 0; unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = { - ASC_PCI_DEVICE_ID_1100, - ASC_PCI_DEVICE_ID_1200, - ASC_PCI_DEVICE_ID_1300, - ASC_PCI_DEVICE_ID_2300, - ASC_PCI_DEVICE_ID_2500, - ASC_PCI_DEVICE_ID_2700 + PCI_DEVICE_ID_ASP_1200A, + PCI_DEVICE_ID_ASP_ABP940, + PCI_DEVICE_ID_ASP_ABP940U, + PCI_DEVICE_ID_ASP_ABP940UW, + PCI_DEVICE_ID_38C0800_REV1, + PCI_DEVICE_ID_38C1600_REV1 }; ADV_PADDR pci_memory_address; #endif /* CONFIG_PCI */ @@ -4471,7 +4460,7 @@ advansys_detect(struct scsi_host_template *tpnt) /* Find all PCI cards. */ while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) { - if ((pci_devp = pci_find_device(ASC_PCI_VENDORID, + if ((pci_devp = pci_find_device(PCI_VENDOR_ID_ASP, pci_device_id[pci_device_id_cnt], pci_devp)) == NULL) { pci_device_id_cnt++; @@ -4575,9 +4564,9 @@ advansys_detect(struct scsi_host_template *tpnt) */ #ifdef CONFIG_PCI if (asc_bus[bus] == ASC_IS_PCI && - (pci_devp->device == ASC_PCI_DEVICE_ID_2300 || - pci_devp->device == ASC_PCI_DEVICE_ID_2500 || - pci_devp->device == ASC_PCI_DEVICE_ID_2700)) + (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW || + pci_devp->device == PCI_DEVICE_ID_38C0800_REV1 || + pci_devp->device == PCI_DEVICE_ID_38C1600_REV1)) { boardp->flags |= ASC_IS_WIDE_BOARD; } @@ -4600,11 +4589,11 @@ advansys_detect(struct scsi_host_template *tpnt) adv_dvc_varp->isr_callback = adv_isr_callback; adv_dvc_varp->async_callback = adv_async_callback; #ifdef CONFIG_PCI - if (pci_devp->device == ASC_PCI_DEVICE_ID_2300) + if (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW) { ASC_DBG(1, "advansys_detect: ASC-3550\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; - } else if (pci_devp->device == ASC_PCI_DEVICE_ID_2500) + } else if (pci_devp->device == PCI_DEVICE_ID_38C0800_REV1) { ASC_DBG(1, "advansys_detect: ASC-38C0800\n"); adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; @@ -11922,7 +11911,7 @@ AscInitGetConfig( PCIRevisionID = DvcReadPCIConfigByte(asc_dvc, AscPCIConfigRevisionIDRegister); - if (PCIVendorID != ASC_PCI_VENDORID) { + if (PCIVendorID != PCI_VENDOR_ID_ASP) { warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; } prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc, @@ -11942,15 +11931,15 @@ AscInitGetConfig( warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; } } - if ((PCIDeviceID == ASC_PCI_DEVICEID_1200A) || - (PCIDeviceID == ASC_PCI_DEVICEID_1200B)) { + if ((PCIDeviceID == PCI_DEVICE_ID_ASP_1200A) || + (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940)) { DvcWritePCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer, 0x00); if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) != 0x00) { warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; } - } else if (PCIDeviceID == ASC_PCI_DEVICEID_ULTRA) { + } else if (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940U) { if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) < 0x20) { DvcWritePCIConfigByte(asc_dvc, @@ -12037,8 +12026,8 @@ AscInitFromAscDvcVar( AscSetChipCfgMsw(iop_base, cfg_msw); if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { } else { - if ((pci_device_id == ASC_PCI_DEVICE_ID_REV_A) || - (pci_device_id == ASC_PCI_DEVICE_ID_REV_B)) { + if 
((pci_device_id == PCI_DEVICE_ID_ASP_1200A) || + (pci_device_id == PCI_DEVICE_ID_ASP_ABP940)) { asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; } @@ -14275,8 +14264,8 @@ Default_38C0800_EEPROM_Config __initdata = { 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ - ADV_PCI_VENDOR_ID, /* 58 subsysvid */ - ADV_PCI_DEVID_38C0800_REV1, /* 59 subsysid */ + PCI_VENDOR_ID_ASP, /* 58 subsysvid */ + PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ @@ -14405,8 +14394,8 @@ Default_38C1600_EEPROM_Config __initdata = { 0, /* 55 reserved */ 0, /* 56 cisptr_lsw */ 0, /* 57 cisprt_msw */ - ADV_PCI_VENDOR_ID, /* 58 subsysvid */ - ADV_PCI_DEVID_38C1600_REV1, /* 59 subsysid */ + PCI_VENDOR_ID_ASP, /* 58 subsysvid */ + PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */ 0, /* 60 reserved */ 0, /* 61 reserved */ 0, /* 62 reserved */ @@ -18225,3 +18214,22 @@ AdvInquiryHandling( } } MODULE_LICENSE("Dual BSD/GPL"); + +/* PCI Devices supported by this driver */ +static struct pci_device_id advansys_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { } +}; +MODULE_DEVICE_TABLE(pci, advansys_pci_tbl); + -- cgit v0.10.2 From fddafd3d21953d5ea740f7b2f27149f7dd493194 Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 3 Aug 2006 13:54:59 -0500 Subject: [SCSI] DAC960: PCI id table fixup The PCI ID table in the DAC960 driver conflicts with some devices that use the ipr driver. All ipr adapters that use this chip have an IBM subvendor ID and all DAC960 adapters that use this chip have a Mylex subvendor id. Signed-off-by: Brian King Signed-off-by: James Bottomley diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 4cd23c3..a360215 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -7115,7 +7115,7 @@ static struct pci_device_id DAC960_id_table[] = { { .vendor = PCI_VENDOR_ID_MYLEX, .device = PCI_DEVICE_ID_MYLEX_DAC960_GEM, - .subvendor = PCI_ANY_ID, + .subvendor = PCI_VENDOR_ID_MYLEX, .subdevice = PCI_ANY_ID, .driver_data = (unsigned long) &DAC960_GEM_privdata, }, -- cgit v0.10.2 From 9e5c50fa8686ede7c37b939a0b950df50346eb3d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 4 Aug 2006 17:18:30 +0200 Subject: [SCSI] remove SCSI_STATE_ #defines These aren't used anymore since the field in scsi_cmnd where it was stored has been removed. Signed-off-by: Christoph Hellwig Signed-off-by: James Bottomley diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 58e6444..be117f8 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -118,20 +118,6 @@ struct scsi_cmnd { unsigned long pid; /* Process ID, starts at 0. Unique per host. */ }; -/* - * These are the values that scsi_cmd->state can take. 
- */ -#define SCSI_STATE_TIMEOUT 0x1000 -#define SCSI_STATE_FINISHED 0x1001 -#define SCSI_STATE_FAILED 0x1002 -#define SCSI_STATE_QUEUED 0x1003 -#define SCSI_STATE_UNUSED 0x1006 -#define SCSI_STATE_DISCONNECTING 0x1008 -#define SCSI_STATE_INITIALIZING 0x1009 -#define SCSI_STATE_BHQUEUE 0x100a -#define SCSI_STATE_MLQUEUE 0x100b - - extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t); extern void scsi_put_command(struct scsi_cmnd *); extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); -- cgit v0.10.2 From dd7e2f2266acf66ec882baa6fbd79f853b5fe966 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Fri, 4 Aug 2006 12:09:24 -0500 Subject: [SCSI] scsi_queue_work() documented return value is incorrect If you examine the queue_work() routine you'll see that it returns 1 on success, 0 if the work is already queued. This patch corrects the source code documentation for the scsi_queue_work function. Signed-off-by: Michael Reed Signed-off-by: James Bottomley diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index dfcb96f..f244d4f 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -487,7 +487,9 @@ EXPORT_SYMBOL(scsi_is_host_device); * @work: Work to queue for execution. * * Return value: - * 0 on success / != 0 for error + * 1 - work queued for execution + * 0 - work is already queued + * -EINVAL - work queue doesn't exist **/ int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) { -- cgit v0.10.2 From 5d947f2b7607c4674d104accbd3768744aaa4154 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Mon, 31 Jul 2006 12:19:30 -0500 Subject: [SCSI] mptfc: add additional fc transport attributes Add host_supported_speeds, host_maxframe_size, host_speed, host_fabric_name, host_port_type, host_port_state, and host_symbolic_name transport attributes to fusion fibre channel. 
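The fc_host_*() helpers assigned in the hunk below are lvalue accessors into the fc transport-class data hung off the Scsi_Host, which is why the code can both write fc_host_speed(sh) = speed and snprintf() straight into fc_host_symbolic_name(sh). A simplified sketch of how such an accessor can be built follows; the real definitions live in <scsi/scsi_transport_fc.h>, and the struct and member names here are illustrative only:

/* Sketch only: per-host transport attributes reached through shost_data. */
struct example_fc_host_attrs {
	u32	speed;
	u64	fabric_name;
	char	symbolic_name[FC_SYMBOLIC_NAME_SIZE];
};

#define example_fc_host_speed(shost) \
	(((struct example_fc_host_attrs *)(shost)->shost_data)->speed)

/* usable on either side of an assignment, e.g.
 *	example_fc_host_speed(sh) = FC_PORTSPEED_2GBIT;
 */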
Signed-off-by: Michael Reed Acked-by: Moore, Eric Signed-off-by: James Bottomley diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index 90da7d6..244a32c 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -162,7 +162,13 @@ static struct fc_function_template mptfc_transport_functions = { .show_starget_port_id = 1, .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, - + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + .show_host_speed = 1, + .show_host_fabric_name = 1, + .show_host_port_type = 1, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, }; static void @@ -836,33 +842,95 @@ mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc) static void mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum) { - unsigned class = 0, cos = 0; + unsigned class = 0; + unsigned cos = 0; + unsigned speed; + unsigned port_type; + unsigned port_state; + FCPortPage0_t *pp0; + struct Scsi_Host *sh; + char *sn; /* don't know what to do as only one scsi (fc) host was allocated */ if (portnum != 0) return; - class = ioc->fc_port_page0[portnum].SupportedServiceClass; + pp0 = &ioc->fc_port_page0[portnum]; + sh = ioc->sh; + + sn = fc_host_symbolic_name(sh); + snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh", + ioc->prod_name, + MPT_FW_REV_MAGIC_ID_STRING, + ioc->facts.FWVersion.Word); + + fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN; + + fc_host_maxframe_size(sh) = pp0->MaxFrameSize; + + fc_host_node_name(sh) = + (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; + + fc_host_port_name(sh) = + (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low; + + fc_host_port_id(sh) = pp0->PortIdentifier; + + class = pp0->SupportedServiceClass; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1) cos |= FC_COS_CLASS1; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2) cos |= FC_COS_CLASS2; if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3) cos |= FC_COS_CLASS3; + fc_host_supported_classes(sh) = cos; + + if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT) + speed = FC_PORTSPEED_1GBIT; + else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT) + speed = FC_PORTSPEED_2GBIT; + else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT) + speed = FC_PORTSPEED_4GBIT; + else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT) + speed = FC_PORTSPEED_10GBIT; + else + speed = FC_PORTSPEED_UNKNOWN; + fc_host_speed(sh) = speed; + + speed = 0; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED) + speed |= FC_PORTSPEED_1GBIT; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED) + speed |= FC_PORTSPEED_2GBIT; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED) + speed |= FC_PORTSPEED_4GBIT; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED) + speed |= FC_PORTSPEED_10GBIT; + fc_host_supported_speeds(sh) = speed; + + port_state = FC_PORTSTATE_UNKNOWN; + if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE) + port_state = FC_PORTSTATE_ONLINE; + else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE) + port_state = FC_PORTSTATE_LINKDOWN; + fc_host_port_state(sh) = port_state; + + port_type = FC_PORTTYPE_UNKNOWN; + if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT) + port_type = FC_PORTTYPE_PTP; + else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP) + port_type = FC_PORTTYPE_LPORT; + else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP) + port_type = FC_PORTTYPE_NLPORT; + else if (pp0->Flags & 
MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT) + port_type = FC_PORTTYPE_NPORT; + fc_host_port_type(sh) = port_type; + + fc_host_fabric_name(sh) = + (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ? + (u64) pp0->FabricWWNN.High << 32 | (u64) pp0->FabricWWPN.Low : + (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; - fc_host_node_name(ioc->sh) = - (u64)ioc->fc_port_page0[portnum].WWNN.High << 32 - | (u64)ioc->fc_port_page0[portnum].WWNN.Low; - - fc_host_port_name(ioc->sh) = - (u64)ioc->fc_port_page0[portnum].WWPN.High << 32 - | (u64)ioc->fc_port_page0[portnum].WWPN.Low; - - fc_host_port_id(ioc->sh) = ioc->fc_port_page0[portnum].PortIdentifier; - - fc_host_supported_classes(ioc->sh) = cos; - - fc_host_tgtid_bind_type(ioc->sh) = FC_TGTID_BIND_BY_WWPN; } static void -- cgit v0.10.2 From b5145d25f0d8eae21ad7969822f2d4ce7f22e72a Mon Sep 17 00:00:00 2001 From: Brian King Date: Wed, 2 Aug 2006 14:57:36 -0500 Subject: [SCSI] ipr: Add some hardware defined types for SATA Add some hardware defined types for SATA. This is required by future patches to add SATA support to ipr. Signed-off-by: Brian King Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 1ad24df..1e9c122 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -45,6 +45,7 @@ * This can be adjusted at runtime through sysfs device attributes. */ #define IPR_MAX_CMD_PER_LUN 6 +#define IPR_MAX_CMD_PER_ATA_LUN 1 /* * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of @@ -106,7 +107,7 @@ #define IPR_IOA_BUS 0xff #define IPR_IOA_TARGET 0xff #define IPR_IOA_LUN 0xff -#define IPR_MAX_NUM_BUSES 8 +#define IPR_MAX_NUM_BUSES 16 #define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES #define IPR_NUM_RESET_RELOAD_RETRIES 3 @@ -145,6 +146,7 @@ #define IPR_LUN_RESET 0x40 #define IPR_TARGET_RESET 0x20 #define IPR_BUS_RESET 0x10 +#define IPR_ATA_PHY_RESET 0x80 #define IPR_ID_HOST_RR_Q 0xC4 #define IPR_QUERY_IOA_CONFIG 0xC5 #define IPR_CANCEL_ALL_REQUESTS 0xCE @@ -295,7 +297,11 @@ struct ipr_std_inq_data { }__attribute__ ((packed)); struct ipr_config_table_entry { - u8 service_level; + u8 proto; +#define IPR_PROTO_SATA 0x02 +#define IPR_PROTO_SATA_ATAPI 0x03 +#define IPR_PROTO_SAS_STP 0x06 +#define IPR_PROTO_SAS_STP_ATAPI 0x07 u8 array_id; u8 flags; #define IPR_IS_IOA_RESOURCE 0x80 @@ -307,6 +313,7 @@ struct ipr_config_table_entry { #define IPR_SUBTYPE_AF_DASD 0 #define IPR_SUBTYPE_GENERIC_SCSI 1 #define IPR_SUBTYPE_VOLUME_SET 2 +#define IPR_SUBTYPE_GENERIC_ATA 4 #define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4) #define IPR_QUEUE_FROZEN_MODEL 0 @@ -350,6 +357,7 @@ struct ipr_cmd_pkt { #define IPR_RQTYPE_SCSICDB 0x00 #define IPR_RQTYPE_IOACMD 0x01 #define IPR_RQTYPE_HCAM 0x02 +#define IPR_RQTYPE_ATA_PASSTHRU 0x04 u8 luntar_luntrn; @@ -373,6 +381,37 @@ struct ipr_cmd_pkt { __be16 timeout; }__attribute__ ((packed, aligned(4))); +struct ipr_ioarcb_ata_regs { + u8 flags; +#define IPR_ATA_FLAG_PACKET_CMD 0x80 +#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40 +#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20 + u8 reserved[3]; + + __be16 data; + u8 feature; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 command; + u8 reserved2[3]; + u8 hob_feature; + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + u8 ctl; +}__attribute__ ((packed, aligned(4))); + +struct ipr_ioarcb_add_data { + union { + struct ipr_ioarcb_ata_regs regs; + __be32 add_cmd_parms[10]; + }u; +}__attribute__ ((packed, aligned(4))); + /* IOA Request Control Block 128 bytes */ struct ipr_ioarcb { __be32 ioarcb_host_pci_addr; 
@@ -397,7 +436,7 @@ struct ipr_ioarcb { struct ipr_cmd_pkt cmd_pkt; __be32 add_cmd_parms_len; - __be32 add_cmd_parms[10]; + struct ipr_ioarcb_add_data add_data; }__attribute__((packed, aligned (4))); struct ipr_ioadl_desc { @@ -433,6 +472,21 @@ struct ipr_ioasa_gpdd { __be32 ioa_data[2]; }__attribute__((packed, aligned (4))); +struct ipr_ioasa_gata { + u8 error; + u8 nsect; /* Interrupt reason */ + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 status; + u8 alt_status; /* ATA CTL */ + u8 hob_nsect; + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; +}__attribute__((packed, aligned (4))); + struct ipr_auto_sense { __be16 auto_sense_len; __be16 ioa_data_len; @@ -466,6 +520,7 @@ struct ipr_ioasa { __be32 ioasc_specific; /* status code specific field */ #define IPR_ADDITIONAL_STATUS_FMT 0x80000000 #define IPR_AUTOSENSE_VALID 0x40000000 +#define IPR_ATA_DEVICE_WAS_RESET 0x20000000 #define IPR_IOASC_SPECIFIC_MASK 0x00ffffff #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) #define IPR_FIELD_POINTER_MASK 0x0000ffff @@ -474,6 +529,7 @@ struct ipr_ioasa { struct ipr_ioasa_vset vset; struct ipr_ioasa_af_dasd dasd; struct ipr_ioasa_gpdd gpdd; + struct ipr_ioasa_gata gata; } u; struct ipr_auto_sense auto_sense; @@ -1308,6 +1364,22 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res) } /** + * ipr_is_gata - Determine if a resource is a generic ATA resource + * @res: resource entry struct + * + * Return value: + * 1 if GATA / 0 if not GATA + **/ +static inline int ipr_is_gata(struct ipr_resource_entry *res) +{ + if (!ipr_is_ioa_resource(res) && + IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA) + return 1; + else + return 0; +} + +/** * ipr_is_naca_model - Determine if a resource is using NACA queueing model * @res: resource entry struct * -- cgit v0.10.2 From 896bbd21408ddbfb9a57819404dbb04f4f0afb35 Mon Sep 17 00:00:00 2001 From: Brian King Date: Wed, 2 Aug 2006 14:57:44 -0500 Subject: [SCSI] ipr: Handle new SAS error codes Add definitions for some SAS error codes that can be logged by ipr SAS adapters. 
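Judging from the initializers in the hunk that follows, each ipr_error_table[] entry pairs a 32-bit IOASC with two logging flags and a message string. A minimal sketch of how such a table can be searched for the message belonging to a returned IOASC; the struct layout and names below are assumed for illustration, not copied from ipr.h:

/* Assumed shape, mirroring the { ioasc, flag, flag, "message" } initializers. */
struct example_ipr_error_entry {
	u32	ioasc;
	int	log_ioasa;
	int	log_hcam;
	char	*error;
};

/* Linear lookup: return the message for an IOASC, or NULL if it is unknown. */
static const char *example_ipr_error_string(const struct example_ipr_error_entry *tbl,
					    int nr_entries, u32 ioasc)
{
	int i;

	for (i = 0; i < nr_entries; i++)
		if (tbl[i].ioasc == ioasc)
			return tbl[i].error;
	return NULL;
}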
Signed-off-by: Brian King Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 01080b3..7f2c5cf 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -175,6 +175,8 @@ struct ipr_error_table_t ipr_error_table[] = { "Qualified success"}, {0x01080000, 1, 1, "FFFE: Soft device bus error recovered by the IOA"}, + {0x01088100, 0, 1, + "4101: Soft device bus fabric error"}, {0x01170600, 0, 1, "FFF9: Device sector reassign successful"}, {0x01170900, 0, 1, @@ -225,6 +227,8 @@ struct ipr_error_table_t ipr_error_table[] = { "3109: IOA timed out a device command"}, {0x04088000, 0, 0, "3120: SCSI bus is not operational"}, + {0x04088100, 0, 1, + "4100: Hard device bus fabric error"}, {0x04118000, 0, 1, "9000: IOA reserved area data check"}, {0x04118100, 0, 1, @@ -273,6 +277,14 @@ struct ipr_error_table_t ipr_error_table[] = { "9091: Incorrect hardware configuration change has been detected"}, {0x04678000, 0, 1, "9073: Invalid multi-adapter configuration"}, + {0x04678100, 0, 1, + "4010: Incorrect connection between cascaded expanders"}, + {0x04678200, 0, 1, + "4020: Connections exceed IOA design limits"}, + {0x04678300, 0, 1, + "4030: Incorrect multipath connection"}, + {0x04679000, 0, 1, + "4110: Unsupported enclosure function"}, {0x046E0000, 0, 1, "FFF4: Command to logical unit failed"}, {0x05240000, 1, 0, @@ -297,6 +309,8 @@ struct ipr_error_table_t ipr_error_table[] = { "9031: Array protection temporarily suspended, protection resuming"}, {0x06040600, 0, 1, "9040: Array protection temporarily suspended, protection resuming"}, + {0x06288000, 0, 1, + "3140: Device bus not ready to ready transition"}, {0x06290000, 0, 1, "FFFB: SCSI bus was reset"}, {0x06290500, 0, 0, @@ -319,6 +333,16 @@ struct ipr_error_table_t ipr_error_table[] = { "3150: SCSI bus configuration error"}, {0x06678100, 0, 1, "9074: Asymmetric advanced function disk configuration"}, + {0x06678300, 0, 1, + "4040: Incomplete multipath connection between IOA and enclosure"}, + {0x06678400, 0, 1, + "4041: Incomplete multipath connection between enclosure and device"}, + {0x06678500, 0, 1, + "9075: Incomplete multipath connection between IOA and remote IOA"}, + {0x06678600, 0, 1, + "9076: Configuration error, missing remote IOA"}, + {0x06679100, 0, 1, + "4050: Enclosure does not support a required multipath function"}, {0x06690200, 0, 1, "9041: Array protection temporarily suspended"}, {0x06698200, 0, 1, @@ -331,6 +355,10 @@ struct ipr_error_table_t ipr_error_table[] = { "9072: Link not operational transition"}, {0x066B8200, 0, 1, "9032: Array exposed but still protected"}, + {0x066B9100, 0, 1, + "4061: Multipath redundancy level got better"}, + {0x066B9200, 0, 1, + "4060: Multipath redundancy level got worse"}, {0x07270000, 0, 0, "Failure due to other device"}, {0x07278000, 0, 1, -- cgit v0.10.2 From 5b7304fbfb74bfca6f7d5a88b28197e3f7f2743b Mon Sep 17 00:00:00 2001 From: Brian King Date: Wed, 2 Aug 2006 14:57:51 -0500 Subject: [SCSI] ipr: Properly handle IOA recovered errors The ipr driver currently translates adapter recovered errors to DID_ERROR. This patch fixes this to translate these errors to success instead. 
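The one-line check added below works because of the sense-key ordering in <scsi/scsi.h> (NO_SENSE is 0x0, RECOVERED_ERROR is 0x1) and because the host byte of scsi_cmnd->result sits in bits 16-23, so leaving it clear reports DID_OK. A minimal illustration of the intent, not the driver's exact error path:

/* Sketch: completions whose sense key is NO SENSE (0x0) or RECOVERED ERROR
 * (0x1) keep DID_OK in the host byte; anything worse is flagged as DID_ERROR. */
static void example_map_sense_key(struct scsi_cmnd *cmd, u8 sense_key)
{
	if (sense_key > RECOVERED_ERROR)		/* RECOVERED_ERROR == 0x1 */
		cmd->result |= (DID_ERROR << 16);	/* host byte, bits 16-23 */
	/* else: host byte stays DID_OK, so the command is reported as success */
}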
Signed-off-by: Brian King Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 7f2c5cf..55c0156 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4218,7 +4218,8 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, case IPR_IOASC_NR_INIT_CMD_REQUIRED: break; default: - scsi_cmd->result |= (DID_ERROR << 16); + if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) + scsi_cmd->result |= (DID_ERROR << 16); if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) res->needs_sync_complete = 1; break; -- cgit v0.10.2 From 117d2ce1cea25fc94302ff418ccef644cd3e59af Mon Sep 17 00:00:00 2001 From: Brian King Date: Wed, 2 Aug 2006 14:57:58 -0500 Subject: [SCSI] ipr: Auto sense handling fix Fix up a logic error in the checking for valid sense data. Signed-off-by: Brian King Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 55c0156..7ed4eef 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4127,8 +4127,7 @@ static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) { struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; - if ((be32_to_cpu(ioasa->ioasc_specific) & - (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0) + if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) return 0; memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, -- cgit v0.10.2 From 008cd5bbfb4763322837cd1f7c621f02ebe22fef Mon Sep 17 00:00:00 2001 From: Brian King Date: Wed, 2 Aug 2006 14:58:04 -0500 Subject: [SCSI] ipr: Bump driver version to 2.1.4 Bump the ipr driver version to 2.1.4. Signed-off-by: Brian King Signed-off-by: James Bottomley diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 1e9c122..11eaff52 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -36,8 +36,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.1.3" -#define IPR_DRIVER_DATE "(March 29, 2006)" +#define IPR_DRIVER_VERSION "2.1.4" +#define IPR_DRIVER_DATE "(August 2, 2006)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding -- cgit v0.10.2 From 4ff36718ede26ee2da73f2dae94d71e2b06845fc Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 4 Jul 2006 12:15:20 -0600 Subject: [SCSI] Improve inquiry printing - Replace scsi_device_types array API with scsi_device_type function API. Gets rid of a lot of common code, as well as being easier to use. - Add the new device types in SPC4 r05a, and rename some of the older ones. - Reformat the printing of inquiry data; now fits on one line and includes PQ. I think I've addressed all the feedback from the previous versions. My current test box prints: scsi 2:0:1:0: Direct access HP 18.2G ATLAS10K3_18_SCA HP05 PQ: 0 ANSI: 2 Signed-off-by: Matthew Wilcox Signed-off-by: James Bottomley diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index afdff32..05f79d7 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -251,10 +251,6 @@ scsi_cmd_stack_free(int ctlr) stk->pool = NULL; } -/* scsi_device_types comes from scsi.h */ -#define DEVICETYPE(n) (n<0 || n>MAX_SCSI_DEVICE_CODE) ? 
\ - "Unknown" : scsi_device_types[n] - #if 0 static int xmargin=8; static int amargin=60; @@ -389,7 +385,7 @@ cciss_scsi_add_entry(int ctlr, int hostno, time anyway (the scsi layer's inquiries will show that info) */ if (hostno != -1) printk("cciss%d: %s device c%db%dt%dl%d added.\n", - ctlr, DEVICETYPE(sd->devtype), hostno, + ctlr, scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, sd->lun); return 0; } @@ -407,7 +403,7 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry) ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1]; ccissscsi[ctlr].ndevices--; printk("cciss%d: %s device c%db%dt%dl%d removed.\n", - ctlr, DEVICETYPE(sd.devtype), hostno, + ctlr, scsi_device_type(sd.devtype), hostno, sd.bus, sd.target, sd.lun); } @@ -458,7 +454,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno, if (found == 0) { /* device no longer present. */ changes++; /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n", - ctlr, DEVICETYPE(csd->devtype), hostno, + ctlr, scsi_device_type(csd->devtype), hostno, csd->bus, csd->target, csd->lun); */ cciss_scsi_remove_entry(ctlr, hostno, i); /* note, i not incremented */ @@ -468,7 +464,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno, printk("cciss%d: device c%db%dt%dl%d type changed " "(device type now %s).\n", ctlr, hostno, csd->bus, csd->target, csd->lun, - DEVICETYPE(csd->devtype)); + scsi_device_type(csd->devtype)); csd->devtype = sd[j].devtype; i++; /* so just move along. */ } else /* device is same as it ever was, */ @@ -1098,7 +1094,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno) if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { printk(KERN_INFO "cciss%d: %s ignored, " "too many devices.\n", cntl_num, - DEVICETYPE(devtype)); + scsi_device_type(devtype)); break; } memcpy(¤tsd[ncurrent].scsi3addr[0], diff --git a/drivers/scsi/fcal.c b/drivers/scsi/fcal.c index 7f89102..c4e16c0 100644 --- a/drivers/scsi/fcal.c +++ b/drivers/scsi/fcal.c @@ -248,8 +248,7 @@ int fcal_proc_info (struct Scsi_Host *host, char *buffer, char **start, off_t of if (scd->id == target) { SPRINTF (" [AL-PA: %02x, Id: %02d, Port WWN: %08x%08x, Node WWN: %08x%08x] ", alpa, target, u1[0], u1[1], u2[0], u2[1]); - SPRINTF ("%s ", (scd->type < MAX_SCSI_DEVICE_CODE) ? - scsi_device_types[(short) scd->type] : "Unknown device"); + SPRINTF ("%s ", scsi_device_type(scd->type)); for (j = 0; (j < 8) && (scd->vendor[j] >= 0x20); j++) SPRINTF ("%c", scd->vendor[j]); diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 67f1100..cdd893b 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -811,7 +811,6 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c struct NCR5380_hostdata *hostdata; #ifdef NCR5380_STATS struct scsi_device *dev; - extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE]; #endif NCR5380_setup(scsi_ptr); @@ -851,7 +850,7 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c long tr = hostdata->time_read[dev->id] / HZ; long tw = hostdata->time_write[dev->id] / HZ; - PRINTP(" T:%d %s " ANDP dev->id ANDP(dev->type < MAX_SCSI_DEVICE_CODE) ? 
scsi_device_types[(int) dev->type] : "Unknown"); + PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type)); for (i = 0; i < 8; i++) if (dev->vendor[i] >= 0x20) *(buffer + (len++)) = dev->vendor[i]; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 76edbb6..ccb0055 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -2822,9 +2822,7 @@ mega_print_inquiry(char *page, char *scsi_inq) i = scsi_inq[0] & 0x1f; - len += sprintf(page+len, " Type: %s ", - i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : - "Unknown "); + len += sprintf(page+len, " Type: %s ", scsi_device_type(i)); len += sprintf(page+len, " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index b332cad..94df671 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -96,24 +96,40 @@ unsigned int scsi_logging_level; EXPORT_SYMBOL(scsi_logging_level); #endif -const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = { - "Direct-Access ", - "Sequential-Access", +static const char *const scsi_device_types[] = { + "Direct access ", + "Sequential access", "Printer ", "Processor ", "WORM ", - "CD-ROM ", + "CD/DVD ", "Scanner ", - "Optical Device ", - "Medium Changer ", + "Optical memory ", + "Media changer ", "Communications ", - "Unknown ", - "Unknown ", + "ASC IT8 ", + "ASC IT8 ", "RAID ", "Enclosure ", - "Direct-Access-RBC", + "Direct access RBC", + "Optical card ", + "Bridge controller", + "Object storage ", + "Automation/Drive ", }; -EXPORT_SYMBOL(scsi_device_types); + +const char * scsi_device_type(unsigned type) +{ + if (type == 0x1e) + return "Well-known LUN "; + if (type == 0x1f) + return "No Device "; + if (type > ARRAY_SIZE(scsi_device_types)) + return "Unknown "; + return scsi_device_types[type]; +} + +EXPORT_SYMBOL(scsi_device_type); struct scsi_host_cmd_pool { kmem_cache_t *slab; diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 55200e4..524a5f7 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -178,9 +178,7 @@ static int proc_print_scsidevice(struct device *dev, void *data) seq_printf(s, "\n"); - seq_printf(s, " Type: %s ", - sdev->type < MAX_SCSI_DEVICE_CODE ? - scsi_device_types[(int) sdev->type] : "Unknown "); + seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); seq_printf(s, " ANSI" " SCSI revision: %02x", (sdev->scsi_level - 1) ? sdev->scsi_level - 1 : 1); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1bd92b9..1803994 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -134,59 +134,6 @@ static void scsi_unlock_floptical(struct scsi_device *sdev, } /** - * print_inquiry - printk the inquiry information - * @inq_result: printk this SCSI INQUIRY - * - * Description: - * printk the vendor, model, and other information found in the - * INQUIRY data in @inq_result. - * - * Notes: - * Remove this, and replace with a hotplug event that logs any - * relevant information. 
- **/ -static void print_inquiry(unsigned char *inq_result) -{ - int i; - - printk(KERN_NOTICE " Vendor: "); - for (i = 8; i < 16; i++) - if (inq_result[i] >= 0x20 && i < inq_result[4] + 5) - printk("%c", inq_result[i]); - else - printk(" "); - - printk(" Model: "); - for (i = 16; i < 32; i++) - if (inq_result[i] >= 0x20 && i < inq_result[4] + 5) - printk("%c", inq_result[i]); - else - printk(" "); - - printk(" Rev: "); - for (i = 32; i < 36; i++) - if (inq_result[i] >= 0x20 && i < inq_result[4] + 5) - printk("%c", inq_result[i]); - else - printk(" "); - - printk("\n"); - - i = inq_result[0] & 0x1f; - - printk(KERN_NOTICE " Type: %s ", - i < - MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : - "Unknown "); - printk(" ANSI SCSI revision: %02x", - inq_result[2] & 0x07); - if ((inq_result[2] & 0x07) == 1 && (inq_result[3] & 0x0f) == 1) - printk(" CCS\n"); - else - printk("\n"); -} - -/** * scsi_alloc_sdev - allocate and setup a scsi_Device * * Description: @@ -653,9 +600,8 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) if (*bflags & BLIST_ISROM) { /* * It would be better to modify sdev->type, and set - * sdev->removable, but then the print_inquiry() output - * would not show TYPE_ROM; if print_inquiry() is removed - * the issue goes away. + * sdev->removable; this can now be done since + * print_inquiry has gone away. */ inq_result[0] = TYPE_ROM; inq_result[1] |= 0x80; /* removable */ @@ -684,8 +630,6 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type); } - print_inquiry(inq_result); - /* * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI * spec says: The device server is capable of supporting the @@ -715,6 +659,12 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) if (inq_result[7] & 0x10) sdev->sdtr = 1; + sdev_printk(KERN_NOTICE "scsi", sdev, "%s %.8s %.16s %.4s PQ: %d " + "ANSI: %d%s\n", scsi_device_type(sdev->type), + sdev->vendor, sdev->model, sdev->rev, + sdev->inq_periph_qual, inq_result[2] & 0x07, + (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); + /* * End sysfs code. */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index c60b8ff..1bc6752 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -25,13 +25,6 @@ extern const unsigned char scsi_command_size[8]; #define COMMAND_SIZE(opcode) scsi_command_size[((opcode) >> 5) & 7] /* - * SCSI device types - */ - -#define MAX_SCSI_DEVICE_CODE 15 -extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE]; - -/* * Special value for scanning to specify scanning or rescanning of all * possible channels, (target) ids, or luns on a given shost. */ @@ -225,6 +218,9 @@ static inline int scsi_status_is_good(int status) #define TYPE_RBC 0x0e #define TYPE_NO_LUN 0x7f +/* Returns a human-readable name for the device */ +extern const char * scsi_device_type(unsigned type); + /* * standard mode-select header prepended to all mode-select commands */ -- cgit v0.10.2 From 19ac0db3e22de3b00cc4aadc7efbad0420c7aa08 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 6 Aug 2006 18:15:22 -0500 Subject: [SCSI] fix up short inquiry printing A recent drivers base commit: 3e95637a48820ff8bedb33e6439def96ccff1de5 Caused the bus to be added to dev_printk, so now our SCSI inquiry short messages print like this: scsiscsi 2:0:0:0: Direct access IBM-ESXS ST973401SS B519 PQ: 0 ANSI: 5 Just remove the "scsi" from the sdev_printk to compensate. 
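The duplicated prefix comes from how sdev_printk() forwards to dev_printk(): its first argument is only the printk level string, and dev_printk() now prepends the bus/driver name on its own. A simplified sketch of the forwarding; the real macros live in <scsi/scsi_device.h> and <linux/device.h>, so treat the exact expansion here as an assumption:

/* Sketch of the call chain (simplified). */
#define example_sdev_printk(level, sdev, fmt, a...) \
	dev_printk(level, &(sdev)->sdev_gendev, fmt, ##a)

/* dev_printk() already emits a "scsi <h:c:i:l>: " style prefix, so passing
 * KERN_NOTICE "scsi" as the level produced the doubled "scsiscsi 2:0:0:0:"
 * shown above, while plain KERN_NOTICE yields "scsi 2:0:0:0: ...". */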
Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1803994..114e206 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -659,7 +659,7 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) if (inq_result[7] & 0x10) sdev->sdtr = 1; - sdev_printk(KERN_NOTICE "scsi", sdev, "%s %.8s %.16s %.4s PQ: %d " + sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d " "ANSI: %d%s\n", scsi_device_type(sdev->type), sdev->vendor, sdev->model, sdev->rev, sdev->inq_periph_qual, inq_result[2] & 0x07, -- cgit v0.10.2 From 04846f25920d4b05d6040c531cc601049260db52 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Wed, 9 Aug 2006 17:31:16 +0200 Subject: [SCSI] limit recursion when flushing shost->starved_list Attached is a patch that should limit a possible recursion that can lead to a stack overflow like follows: Kernel stack overflow. CPU: 3 Not tainted Process zfcperp0.0.d819 (pid: 13897, task: 000000003e0d8cc8, ksp: 000000003499dbb8) Krnl PSW : 0404000180000000 000000000030f8b2 (get_device+0x12/0x48) Krnl GPRS: 00000000135a1980 000000000030f758 000000003ed6c1e8 0000000000000005 0000000000000000 000000000044a780 000000003dbf7000 0000000034e15800 000000003621c048 070000003499c108 000000003499c1a0 000000003ed6c000 0000000040895000 00000000408ab630 000000003499c0a0 000000003499c0a0 Krnl Code: a7 fb ff e8 a7 19 00 00 b9 02 00 22 e3 e0 f0 98 00 24 a7 84 Call Trace: ([<000000004089edc2>] scsi_request_fn+0x13e/0x650 [scsi_mod]) [<00000000002c5ff4>] blk_run_queue+0xd4/0x1a4 [<000000004089ff8c>] scsi_queue_insert+0x22c/0x2a4 [scsi_mod] [<000000004089779a>] scsi_dispatch_cmd+0x8a/0x3d0 [scsi_mod] [<000000004089f1ec>] scsi_request_fn+0x568/0x650 [scsi_mod] ... [<000000004089f1ec>] scsi_request_fn+0x568/0x650 [scsi_mod] [<00000000002c5ff4>] blk_run_queue+0xd4/0x1a4 [<000000004089ff8c>] scsi_queue_insert+0x22c/0x2a4 [scsi_mod] [<000000004089779a>] scsi_dispatch_cmd+0x8a/0x3d0 [scsi_mod] [<000000004089f1ec>] scsi_request_fn+0x568/0x650 [scsi_mod] [<00000000002c5ff4>] blk_run_queue+0xd4/0x1a4 [<000000004089fa9e>] scsi_run_host_queues+0x196/0x230 [scsi_mod] [<00000000409eba28>] zfcp_erp_thread+0x2638/0x3080 [zfcp] [<0000000000107462>] kernel_thread_starter+0x6/0xc [<000000000010745c>] kernel_thread_starter+0x0/0xc <0>Kernel panic - not syncing: Corrupt kernel stack, can't continue. This stack overflow occurred during tests on s390 using zfcp. Recursion depth for this panic was 19. Usually recursion between blk_run_queue and a request_fn is avoided using QUEUE_FLAG_REENTER. But this does not help if the scsi stack tries to flush the starved_list of a scsi_host. Limit recursion depth when flushing the starved_list of a scsi_host. 
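test_and_set_bit() atomically sets the bit and returns its previous value, so the !test_and_set_bit(...) expression in the hunk below both checks and claims QUEUE_FLAG_REENTER in one race-free step; setting it on the starved device's queue before running it lets the existing REENTER-based recursion avoidance mentioned above take effect for that queue as well, and the flag is cleared once the run returns. A reduced sketch of the claim/release pattern, with hypothetical names:

/* Sketch: allow at most one nested run, fall back to a flat run otherwise. */
if (!test_and_set_bit(EXAMPLE_REENTER_BIT, &flags_word)) {
	/* previous value was 0: this caller claimed the flag */
	run_queue_possibly_recursing();		/* hypothetical helper */
	clear_bit(EXAMPLE_REENTER_BIT, &flags_word);
} else {
	run_queue_flat();			/* hypothetical helper */
}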
Signed-off-by: Andreas Herrmann Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 077c1c6..d6743b9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -551,7 +551,15 @@ static void scsi_run_queue(struct request_queue *q) list_del_init(&sdev->starved_entry); spin_unlock_irqrestore(shost->host_lock, flags); - blk_run_queue(sdev->request_queue); + + if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && + !test_and_set_bit(QUEUE_FLAG_REENTER, + &sdev->request_queue->queue_flags)) { + blk_run_queue(sdev->request_queue); + clear_bit(QUEUE_FLAG_REENTER, + &sdev->request_queue->queue_flags); + } else + blk_run_queue(sdev->request_queue); spin_lock_irqsave(shost->host_lock, flags); if (unlikely(!list_empty(&sdev->starved_entry))) -- cgit v0.10.2 From c8f7b073e0e81499474a84ee2a90f77f7805c7f8 Mon Sep 17 00:00:00 2001 From: Mark Haverkamp Date: Thu, 3 Aug 2006 08:02:24 -0700 Subject: [SCSI] aacraid: interruptible ioctl Received from Mark Salyzyn This patch allows the FSACTL_SEND_LARGE_FIB, FSACTL_SENDFIB and FSACTL_SEND_RAW_SRB ioctl calls into the aacraid driver to be interruptible. Only necessary if the adapter and/or the management software has gone into some sort of misbehavior and the system is being rebooted, thus permitting the user management software applications to be killed relatively cleanly. The FIB queue resource is held out of the free queue until the adapter finally, if ever, completes the command. Signed-off-by: Mark Haverkamp Signed-off-by: James Bottomley diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 255421d..14d7aa9 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include /* ssleep prototype */ #include #include #include @@ -140,7 +140,8 @@ cleanup: fibptr->hw_fib_pa = hw_fib_pa; fibptr->hw_fib = hw_fib; } - aac_fib_free(fibptr); + if (retval != -EINTR) + aac_fib_free(fibptr); return retval; } @@ -621,7 +622,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry)); if(actual_fibsize != fibsize){ // User made a mistake - should not continue - dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n")); + dprintk((KERN_DEBUG"aacraid: Bad Size specified in " + "Raw SRB command calculated fibsize=%d " + "user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d " + "issued fibsize=%d\n", + actual_fibsize, user_srbcmd->sg.count, + sizeof(struct aac_srb), sizeof(struct sgentry), + fibsize)); rcode = -EINVAL; goto cleanup; } @@ -663,6 +670,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } + if (status == -EINTR) { + rcode = -EINTR; + goto cleanup; + } if (status != 0){ dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); @@ -696,8 +707,10 @@ cleanup: for(i=0; i <= sg_indx; i++){ kfree(sg_list[i]); } - aac_fib_complete(srbfib); - aac_fib_free(srbfib); + if (rcode != -EINTR) { + aac_fib_complete(srbfib); + aac_fib_free(srbfib); + } return rcode; } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 3f27419..c67da132 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -464,6 +464,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned 
long size, dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); + if (!dev->queues) + return -ENODEV; q = &dev->queues->queue[AdapNormCmdQueue]; if(wait) @@ -527,8 +529,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, } udelay(5); } - } else - down(&fibptr->event_wait); + } else if (down_interruptible(&fibptr->event_wait)) { + spin_lock_irqsave(&fibptr->event_lock, flags); + if (fibptr->done == 0) { + fibptr->done = 2; /* Tell interrupt we aborted */ + spin_unlock_irqrestore(&fibptr->event_lock, flags); + return -EINTR; + } + spin_unlock_irqrestore(&fibptr->event_lock, flags); + } BUG_ON(fibptr->done == 0); if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){ @@ -795,7 +804,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) /* Sniff for container changes */ - if (!dev) + if (!dev || !dev->fsa_dev) return; container = (u32)-1; diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index b2a5c72..8335f07 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -124,10 +124,15 @@ unsigned int aac_response_normal(struct aac_queue * q) } else { unsigned long flagv; spin_lock_irqsave(&fib->event_lock, flagv); - fib->done = 1; + if (!fib->done) + fib->done = 1; up(&fib->event_wait); spin_unlock_irqrestore(&fib->event_lock, flagv); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); + if (fib->done == 2) { + aac_fib_complete(fib); + aac_fib_free(fib); + } } consumed++; spin_lock_irqsave(q->lock, flags); @@ -316,7 +321,8 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index) unsigned long flagv; dprintk((KERN_INFO "event_wait up\n")); spin_lock_irqsave(&fib->event_lock, flagv); - fib->done = 1; + if (!fib->done) + fib->done = 1; up(&fib->event_wait); spin_unlock_irqrestore(&fib->event_lock, flagv); FIB_COUNTER_INCREMENT(aac_config.NormalRecved); -- cgit v0.10.2 From 8c23cd7457151fc8ace79ec700a8aeaa9fc5b3d9 Mon Sep 17 00:00:00 2001 From: Mark Haverkamp Date: Tue, 8 Aug 2006 08:52:14 -0700 Subject: [SCSI] aacraid: Restart adapter on firmware assert (Update 2) Received from Mark Salyzyn If the adapter should be in a blinkled (Firmware Assert) state when the driver loads, we will perform a warm restart of the Adapter Firmware to see if we can rescue the adapter. Possible causes of a blinkled can occur on some early release motherboard BIOSes, transitory PCI bus problems on embedded systems or non-x86 based architectures, transitory startup failures of early release drives or transitory hardware failures; some of which can bite the adapter later at runtime. Future enhancements will include recovery during runtime. Fixed extra whitespace space issue. 
Signed-off-by: Mark Haverkamp Signed-off-by: James Bottomley diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d0eecd4..05f8098 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1670,6 +1670,7 @@ extern struct aac_common aac_config; #define RCV_TEMP_READINGS 0x00000025 #define GET_COMM_PREFERRED_SETTINGS 0x00000026 #define IOP_RESET 0x00001000 +#define IOP_RESET_ALWAYS 0x00001001 #define RE_INIT_ADAPTER 0x000000ee /* diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c index 458ea89..f850c3a 100644 --- a/drivers/scsi/aacraid/rkt.c +++ b/drivers/scsi/aacraid/rkt.c @@ -395,6 +395,25 @@ static int aac_rkt_send(struct fib * fib) return 0; } +static int aac_rkt_restart_adapter(struct aac_dev *dev) +{ + u32 var; + + printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", + dev->name, dev->id); + + if (aac_rkt_check_health(dev) <= 0) + return 1; + if (rkt_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0, + &var, NULL, NULL, NULL, NULL)) + return 1; + if (var != 0x00000001) + return 1; + if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) + return 1; + return 0; +} + /** * aac_rkt_init - initialize an i960 based AAC card * @dev: device to configure @@ -417,6 +436,9 @@ int aac_rkt_init(struct aac_dev *dev) /* * Check to see if the board panic'd while booting. */ + if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) + if (aac_rkt_restart_adapter(dev)) + goto error_iounmap; /* * Check to see if the board failed any self tests. */ @@ -431,13 +453,6 @@ int aac_rkt_init(struct aac_dev *dev) printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); goto error_iounmap; } - /* - * Check to see if the board panic'd while booting. - */ - if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) { - printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance); - goto error_iounmap; - } start = jiffies; /* * Wait for the adapter to be up and running. Wait up to 3 minutes diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 035018d..c715c4b 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -394,6 +394,25 @@ static int aac_rx_send(struct fib * fib) return 0; } +static int aac_rx_restart_adapter(struct aac_dev *dev) +{ + u32 var; + + printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", + dev->name, dev->id); + + if (aac_rx_check_health(dev) <= 0) + return 1; + if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0, + &var, NULL, NULL, NULL, NULL)) + return 1; + if (var != 0x00000001) + return 1; + if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) + return 1; + return 0; +} + /** * aac_rx_init - initialize an i960 based AAC card * @dev: device to configure @@ -416,6 +435,9 @@ int aac_rx_init(struct aac_dev *dev) /* * Check to see if the board panic'd while booting. */ + if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) + if (aac_rx_restart_adapter(dev)) + goto error_iounmap; /* * Check to see if the board failed any self tests. */ @@ -424,13 +446,6 @@ int aac_rx_init(struct aac_dev *dev) goto error_iounmap; } /* - * Check to see if the board panic'd while booting. - */ - if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) { - printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance); - goto error_iounmap; - } - /* * Check to see if the monitor panic'd while booting. 
*/ if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) { -- cgit v0.10.2 From 90ee346651524eb275405d410f5d3bb6765a2d93 Mon Sep 17 00:00:00 2001 From: Mark Haverkamp Date: Thu, 3 Aug 2006 08:03:07 -0700 Subject: [SCSI] aacraid: Check for unlikely errors Received from Mark Salyzyn The enclosed patch cleans up some code fragments, adds some paranoia (unproven causes of potential driver failures). Signed-off-by: Mark Haverkamp Signed-off-by: James Bottomley diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 83b5c7d..699351c 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -489,6 +489,8 @@ int aac_probe_container(struct aac_dev *dev, int cid) unsigned instance; fsa_dev_ptr = dev->fsa_dev; + if (!fsa_dev_ptr) + return -ENOMEM; instance = dev->scsi_host_ptr->unique_id; if (!(fibptr = aac_fib_alloc(dev))) @@ -1392,6 +1394,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) struct scsi_cmnd *cmd; struct scsi_device *sdev = scsicmd->device; int active = 0; + struct aac_dev *aac; unsigned long flags; /* @@ -1413,11 +1416,11 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) if (active) return SCSI_MLQUEUE_DEVICE_BUSY; + aac = (struct aac_dev *)scsicmd->device->host->hostdata; /* * Allocate and initialize a Fib */ - if (!(cmd_fibcontext = - aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) + if (!(cmd_fibcontext = aac_fib_alloc(aac))) return SCSI_MLQUEUE_HOST_BUSY; aac_fib_init(cmd_fibcontext); @@ -1470,6 +1473,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) struct aac_dev *dev = (struct aac_dev *)host->hostdata; struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; + if (fsa_dev_ptr == NULL) + return -1; /* * If the bus, id or lun is out of range, return fail * Test does not apply to ID 16, the pseudo id for the controller @@ -1782,6 +1787,8 @@ static int query_disk(struct aac_dev *dev, void __user *arg) struct fsa_dev_info *fsa_dev_ptr; fsa_dev_ptr = dev->fsa_dev; + if (!fsa_dev_ptr) + return -ENODEV; if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) return -EFAULT; if (qd.cnum == -1) @@ -1843,6 +1850,10 @@ static int delete_disk(struct aac_dev *dev, void __user *arg) struct fsa_dev_info *fsa_dev_ptr; fsa_dev_ptr = dev->fsa_dev; + if (!fsa_dev_ptr) + return -ENODEV; + if (!fsa_dev_ptr) + return -ENODEV; if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) return -EFAULT; diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 1cd3584..87a9550 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -180,7 +180,7 @@ int aac_send_shutdown(struct aac_dev * dev) -2 /* Timeout silently */, 1, NULL, NULL); - if (status == 0) + if (status >= 0) aac_fib_complete(fibctx); aac_fib_free(fibctx); return status; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index e42a479..9d8b550 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1013,6 +1013,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev) list_del(&aac->entry); scsi_host_put(shost); pci_disable_device(pdev); + if (list_empty(&aac_devices)) { + unregister_chrdev(aac_cfg_major, "aac"); + aac_cfg_major = -1; + } } static struct pci_driver aac_pci_driver = { -- cgit v0.10.2 From 8c867b257d159ca04602d7087fa29f846785f9ea Mon Sep 17 00:00:00 2001 From: Mark Haverkamp Date: Thu, 3 Aug 2006 08:03:30 -0700 Subject: [SCSI] aacraid: Reset adapter in recovery timeout Received from Mark Salyzyn If the adapter is in 
blinkled (Firmware Assert) when error recovery timeout actions have been triggered, perform an adapter warm reset and restart the initialization. Signed-off-by: Mark Haverkamp Signed-off-by: James Bottomley diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 699351c..37c55dd 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -175,7 +175,7 @@ MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. * * Query config status, and commit the configuration if needed. */ -int aac_get_config_status(struct aac_dev *dev) +int aac_get_config_status(struct aac_dev *dev, int commit_flag) { int status = 0; struct fib * fibptr; @@ -219,7 +219,7 @@ int aac_get_config_status(struct aac_dev *dev) aac_fib_complete(fibptr); /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ if (status >= 0) { - if (commit == 1) { + if ((commit == 1) || commit_flag) { struct aac_commit_config * dinfo; aac_fib_init(fibptr); dinfo = (struct aac_commit_config *) fib_data(fibptr); @@ -784,8 +784,9 @@ int aac_get_adapter_info(struct aac_dev* dev) dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount); } - tmp = le32_to_cpu(dev->adapter_info.kernelrev); - printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", + if (!dev->in_reset) { + tmp = le32_to_cpu(dev->adapter_info.kernelrev); + printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", dev->name, dev->id, tmp>>24, @@ -794,20 +795,21 @@ int aac_get_adapter_info(struct aac_dev* dev) le32_to_cpu(dev->adapter_info.kernelbuild), (int)sizeof(dev->supplement_adapter_info.BuildDate), dev->supplement_adapter_info.BuildDate); - tmp = le32_to_cpu(dev->adapter_info.monitorrev); - printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", + tmp = le32_to_cpu(dev->adapter_info.monitorrev); + printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", dev->name, dev->id, tmp>>24,(tmp>>16)&0xff,tmp&0xff, le32_to_cpu(dev->adapter_info.monitorbuild)); - tmp = le32_to_cpu(dev->adapter_info.biosrev); - printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", + tmp = le32_to_cpu(dev->adapter_info.biosrev); + printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", dev->name, dev->id, tmp>>24,(tmp>>16)&0xff,tmp&0xff, le32_to_cpu(dev->adapter_info.biosbuild)); - if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) - printk(KERN_INFO "%s%d: serial %x\n", - dev->name, dev->id, - le32_to_cpu(dev->adapter_info.serial[0])); + if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) + printk(KERN_INFO "%s%d: serial %x\n", + dev->name, dev->id, + le32_to_cpu(dev->adapter_info.serial[0])); + } dev->nondasd_support = 0; dev->raid_scsi_mode = 0; @@ -1417,6 +1419,9 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) return SCSI_MLQUEUE_DEVICE_BUSY; aac = (struct aac_dev *)scsicmd->device->host->hostdata; + if (aac->in_reset) + return SCSI_MLQUEUE_HOST_BUSY; + /* * Allocate and initialize a Fib */ @@ -1504,6 +1509,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case INQUIRY: case READ_CAPACITY: case TEST_UNIT_READY: + if (dev->in_reset) + return -1; spin_unlock_irq(host->host_lock); aac_probe_container(dev, cid); if ((fsa_dev_ptr[cid].valid & 1) == 0) @@ -1529,6 +1536,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) } } else { /* check for physical non-dasd devices */ if(dev->nondasd_support == 1){ + if (dev->in_reset) + return -1; return aac_send_srb_fib(scsicmd); } else { scsicmd->result = DID_NO_CONNECT << 16; @@ -1584,6 +1593,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) scsicmd->scsi_done(scsicmd); return 0; } + if (dev->in_reset) + 
return -1; setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); @@ -1739,6 +1750,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case READ_10: case READ_12: case READ_16: + if (dev->in_reset) + return -1; /* * Hack to keep track of ordinal number of the device that * corresponds to a container. Needed to convert @@ -1757,6 +1770,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case WRITE_10: case WRITE_12: case WRITE_16: + if (dev->in_reset) + return -1; return aac_write(scsicmd, cid); case SYNCHRONIZE_CACHE: diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 05f8098..8924c18 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1029,6 +1029,7 @@ struct aac_dev init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) u8 raw_io_64; u8 printf_enabled; + u8 in_reset; }; #define aac_adapter_interrupt(dev) \ @@ -1789,7 +1790,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); int aac_fib_complete(struct fib * context); #define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) struct aac_dev *aac_init_adapter(struct aac_dev *dev); -int aac_get_config_status(struct aac_dev *dev); +int aac_get_config_status(struct aac_dev *dev, int commit_flag); int aac_get_containers(struct aac_dev *dev); int aac_scsi_cmd(struct scsi_cmnd *cmd); int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg); @@ -1800,6 +1801,7 @@ int aac_sa_init(struct aac_dev *dev); unsigned int aac_response_normal(struct aac_queue * q); unsigned int aac_command_normal(struct aac_queue * q); unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); +int aac_check_health(struct aac_dev * dev); int aac_command_thread(void *data); int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 14d7aa9..da1d3a9 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -298,7 +298,7 @@ return_fib: spin_unlock_irqrestore(&dev->fib_lock, flags); /* If someone killed the AIF aacraid thread, restart it */ status = !dev->aif_thread; - if (status && dev->queues && dev->fsa_dev) { + if (status && !dev->in_reset && dev->queues && dev->fsa_dev) { /* Be paranoid, be very paranoid! */ kthread_stop(dev->thread); ssleep(1); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index c67da132..53add53 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -40,8 +40,10 @@ #include #include #include +#include #include #include +#include #include #include "aacraid.h" @@ -1054,6 +1056,262 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) } +static int _aac_reset_adapter(struct aac_dev *aac) +{ + int index, quirks; + u32 ret; + int retval; + struct Scsi_Host *host; + struct scsi_device *dev; + struct scsi_cmnd *command; + struct scsi_cmnd *command_list; + + /* + * Assumptions: + * - host is locked. + * - in_reset is asserted, so no new i/o is getting to the + * card. + * - The card is dead. 
+ */ + host = aac->scsi_host_ptr; + scsi_block_requests(host); + aac_adapter_disable_int(aac); + spin_unlock_irq(host->host_lock); + kthread_stop(aac->thread); + + /* + * If a positive health, means in a known DEAD PANIC + * state and the adapter could be reset to `try again'. + */ + retval = aac_adapter_check_health(aac); + if (retval == 0) + retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS, + 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL); + if (retval) + retval = aac_adapter_sync_cmd(aac, IOP_RESET, + 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL); + + if (retval) + goto out; + if (ret != 0x00000001) { + retval = -ENODEV; + goto out; + } + + index = aac->cardtype; + + /* + * Re-initialize the adapter, first free resources, then carefully + * apply the initialization sequence to come back again. Only risk + * is a change in Firmware dropping cache, it is assumed the caller + * will ensure that i/o is queisced and the card is flushed in that + * case. + */ + aac_fib_map_free(aac); + aac->hw_fib_va = NULL; + aac->hw_fib_pa = 0; + pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); + aac->comm_addr = NULL; + aac->comm_phys = 0; + kfree(aac->queues); + aac->queues = NULL; + free_irq(aac->pdev->irq, aac); + kfree(aac->fsa_dev); + aac->fsa_dev = NULL; + if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) { + if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) || + ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK)))) + goto out; + } else { + if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) || + ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL)))) + goto out; + } + if ((retval = (*(aac_get_driver_ident(index)->init))(aac))) + goto out; + if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) + if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) + goto out; + aac->thread = kthread_run(aac_command_thread, aac, aac->name); + if (IS_ERR(aac->thread)) { + retval = PTR_ERR(aac->thread); + goto out; + } + (void)aac_get_adapter_info(aac); + quirks = aac_get_driver_ident(index)->quirks; + if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) { + host->sg_tablesize = 34; + host->max_sectors = (host->sg_tablesize * 8) + 112; + } + if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) { + host->sg_tablesize = 17; + host->max_sectors = (host->sg_tablesize * 8) + 112; + } + aac_get_config_status(aac, 1); + aac_get_containers(aac); + /* + * This is where the assumption that the Adapter is quiesced + * is important. 
+ */ + command_list = NULL; + __shost_for_each_device(dev, host) { + unsigned long flags; + spin_lock_irqsave(&dev->list_lock, flags); + list_for_each_entry(command, &dev->cmd_list, list) + if (command->SCp.phase == AAC_OWNER_FIRMWARE) { + command->SCp.buffer = (struct scatterlist *)command_list; + command_list = command; + } + spin_unlock_irqrestore(&dev->list_lock, flags); + } + while ((command = command_list)) { + command_list = (struct scsi_cmnd *)command->SCp.buffer; + command->SCp.buffer = NULL; + command->result = DID_OK << 16 + | COMMAND_COMPLETE << 8 + | SAM_STAT_TASK_SET_FULL; + command->SCp.phase = AAC_OWNER_ERROR_HANDLER; + command->scsi_done(command); + } + retval = 0; + +out: + aac->in_reset = 0; + scsi_unblock_requests(host); + spin_lock_irq(host->host_lock); + return retval; +} + +int aac_check_health(struct aac_dev * aac) +{ + int BlinkLED; + unsigned long time_now, flagv = 0; + struct list_head * entry; + struct Scsi_Host * host; + + /* Extending the scope of fib_lock slightly to protect aac->in_reset */ + if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) + return 0; + + if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { + spin_unlock_irqrestore(&aac->fib_lock, flagv); + return 0; /* OK */ + } + + aac->in_reset = 1; + + /* Fake up an AIF: + * aac_aifcmd.command = AifCmdEventNotify = 1 + * aac_aifcmd.seqnum = 0xFFFFFFFF + * aac_aifcmd.data[0] = AifEnExpEvent = 23 + * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 + * aac.aifcmd.data[2] = AifHighPriority = 3 + * aac.aifcmd.data[3] = BlinkLED + */ + + time_now = jiffies/HZ; + entry = aac->fib_list.next; + + /* + * For each Context that is on the + * fibctxList, make a copy of the + * fib, and then set the event to wake up the + * thread that is waiting for it. + */ + while (entry != &aac->fib_list) { + /* + * Extract the fibctx + */ + struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); + struct hw_fib * hw_fib; + struct fib * fib; + /* + * Check if the queue is getting + * backlogged + */ + if (fibctx->count > 20) { + /* + * It's *not* jiffies folks, + * but jiffies / HZ, so do not + * panic ... + */ + u32 time_last = fibctx->jiffies; + /* + * Has it been > 2 minutes + * since the last read off + * the queue? + */ + if ((time_now - time_last) > aif_timeout) { + entry = entry->next; + aac_close_fib_context(aac, fibctx); + continue; + } + } + /* + * Warning: no sleep allowed while + * holding spinlock + */ + hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); + fib = kmalloc(sizeof(struct fib), GFP_ATOMIC); + if (fib && hw_fib) { + struct aac_aifcmd * aif; + + memset(hw_fib, 0, sizeof(struct hw_fib)); + memset(fib, 0, sizeof(struct fib)); + fib->hw_fib = hw_fib; + fib->dev = aac; + aac_fib_init(fib); + fib->type = FSAFS_NTC_FIB_CONTEXT; + fib->size = sizeof (struct fib); + fib->data = hw_fib->data; + aif = (struct aac_aifcmd *)hw_fib->data; + aif->command = cpu_to_le32(AifCmdEventNotify); + aif->seqnum = cpu_to_le32(0xFFFFFFFF); + aif->data[0] = cpu_to_le32(AifEnExpEvent); + aif->data[1] = cpu_to_le32(AifExeFirmwarePanic); + aif->data[2] = cpu_to_le32(AifHighPriority); + aif->data[3] = cpu_to_le32(BlinkLED); + + /* + * Put the FIB onto the + * fibctx's fibs + */ + list_add_tail(&fib->fiblink, &fibctx->fib_list); + fibctx->count++; + /* + * Set the event to wake up the + * thread that will waiting. 
+ */ + up(&fibctx->wait_sem); + } else { + printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); + kfree(fib); + kfree(hw_fib); + } + entry = entry->next; + } + + spin_unlock_irqrestore(&aac->fib_lock, flagv); + + if (BlinkLED < 0) { + printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); + goto out; + } + + printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); + + host = aac->scsi_host_ptr; + spin_lock_irqsave(host->host_lock, flagv); + BlinkLED = _aac_reset_adapter(aac); + spin_unlock_irqrestore(host->host_lock, flagv); + return BlinkLED; + +out: + aac->in_reset = 0; + return BlinkLED; +} + + /** * aac_command_thread - command processing thread * @dev: Adapter to monitor diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 9d8b550..d67058f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -454,17 +454,17 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); aac = (struct aac_dev *)host->hostdata; - if (aac_adapter_check_health(aac)) { - printk(KERN_ERR "%s: Host adapter appears dead\n", - AAC_DRIVERNAME); - return -ENODEV; - } + + if ((count = aac_check_health(aac))) + return count; /* * Wait for all commands to complete to this specific * target (block maximum 60 seconds). */ for (count = 60; count; --count) { - int active = 0; + int active = aac->in_reset; + + if (active == 0) __shost_for_each_device(dev, host) { spin_lock_irqsave(&dev->list_lock, flags); list_for_each_entry(command, &dev->cmd_list, list) { @@ -933,7 +933,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, else shost->max_channel = 0; - aac_get_config_status(aac); + aac_get_config_status(aac, 0); aac_get_containers(aac); list_add(&aac->entry, insert); -- cgit v0.10.2 From 84961f28e9d13a4b193d0c8545f3c060c1890ff3 Mon Sep 17 00:00:00 2001 From: dave wysochanski Date: Wed, 9 Aug 2006 14:56:32 -0400 Subject: [SCSI] Don't add scsi_device for devices that return PQ=1, PDT=0x1f Some targets may return slight variations of PQ and PDT to indicate no LUN mapped. USB UFI setting PDT=0x1f but having reserved bits for PQ is one example, and NetApp targets returning PQ=1 and PDT=0x1f is another. Both instances seem like reasonable responses according to SPC-3 and UFI specs. The current scsi_probe_and_add_lun() code adds a scsi_device for targets that return PQ=1 and PDT=0x1f. This causes LUNs of type "UNKNOWN" to show up in /proc/scsi/scsi when no LUNs are mapped. In addition, subsequent rescans fail to recognize LUNs that may be added on the target, unless preceded by a write to the delete attribute of the "UNKNOWN" LUN. This patch addresses this problem by skipping over the scsi_add_lun() when PQ=1,PDT=0x1f is encountered, and just returns SCSI_SCAN_TARGET_PRESENT. Signed-off-by: Dave Wysochanski Signed-off-by: Christoph Hellwig Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 114e206..a24d346 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -893,11 +893,26 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, } /* - * Non-standard SCSI targets may set the PDT to 0x1f (unknown or - * no device type) instead of using the Peripheral Qualifier to - * indicate that no LUN is present. For example, USB UFI does this. + * Some targets may set slight variations of PQ and PDT to signal + * that no LUN is present, so don't add sdev in these cases. 
+ * Two specific examples are: + * 1) NetApp targets: return PQ=1, PDT=0x1f + * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" + * in the UFI 1.0 spec (we cannot rely on reserved bits). + * + * References: + * 1) SCSI SPC-3, pp. 145-146 + * PQ=1: "A peripheral device having the specified peripheral + * device type is not connected to this logical unit. However, the + * device server is capable of supporting the specified peripheral + * device type on this logical unit." + * PDT=0x1f: "Unknown or no device type" + * 2) USB UFI 1.0, p. 20 + * PDT=00h Direct-access device (floppy) + * PDT=1Fh none (no FDD connected to the requested logical unit) */ - if (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f) { + if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && + (result[0] & 0x1f) == 0x1f) { SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: peripheral device type" " of 31, no device added\n")); -- cgit v0.10.2 From a2f5d4d94f0ab9560b9a99d73d5b86b377c7f201 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Thu, 10 Aug 2006 21:41:13 -0400 Subject: [SCSI] remove unnecessary includes of linux/config.h from drivers/scsi/ kbuild includes this automatically these days. Signed-off-by: Dave Jones Signed-off-by: James Bottomley diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c index 3f85b5e..ba3bcca 100644 --- a/drivers/scsi/aic7xxx_old.c +++ b/drivers/scsi/aic7xxx_old.c @@ -249,8 +249,6 @@ #include #include /* for kmalloc() */ -#include /* for CONFIG_PCI */ - #define AIC7XXX_C_VERSION "5.2.6" #define ALL_TARGETS -1 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index e133733..4580633 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -46,7 +46,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); #include #include /* for kmalloc() */ -#include /* for CONFIG_PCI */ #include /* for PCI support */ #include #include diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index ab2f8b2..6b41c2e 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -15,7 +15,6 @@ * * For more information, visit http://www.highpoint-tech.com */ -#include #include #include #include diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c index 4b6aa30..b3095fd 100644 --- a/drivers/scsi/libata-eh.c +++ b/drivers/scsi/libata-eh.c @@ -32,7 +32,6 @@ * */ -#include #include #include #include diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h index f51e466..d5a55fa 100644 --- a/drivers/scsi/scsi.h +++ b/drivers/scsi/scsi.h @@ -20,8 +20,6 @@ #ifndef _SCSI_H #define _SCSI_H -#include /* for CONFIG_SCSI_LOGGING */ - #include #include #include -- cgit v0.10.2 From 016131b8fffa1085b4ad165ab228116fdc278ebe Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 14 Aug 2006 08:20:25 -0400 Subject: [SCSI] fc transport: convert fc_host symbolic_name attribute to a dynamic attribute Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index b03aa85..c1c5cdf 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -815,7 +815,6 @@ fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, unsigned long long); -fc_private_host_rd_attr(symbolic_name, "%s\n", (FC_SYMBOLIC_NAME_SIZE +1)); fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); fc_private_host_rd_attr(serial_number, "%s\n", 
(FC_SERIAL_NUMBER_SIZE +1)); @@ -858,6 +857,7 @@ fc_host_rd_attr(port_id, "0x%06x\n", 20); fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); +fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); /* Private Host Attributes */ @@ -1223,7 +1223,6 @@ fc_attach_transport(struct fc_function_template *ft) SETUP_HOST_ATTRIBUTE_RD(permanent_port_name); SETUP_HOST_ATTRIBUTE_RD(supported_classes); SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); - SETUP_HOST_ATTRIBUTE_RD(symbolic_name); SETUP_HOST_ATTRIBUTE_RD(supported_speeds); SETUP_HOST_ATTRIBUTE_RD(maxframe_size); SETUP_HOST_ATTRIBUTE_RD(serial_number); @@ -1234,6 +1233,7 @@ fc_attach_transport(struct fc_function_template *ft) SETUP_HOST_ATTRIBUTE_RD(active_fc4s); SETUP_HOST_ATTRIBUTE_RD(speed); SETUP_HOST_ATTRIBUTE_RD(fabric_name); + SETUP_HOST_ATTRIBUTE_RD(symbolic_name); /* Transport-managed attributes */ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 6d28b03..b7f62b8 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -409,6 +409,7 @@ struct fc_function_template { void (*get_host_active_fc4s)(struct Scsi_Host *); void (*get_host_speed)(struct Scsi_Host *); void (*get_host_fabric_name)(struct Scsi_Host *); + void (*get_host_symbolic_name)(struct Scsi_Host *); struct fc_host_statistics * (*get_fc_host_stats)(struct Scsi_Host *); void (*reset_fc_host_stats)(struct Scsi_Host *); @@ -445,7 +446,6 @@ struct fc_function_template { unsigned long show_host_permanent_port_name:1; unsigned long show_host_supported_classes:1; unsigned long show_host_supported_fc4s:1; - unsigned long show_host_symbolic_name:1; unsigned long show_host_supported_speeds:1; unsigned long show_host_maxframe_size:1; unsigned long show_host_serial_number:1; @@ -456,6 +456,7 @@ struct fc_function_template { unsigned long show_host_active_fc4s:1; unsigned long show_host_speed:1; unsigned long show_host_fabric_name:1; + unsigned long show_host_symbolic_name:1; }; -- cgit v0.10.2 From 2b6ee9b5295460017fc1bc3d60545512df280908 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 14 Aug 2006 23:09:23 -0700 Subject: [SCSI] aic7*: cleanup MODULE_PARM_DESC strings Modify beginning string to be more readable. Remove one trailing newline. 
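For reference only (not part of the patch): the convention being applied is the usual module_param()/MODULE_PARM_DESC() pairing, with the heading line ending in a colon and no bare trailing "\n". A minimal sketch with a hypothetical parameter name, reusing option names that appear in the hunks below:

#include <linux/module.h>

static char *mydrv_options;	/* hypothetical, stands in for aic79xx/aic7xxx */
module_param(mydrv_options, charp, 0444);
MODULE_PARM_DESC(mydrv_options,
"period-delimited options string:\n"
"	verbose			Enable verbose/diagnostic logging\n"
"	seltime:<n>		Shorten the selection timeout\n");
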
Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: James Bottomley diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 998999c..c7eeace 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -321,7 +321,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(AIC79XX_DRIVER_VERSION); module_param(aic79xx, charp, 0444); MODULE_PARM_DESC(aic79xx, -"period delimited, options string.\n" +"period-delimited options string:\n" " verbose Enable verbose/diagnostic logging\n" " allow_memio Allow device registers to be memory mapped\n" " debug Bitmask of debug values to enable\n" @@ -346,7 +346,7 @@ MODULE_PARM_DESC(aic79xx, " Shorten the selection timeout to 128ms\n" "\n" " options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" -"\n"); +); static void ahd_linux_handle_scsi_status(struct ahd_softc *, struct scsi_device *, diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index aa4be8a..e5bb4d8 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -341,7 +341,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(AIC7XXX_DRIVER_VERSION); module_param(aic7xxx, charp, 0444); MODULE_PARM_DESC(aic7xxx, -"period delimited, options string.\n" +"period-delimited options string:\n" " verbose Enable verbose/diagnostic logging\n" " allow_memio Allow device registers to be memory mapped\n" " debug Bitmask of debug values to enable\n" -- cgit v0.10.2 From f3d7271c5ac9029d19fc0252a85bc045334382cc Mon Sep 17 00:00:00 2001 From: Henrik Kretzschmar Date: Tue, 15 Aug 2006 11:17:21 +0200 Subject: [SCSI] convert to PCI_DEVICE() macro Convert the pci_device_id-table of the megaraid_sas-driver to the PCI_DEVICE-macro, to safe some lines. Signed-off-by: Henrik Kretzschmar Acked-by: "Patro, Sumant" Signed-off-by: James Bottomley diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index a8c9627..b36307d 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -53,31 +53,15 @@ MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver"); */ static struct pci_device_id megasas_pci_table[] = { - { - PCI_VENDOR_ID_LSI_LOGIC, - PCI_DEVICE_ID_LSI_SAS1064R, /* xscale IOP */ - PCI_ANY_ID, - PCI_ANY_ID, - }, - { - PCI_VENDOR_ID_LSI_LOGIC, - PCI_DEVICE_ID_LSI_SAS1078R, /* ppc IOP */ - PCI_ANY_ID, - PCI_ANY_ID, - }, - { - PCI_VENDOR_ID_LSI_LOGIC, - PCI_DEVICE_ID_LSI_VERDE_ZCR, /* xscale IOP, vega */ - PCI_ANY_ID, - PCI_ANY_ID, - }, - { - PCI_VENDOR_ID_DELL, - PCI_DEVICE_ID_DELL_PERC5, /* xscale IOP */ - PCI_ANY_ID, - PCI_ANY_ID, - }, - {0} /* Terminating entry */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, + /* xscale IOP */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, + /* ppc IOP */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, + /* xscale IOP, vega */ + {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, + /* xscale IOP */ + {} }; MODULE_DEVICE_TABLE(pci, megasas_pci_table); -- cgit v0.10.2 From b8d08210126a7b769b857720a59721a453a57a1e Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 17 Aug 2006 08:00:43 -0400 Subject: [SCSI] fc transport: add fc_host system_hostname attribute and u64_to_wwn() This patch updates the fc transport for the following: - Addition of a new attribute "system_hostname" which can be used to set the fully qualified hostname that the fc_host is attached to. 
The fc_host can then register this string as the FDMI-based host name attribute. Note: for NPIV, a fc_host could be associated with a system which is not the local system. - Add the inline function u64_to_wwn(), which is the inverse of the existing wwn_to_u64() function. - Slight reorg, just to keep dynamic attributes with each other, etc Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index c1c5cdf..79d31ca 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -301,8 +301,6 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, fc_host->supported_classes = FC_COS_UNSPECIFIED; memset(fc_host->supported_fc4s, 0, sizeof(fc_host->supported_fc4s)); - memset(fc_host->symbolic_name, 0, - sizeof(fc_host->symbolic_name)); fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; fc_host->maxframe_size = -1; memset(fc_host->serial_number, 0, @@ -315,6 +313,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, sizeof(fc_host->active_fc4s)); fc_host->speed = FC_PORTSPEED_UNKNOWN; fc_host->fabric_name = -1; + memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name)); + memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname)); fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN; @@ -688,6 +688,25 @@ store_fc_host_##field(struct class_device *cdev, const char *buf, \ return count; \ } +#define fc_host_store_str_function(field, slen) \ +static ssize_t \ +store_fc_host_##field(struct class_device *cdev, const char *buf, \ + size_t count) \ +{ \ + struct Scsi_Host *shost = transport_class_to_shost(cdev); \ + struct fc_internal *i = to_fc_internal(shost->transportt); \ + unsigned int cnt=count; \ + \ + /* count may include a LF at end of string */ \ + if (buf[cnt-1] == '\n') \ + cnt--; \ + if (cnt > ((slen) - 1)) \ + return -EINVAL; \ + memcpy(fc_host_##field(shost), buf, cnt); \ + i->f->set_host_##field(shost); \ + return count; \ +} + #define fc_host_rd_attr(field, format_string, sz) \ fc_host_show_function(field, format_string, sz, ) \ static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ @@ -859,6 +878,12 @@ fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); +fc_private_host_show_function(system_hostname, "%s\n", + FC_SYMBOLIC_NAME_SIZE + 1, ) +fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE) +static FC_CLASS_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR, + show_fc_host_system_hostname, store_fc_host_system_hostname); + /* Private Host Attributes */ @@ -1234,6 +1259,7 @@ fc_attach_transport(struct fc_function_template *ft) SETUP_HOST_ATTRIBUTE_RD(speed); SETUP_HOST_ATTRIBUTE_RD(fabric_name); SETUP_HOST_ATTRIBUTE_RD(symbolic_name); + SETUP_HOST_ATTRIBUTE_RW(system_hostname); /* Transport-managed attributes */ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index b7f62b8..c74be5d 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -312,7 +312,6 @@ struct fc_host_attrs { u64 permanent_port_name; u32 supported_classes; u8 supported_fc4s[FC_FC4_LIST_SIZE]; - char symbolic_name[FC_SYMBOLIC_NAME_SIZE]; u32 supported_speeds; u32 maxframe_size; char serial_number[FC_SERIAL_NUMBER_SIZE]; @@ -324,6 +323,8 @@ struct fc_host_attrs { u8 
active_fc4s[FC_FC4_LIST_SIZE]; u32 speed; u64 fabric_name; + char symbolic_name[FC_SYMBOLIC_NAME_SIZE]; + char system_hostname[FC_SYMBOLIC_NAME_SIZE]; /* Private (Transport-managed) Attributes */ enum fc_tgtid_binding_type tgtid_bind_type; @@ -354,8 +355,6 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->supported_classes) #define fc_host_supported_fc4s(x) \ (((struct fc_host_attrs *)(x)->shost_data)->supported_fc4s) -#define fc_host_symbolic_name(x) \ - (((struct fc_host_attrs *)(x)->shost_data)->symbolic_name) #define fc_host_supported_speeds(x) \ (((struct fc_host_attrs *)(x)->shost_data)->supported_speeds) #define fc_host_maxframe_size(x) \ @@ -374,6 +373,10 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->speed) #define fc_host_fabric_name(x) \ (((struct fc_host_attrs *)(x)->shost_data)->fabric_name) +#define fc_host_symbolic_name(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->symbolic_name) +#define fc_host_system_hostname(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->system_hostname) #define fc_host_tgtid_bind_type(x) \ (((struct fc_host_attrs *)(x)->shost_data)->tgtid_bind_type) #define fc_host_rports(x) \ @@ -410,6 +413,7 @@ struct fc_function_template { void (*get_host_speed)(struct Scsi_Host *); void (*get_host_fabric_name)(struct Scsi_Host *); void (*get_host_symbolic_name)(struct Scsi_Host *); + void (*set_host_system_hostname)(struct Scsi_Host *); struct fc_host_statistics * (*get_fc_host_stats)(struct Scsi_Host *); void (*reset_fc_host_stats)(struct Scsi_Host *); @@ -457,6 +461,7 @@ struct fc_function_template { unsigned long show_host_speed:1; unsigned long show_host_fabric_name:1; unsigned long show_host_symbolic_name:1; + unsigned long show_host_system_hostname:1; }; @@ -492,6 +497,25 @@ fc_remote_port_chkready(struct fc_rport *rport) return result; } +static inline u64 wwn_to_u64(u8 *wwn) +{ + return (u64)wwn[0] << 56 | (u64)wwn[1] << 48 | + (u64)wwn[2] << 40 | (u64)wwn[3] << 32 | + (u64)wwn[4] << 24 | (u64)wwn[5] << 16 | + (u64)wwn[6] << 8 | (u64)wwn[7]; +} + +static inline void u64_to_wwn(u64 inm, u8 *wwn) +{ + wwn[0] = (inm >> 56) & 0xff; + wwn[1] = (inm >> 48) & 0xff; + wwn[2] = (inm >> 40) & 0xff; + wwn[3] = (inm >> 32) & 0xff; + wwn[4] = (inm >> 24) & 0xff; + wwn[5] = (inm >> 16) & 0xff; + wwn[6] = (inm >> 8) & 0xff; + wwn[7] = inm & 0xff; +} struct scsi_transport_template *fc_attach_transport( struct fc_function_template *); @@ -503,12 +527,4 @@ void fc_remote_port_delete(struct fc_rport *rport); void fc_remote_port_rolechg(struct fc_rport *rport, u32 roles); int scsi_is_fc_rport(const struct device *); -static inline u64 wwn_to_u64(u8 *wwn) -{ - return (u64)wwn[0] << 56 | (u64)wwn[1] << 48 | - (u64)wwn[2] << 40 | (u64)wwn[3] << 32 | - (u64)wwn[4] << 24 | (u64)wwn[5] << 16 | - (u64)wwn[6] << 8 | (u64)wwn[7]; -} - #endif /* SCSI_TRANSPORT_FC_H */ -- cgit v0.10.2 From 4041b9cd87d97a7c73a5bf5a9305dffee2599386 Mon Sep 17 00:00:00 2001 From: Michal Piotrowski Date: Thu, 17 Aug 2006 13:28:22 +0000 Subject: [SCSI] megaraid_sas: pci_module_init to pci_register_driver conversion Signed-off-by: Michal Piotrowski Acked-by: "Patro, Sumant" Signed-off-by: James Bottomley diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index b36307d..4cab5b5 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -2838,7 +2838,7 @@ static int __init megasas_init(void) /* * Register ourselves as PCI hotplug module */ - rval = pci_module_init(&megasas_pci_driver); + 
rval = pci_register_driver(&megasas_pci_driver); if (rval) { printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); -- cgit v0.10.2 From d2afb3ae04e36dbc6e9eb2d8bd54406ff7b6b3bd Mon Sep 17 00:00:00 2001 From: Daniel Walker Date: Mon, 14 Aug 2006 23:09:23 -0700 Subject: [SCSI] BusLogic gcc 4.1 warning fixes - Reworked all the very long lines in that block (this drivers full of them though) - Returns an error in three places that it didn't before. - Properly clean up after a scsi_add_host() failure. Signed-off-by: Daniel Walker Signed-off-by: Andrew Morton Signed-off-by: James Bottomley diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 16a12a3..59d1ada 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -2176,6 +2176,7 @@ static int __init BusLogic_init(void) { int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex; struct BusLogic_HostAdapter *PrototypeHostAdapter; + int ret = 0; #ifdef MODULE if (BusLogic) @@ -2282,25 +2283,49 @@ static int __init BusLogic_init(void) perform Target Device Inquiry. */ if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) && - BusLogic_ReportHostAdapterConfiguration(HostAdapter) && BusLogic_AcquireResources(HostAdapter) && BusLogic_CreateInitialCCBs(HostAdapter) && BusLogic_InitializeHostAdapter(HostAdapter) && BusLogic_TargetDeviceInquiry(HostAdapter)) { + BusLogic_ReportHostAdapterConfiguration(HostAdapter) && + BusLogic_AcquireResources(HostAdapter) && + BusLogic_CreateInitialCCBs(HostAdapter) && + BusLogic_InitializeHostAdapter(HostAdapter) && + BusLogic_TargetDeviceInquiry(HostAdapter)) { /* Initialization has been completed successfully. Release and re-register usage of the I/O Address range so that the Model Name of the Host Adapter will appear, and initialize the SCSI Host structure. */ - release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); - if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, HostAdapter->FullModelName)) { - printk(KERN_WARNING "BusLogic: Release and re-register of " "port 0x%04lx failed \n", (unsigned long) HostAdapter->IO_Address); + release_region(HostAdapter->IO_Address, + HostAdapter->AddressCount); + if (!request_region(HostAdapter->IO_Address, + HostAdapter->AddressCount, + HostAdapter->FullModelName)) { + printk(KERN_WARNING + "BusLogic: Release and re-register of " + "port 0x%04lx failed \n", + (unsigned long)HostAdapter->IO_Address); BusLogic_DestroyCCBs(HostAdapter); BusLogic_ReleaseResources(HostAdapter); list_del(&HostAdapter->host_list); scsi_host_put(Host); + ret = -ENOMEM; } else { - BusLogic_InitializeHostStructure(HostAdapter, Host); - scsi_add_host(Host, HostAdapter->PCI_Device ? &HostAdapter->PCI_Device->dev : NULL); - scsi_scan_host(Host); - BusLogicHostAdapterCount++; + BusLogic_InitializeHostStructure(HostAdapter, + Host); + if (scsi_add_host(Host, HostAdapter->PCI_Device + ? 
&HostAdapter->PCI_Device->dev + : NULL)) { + printk(KERN_WARNING + "BusLogic: scsi_add_host()" + "failed!\n"); + BusLogic_DestroyCCBs(HostAdapter); + BusLogic_ReleaseResources(HostAdapter); + list_del(&HostAdapter->host_list); + scsi_host_put(Host); + ret = -ENODEV; + } else { + scsi_scan_host(Host); + BusLogicHostAdapterCount++; + } } } else { /* @@ -2315,12 +2340,13 @@ static int __init BusLogic_init(void) BusLogic_ReleaseResources(HostAdapter); list_del(&HostAdapter->host_list); scsi_host_put(Host); + ret = -ENODEV; } } kfree(PrototypeHostAdapter); kfree(BusLogic_ProbeInfoList); BusLogic_ProbeInfoList = NULL; - return 0; + return ret; } @@ -2954,6 +2980,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou } +#if 0 /* BusLogic_AbortCommand aborts Command if possible. */ @@ -3024,6 +3051,7 @@ static int BusLogic_AbortCommand(struct scsi_cmnd *Command) return SUCCESS; } +#endif /* BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all currently executing SCSI Commands as having been Reset. -- cgit v0.10.2 From f4ad7b5807385ad1fed0347d966e51a797cd1013 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Fri, 25 Aug 2006 13:48:18 -0500 Subject: [SCSI] scsi_transport_sas: remove local_attached flag This flag denotes local attachment of the phy. There are two problems with it: 1) It's actually redundant ... you can get the same information simply by seeing whether a host is the phys parent 2) we condition a lot of phy parameters on it on the false assumption that we can only control local phys. I'm wiring up phy resets in the aic94xx now, and it will be able to reset non-local phys as well. I fixed 2) by moving the local check into the reset and stats function of the mptsas, since that seems to be the only HBA that can't (currently) control non-local phys. Signed-off-by: James Bottomley diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index dfdd1e4..b752a47 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -852,6 +852,10 @@ static int mptsas_get_linkerrors(struct sas_phy *phy) dma_addr_t dma_handle; int error; + /* FIXME: only have link errors on local phys */ + if (!scsi_is_sas_phy_local(phy)) + return -EINVAL; + hdr.PageVersion = MPI_SASPHY1_PAGEVERSION; hdr.ExtPageLength = 0; hdr.PageNumber = 1 /* page number 1*/; @@ -924,6 +928,10 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) unsigned long timeleft; int error = -ERESTARTSYS; + /* FIXME: fusion doesn't allow non-local phy reset */ + if (!scsi_is_sas_phy_local(phy)) + return -EINVAL; + /* not implemented for expanders */ if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP) return -ENXIO; @@ -1570,9 +1578,6 @@ static int mptsas_probe_one_phy(struct device *dev, if (!phy_info->phy) { - if (local) - phy->local_attached = 1; - error = sas_phy_add(phy); if (error) { sas_phy_free(phy); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 5a625c3..d518c12 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -266,9 +266,6 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \ struct sas_internal *i = to_sas_internal(shost->transportt); \ int error; \ \ - if (!phy->local_attached) \ - return -EINVAL; \ - \ error = i->f->get_linkerrors ? 
i->f->get_linkerrors(phy) : 0; \ if (error) \ return error; \ @@ -299,9 +296,6 @@ static ssize_t do_sas_phy_reset(struct class_device *cdev, struct sas_internal *i = to_sas_internal(shost->transportt); int error; - if (!phy->local_attached) - return -EINVAL; - error = i->f->phy_reset(phy, hard_reset); if (error) return error; @@ -849,7 +843,7 @@ show_sas_rphy_enclosure_identifier(struct class_device *cdev, char *buf) * Only devices behind an expander are supported, because the * enclosure identifier is a SMP feature. */ - if (phy->local_attached) + if (scsi_is_sas_phy_local(phy)) return -EINVAL; error = i->f->get_enclosure_identifier(rphy, &identifier); @@ -870,7 +864,7 @@ show_sas_rphy_bay_identifier(struct class_device *cdev, char *buf) struct sas_internal *i = to_sas_internal(shost->transportt); int val; - if (phy->local_attached) + if (scsi_is_sas_phy_local(phy)) return -EINVAL; val = i->f->get_bay_identifier(rphy); diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 6cc2314..eeb2200 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -57,9 +57,6 @@ struct sas_phy { enum sas_linkrate maximum_linkrate_hw; enum sas_linkrate maximum_linkrate; - /* internal state */ - unsigned int local_attached : 1; - /* link error statistics */ u32 invalid_dword_count; u32 running_disparity_error_count; @@ -196,4 +193,6 @@ scsi_is_sas_expander_device(struct device *dev) rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE; } +#define scsi_is_sas_phy_local(phy) scsi_is_host_device((phy)->dev.parent) + #endif /* SCSI_TRANSPORT_SAS_H */ -- cgit v0.10.2 From 2908d778ab3e244900c310974e1fc1c69066e450 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 29 Aug 2006 09:22:51 -0500 Subject: [SCSI] aic94xx: new driver This is the end point of the separate aic94xx driver based on the original driver and transport class from Luben Tuikov The log of the separate development is: Alexis Bruemmer: o aic94xx: fix hotplug/unplug for expanderless systems o aic94xx: disable split completion timer/setting by default o aic94xx: wide port off expander support o aic94xx: remove various inline functions o aic94xx: use bitops o aic94xx: remove queue comment o aic94xx: remove sas_common.c o aic94xx: sas remove depot's o aic94xx: use available list_for_each_entry_safe_reverse() o aic94xx: sas header file merge James Bottomley: o aic94xx: fix TF_TMF_NO_CTX processing o aic94xx: convert to request_firmware interface o aic94xx: fix hotplug/unplug o aic94xx: add link error counts to the expander phys o aic94xx: add transport class phy reset capability o aic94xx: remove local_attached flag o Remove README o Fixup Makefile variable for libsas rename o Rename sas->libsas o aic94xx: correct return code for sas_discover_event o aic94xx: use parent backlink port o aic94xx: remove channel abstraction o aic94xx: fix routing algorithms o aic94xx: add backlink port o aic94xx: fix cascaded expander properties o aic94xx: fix sleep under lock o aic94xx: fix panic on module removal in complex topology o aic94xx: make use of the new sas_port o rename sas_port to asd_sas_port o Fix for eh_strategy_handler move o aic94xx: move entirely over to correct transport class formulation o remove last vestages of sas_rphy_alloc() o update for eh_timed_out move o Preliminary expander support for aic94xx o sas: remove event thread o minor warning cleanups o remove last vestiges of id mapping arrays o Further updates o Convert aic94xx over entirely to the transport class end device 
and o update aic94xx/sas to use the new sas transport class end device o [PATCH] aic94xx: attaching to the sas transport class o Add missing completion removal from prior patch o [PATCH] aic94xx: attaching to the sas transport class o Build fixes from akpm Jeff Garzik: o [scsi aic94xx] Remove ->owner from PCI info table Luben Tuikov: o initial aic94xx driver Mike Anderson: o aic94xx: fix panic on module insertion o aic94xx: stub out SATA_DEV case o aic94xx: compile warning cleanups o aic94xx: sas_alloc_task o aic94xx: ref count update o aic94xx nexus loss time value o [PATCH] aic94xx: driver assertion in non-x86 BIOS env Randy Dunlap: o libsas: externs not needed Robert Tarte: o aic94xx: sequence patch - fixes SATA support Signed-off-by: James Bottomley diff --git a/Documentation/scsi/libsas.txt b/Documentation/scsi/libsas.txt new file mode 100644 index 0000000..9e2078b --- /dev/null +++ b/Documentation/scsi/libsas.txt @@ -0,0 +1,484 @@ +SAS Layer +--------- + +The SAS Layer is a management infrastructure which manages +SAS LLDDs. It sits between SCSI Core and SAS LLDDs. The +layout is as follows: while SCSI Core is concerned with +SAM/SPC issues, and a SAS LLDD+sequencer is concerned with +phy/OOB/link management, the SAS layer is concerned with: + + * SAS Phy/Port/HA event management (LLDD generates, + SAS Layer processes), + * SAS Port management (creation/destruction), + * SAS Domain discovery and revalidation, + * SAS Domain device management, + * SCSI Host registration/unregistration, + * Device registration with SCSI Core (SAS) or libata + (SATA), and + * Expander management and exporting expander control + to user space. + +A SAS LLDD is a PCI device driver. It is concerned with +phy/OOB management, and vendor specific tasks and generates +events to the SAS layer. + +The SAS Layer does most SAS tasks as outlined in the SAS 1.1 +spec. + +The sas_ha_struct describes the SAS LLDD to the SAS layer. +Most of it is used by the SAS Layer but a few fields need to +be initialized by the LLDDs. + +After initializing your hardware, from the probe() function +you call sas_register_ha(). It will register your LLDD with +the SCSI subsystem, creating a SCSI host and it will +register your SAS driver with the sysfs SAS tree it creates. +It will then return. Then you enable your phys to actually +start OOB (at which point your driver will start calling the +notify_* event callbacks). + +Structure descriptions: + +struct sas_phy -------------------- +Normally this is statically embedded to your driver's +phy structure: + struct my_phy { + blah; + struct sas_phy sas_phy; + bleh; + }; +And then all the phys are an array of my_phy in your HA +struct (shown below). + +Then as you go along and initialize your phys you also +initialize the sas_phy struct, along with your own +phy structure. + +In general, the phys are managed by the LLDD and the ports +are managed by the SAS layer. So the phys are initialized +and updated by the LLDD and the ports are initialized and +updated by the SAS layer. + +There is a scheme where the LLDD can RW certain fields, +and the SAS layer can only read such ones, and vice versa. +The idea is to avoid unnecessary locking. + +enabled -- must be set (0/1) +id -- must be set [0,MAX_PHYS) +class, proto, type, role, oob_mode, linkrate -- must be set +oob_mode -- you set this when OOB has finished and then notify +the SAS Layer. + +sas_addr -- this normally points to an array holding the sas +address of the phy, possibly somewhere in your my_phy +struct. 
+ +attached_sas_addr -- set this when you (LLDD) receive an +IDENTIFY frame or a FIS frame, _before_ notifying the SAS +layer. The idea is that sometimes the LLDD may want to fake +or provide a different SAS address on that phy/port and this +allows it to do this. At best you should copy the sas +address from the IDENTIFY frame or maybe generate a SAS +address for SATA directly attached devices. The Discover +process may later change this. + +frame_rcvd -- this is where you copy the IDENTIFY/FIS frame +when you get it; you lock, copy, set frame_rcvd_size and +unlock the lock, and then call the event. It is a pointer +since there's no way to know your hw frame size _exactly_, +so you define the actual array in your phy struct and let +this pointer point to it. You copy the frame from your +DMAable memory to that area holding the lock. + +sas_prim -- this is where primitives go when they're +received. See sas.h. Grab the lock, set the primitive, +release the lock, notify. + +port -- this points to the sas_port if the phy belongs +to a port -- the LLDD only reads this. It points to the +sas_port this phy is part of. Set by the SAS Layer. + +ha -- may be set; the SAS layer sets it anyway. + +lldd_phy -- you should set this to point to your phy so you +can find your way around faster when the SAS layer calls one +of your callbacks and passes you a phy. If the sas_phy is +embedded you can also use container_of -- whatever you +prefer. + + +struct sas_port -------------------- +The LLDD doesn't set any fields of this struct -- it only +reads them. They should be self explanatory. + +phy_mask is 32 bit, this should be enough for now, as I +haven't heard of a HA having more than 8 phys. + +lldd_port -- I haven't found use for that -- maybe other +LLDD who wish to have internal port representation can make +use of this. + + +struct sas_ha_struct -------------------- +It normally is statically declared in your own LLDD +structure describing your adapter: +struct my_sas_ha { + blah; + struct sas_ha_struct sas_ha; + struct my_phy phys[MAX_PHYS]; + struct sas_port sas_ports[MAX_PHYS]; /* (1) */ + bleh; +}; + +(1) If your LLDD doesn't have its own port representation. + +What needs to be initialized (sample function given below). + +pcidev +sas_addr -- since the SAS layer doesn't want to mess with + memory allocation, etc, this points to statically + allocated array somewhere (say in your host adapter + structure) and holds the SAS address of the host + adapter as given by you or the manufacturer, etc. +sas_port +sas_phy -- an array of pointers to structures. (see + note above on sas_addr). + These must be set. See more notes below. +num_phys -- the number of phys present in the sas_phy array, + and the number of ports present in the sas_port + array. There can be a maximum num_phys ports (one per + port) so we drop the num_ports, and only use + num_phys. + +The event interface: + + /* LLDD calls these to notify the class of an event. */ + void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event); + void (*notify_port_event)(struct sas_phy *, enum port_event); + void (*notify_phy_event)(struct sas_phy *, enum phy_event); + +When sas_register_ha() returns, those are set and can be +called by the LLDD to notify the SAS layer of such events +the SAS layer. + +The port notification: + + /* The class calls these to notify the LLDD of an event. 
*/ + void (*lldd_port_formed)(struct sas_phy *); + void (*lldd_port_deformed)(struct sas_phy *); + +If the LLDD wants notification when a port has been formed +or deformed it sets those to a function satisfying the type. + +A SAS LLDD should also implement at least one of the Task +Management Functions (TMFs) described in SAM: + + /* Task Management Functions. Must be called from process context. */ + int (*lldd_abort_task)(struct sas_task *); + int (*lldd_abort_task_set)(struct domain_device *, u8 *lun); + int (*lldd_clear_aca)(struct domain_device *, u8 *lun); + int (*lldd_clear_task_set)(struct domain_device *, u8 *lun); + int (*lldd_I_T_nexus_reset)(struct domain_device *); + int (*lldd_lu_reset)(struct domain_device *, u8 *lun); + int (*lldd_query_task)(struct sas_task *); + +For more information please read SAM from T10.org. + +Port and Adapter management: + + /* Port and Adapter management */ + int (*lldd_clear_nexus_port)(struct sas_port *); + int (*lldd_clear_nexus_ha)(struct sas_ha_struct *); + +A SAS LLDD should implement at least one of those. + +Phy management: + + /* Phy management */ + int (*lldd_control_phy)(struct sas_phy *, enum phy_func); + +lldd_ha -- set this to point to your HA struct. You can also +use container_of if you embedded it as shown above. + +A sample initialization and registration function +can look like this (called last thing from probe()) +*but* before you enable the phys to do OOB: + +static int register_sas_ha(struct my_sas_ha *my_ha) +{ + int i; + static struct sas_phy *sas_phys[MAX_PHYS]; + static struct sas_port *sas_ports[MAX_PHYS]; + + my_ha->sas_ha.sas_addr = &my_ha->sas_addr[0]; + + for (i = 0; i < MAX_PHYS; i++) { + sas_phys[i] = &my_ha->phys[i].sas_phy; + sas_ports[i] = &my_ha->sas_ports[i]; + } + + my_ha->sas_ha.sas_phy = sas_phys; + my_ha->sas_ha.sas_port = sas_ports; + my_ha->sas_ha.num_phys = MAX_PHYS; + + my_ha->sas_ha.lldd_port_formed = my_port_formed; + + my_ha->sas_ha.lldd_dev_found = my_dev_found; + my_ha->sas_ha.lldd_dev_gone = my_dev_gone; + + my_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num; (1) + + my_ha->sas_ha.lldd_queue_size = ha_can_queue; + my_ha->sas_ha.lldd_execute_task = my_execute_task; + + my_ha->sas_ha.lldd_abort_task = my_abort_task; + my_ha->sas_ha.lldd_abort_task_set = my_abort_task_set; + my_ha->sas_ha.lldd_clear_aca = my_clear_aca; + my_ha->sas_ha.lldd_clear_task_set = my_clear_task_set; + my_ha->sas_ha.lldd_I_T_nexus_reset= NULL; (2) + my_ha->sas_ha.lldd_lu_reset = my_lu_reset; + my_ha->sas_ha.lldd_query_task = my_query_task; + + my_ha->sas_ha.lldd_clear_nexus_port = my_clear_nexus_port; + my_ha->sas_ha.lldd_clear_nexus_ha = my_clear_nexus_ha; + + my_ha->sas_ha.lldd_control_phy = my_control_phy; + + return sas_register_ha(&my_ha->sas_ha); +} + +(1) This is normally a LLDD parameter, something of the +lines of a task collector. What it tells the SAS Layer is +whether the SAS layer should run in Direct Mode (default: +value 0 or 1) or Task Collector Mode (value greater than 1). + +In Direct Mode, the SAS Layer calls Execute Task as soon as +it has a command to send to the SDS, _and_ this is a single +command, i.e. not linked. + +Some hardware (e.g. aic94xx) has the capability to DMA more +than one task at a time (interrupt) from host memory. Task +Collector Mode is an optional feature for HAs which support +this in their hardware. (Again, it is completely optional +even if your hardware supports it.) 
+ +In Task Collector Mode, the SAS Layer would do _natural_ +coalescing of tasks and at the appropriate moment it would +call your driver to DMA more than one task in a single HA +interrupt. DMBS may want to use this by insmod/modprobe +setting the lldd_max_execute_num to something greater than +1. + +(2) SAS 1.1 does not define I_T Nexus Reset TMF. + +Events +------ + +Events are _the only way_ a SAS LLDD notifies the SAS layer +of anything. There is no other method or way a LLDD to tell +the SAS layer of anything happening internally or in the SAS +domain. + +Phy events: + PHYE_LOSS_OF_SIGNAL, (C) + PHYE_OOB_DONE, + PHYE_OOB_ERROR, (C) + PHYE_SPINUP_HOLD. + +Port events, passed on a _phy_: + PORTE_BYTES_DMAED, (M) + PORTE_BROADCAST_RCVD, (E) + PORTE_LINK_RESET_ERR, (C) + PORTE_TIMER_EVENT, (C) + PORTE_HARD_RESET. + +Host Adapter event: + HAE_RESET + +A SAS LLDD should be able to generate + - at least one event from group C (choice), + - events marked M (mandatory) are mandatory (only one), + - events marked E (expander) if it wants the SAS layer + to handle domain revalidation (only one such). + - Unmarked events are optional. + +Meaning: + +HAE_RESET -- when your HA got internal error and was reset. + +PORTE_BYTES_DMAED -- on receiving an IDENTIFY/FIS frame +PORTE_BROADCAST_RCVD -- on receiving a primitive +PORTE_LINK_RESET_ERR -- timer expired, loss of signal, loss +of DWS, etc. (*) +PORTE_TIMER_EVENT -- DWS reset timeout timer expired (*) +PORTE_HARD_RESET -- Hard Reset primitive received. + +PHYE_LOSS_OF_SIGNAL -- the device is gone (*) +PHYE_OOB_DONE -- OOB went fine and oob_mode is valid +PHYE_OOB_ERROR -- Error while doing OOB, the device probably +got disconnected. (*) +PHYE_SPINUP_HOLD -- SATA is present, COMWAKE not sent. + +(*) should set/clear the appropriate fields in the phy, + or alternatively call the inlined sas_phy_disconnected() + which is just a helper, from their tasklet. + +The Execute Command SCSI RPC: + + int (*lldd_execute_task)(struct sas_task *, int num, + unsigned long gfp_flags); + +Used to queue a task to the SAS LLDD. @task is the tasks to +be executed. @num should be the number of tasks being +queued at this function call (they are linked listed via +task::list), @gfp_mask should be the gfp_mask defining the +context of the caller. + +This function should implement the Execute Command SCSI RPC, +or if you're sending a SCSI Task as linked commands, you +should also use this function. + +That is, when lldd_execute_task() is called, the command(s) +go out on the transport *immediately*. There is *no* +queuing of any sort and at any level in a SAS LLDD. + +The use of task::list is two-fold, one for linked commands, +the other discussed below. + +It is possible to queue up more than one task at a time, by +initializing the list element of struct sas_task, and +passing the number of tasks enlisted in this manner in num. + +Returns: -SAS_QUEUE_FULL, -ENOMEM, nothing was queued; + 0, the task(s) were queued. + +If you want to pass num > 1, then either +A) you're the only caller of this function and keep track + of what you've queued to the LLDD, or +B) you know what you're doing and have a strategy of + retrying. + +As opposed to queuing one task at a time (function call), +batch queuing of tasks, by having num > 1, greatly +simplifies LLDD code, sequencer code, and _hardware design_, +and has some performance advantages in certain situations +(DBMS). 
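As a rough sketch only (not from this patch; task construction, error
handling and locking are omitted, in the same spirit as the
register_sas_ha() sample above), batch queuing two already-built tasks
could look like:

static int my_send_two_tasks(struct sas_ha_struct *sas_ha,
			     struct sas_task *first,
			     struct sas_task *second)
{
	/* Both task::list heads are assumed to have been initialized
	 * with INIT_LIST_HEAD(); enlist the second task behind the
	 * first, as described above.
	 */
	list_add_tail(&second->list, &first->list);

	/* num == 2: both tasks are handed over in this single call and
	 * go out on the transport immediately -- there is no queuing
	 * at any level in the LLDD.  Per the contract above, this
	 * returns 0 if the tasks were queued, or -SAS_QUEUE_FULL or
	 * -ENOMEM with nothing queued.
	 */
	return sas_ha->lldd_execute_task(first, 2, GFP_KERNEL);
}
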
+ +The LLDD advertises if it can take more than one command at +a time at lldd_execute_task(), by setting the +lldd_max_execute_num parameter (controlled by "collector" +module parameter in aic94xx SAS LLDD). + +You should leave this to the default 1, unless you know what +you're doing. + +This is a function of the LLDD, to which the SAS layer can +cater to. + +int lldd_queue_size + The host adapter's queue size. This is the maximum +number of commands the lldd can have pending to domain +devices on behalf of all upper layers submitting through +lldd_execute_task(). + +You really want to set this to something (much) larger than +1. + +This _really_ has absolutely nothing to do with queuing. +There is no queuing in SAS LLDDs. + +struct sas_task { + dev -- the device this task is destined to + list -- must be initialized (INIT_LIST_HEAD) + task_proto -- _one_ of enum sas_proto + scatter -- pointer to scatter gather list array + num_scatter -- number of elements in scatter + total_xfer_len -- total number of bytes expected to be transfered + data_dir -- PCI_DMA_... + task_done -- callback when the task has finished execution +}; + +When an external entity, entity other than the LLDD or the +SAS Layer, wants to work with a struct domain_device, it +_must_ call kobject_get() when getting a handle on the +device and kobject_put() when it is done with the device. + +This does two things: + A) implements proper kfree() for the device; + B) increments/decrements the kref for all players: + domain_device + all domain_device's ... (if past an expander) + port + host adapter + pci device + and up the ladder, etc. + +DISCOVERY +--------- + +The sysfs tree has the following purposes: + a) It shows you the physical layout of the SAS domain at + the current time, i.e. how the domain looks in the + physical world right now. + b) Shows some device parameters _at_discovery_time_. + +This is a link to the tree(1) program, very useful in +viewing the SAS domain: +ftp://mama.indstate.edu/linux/tree/ +I expect user space applications to actually create a +graphical interface of this. + +That is, the sysfs domain tree doesn't show or keep state if +you e.g., change the meaning of the READY LED MEANING +setting, but it does show you the current connection status +of the domain device. + +Keeping internal device state changes is responsibility of +upper layers (Command set drivers) and user space. + +When a device or devices are unplugged from the domain, this +is reflected in the sysfs tree immediately, and the device(s) +removed from the system. + +The structure domain_device describes any device in the SAS +domain. It is completely managed by the SAS layer. A task +points to a domain device, this is how the SAS LLDD knows +where to send the task(s) to. A SAS LLDD only reads the +contents of the domain_device structure, but it never creates +or destroys one. + +Expander management from User Space +----------------------------------- + +In each expander directory in sysfs, there is a file called +"smp_portal". It is a binary sysfs attribute file, which +implements an SMP portal (Note: this is *NOT* an SMP port), +to which user space applications can send SMP requests and +receive SMP responses. + +Functionality is deceptively simple: + +1. Build the SMP frame you want to send. The format and layout + is described in the SAS spec. Leave the CRC field equal 0. +open(2) +2. Open the expander's SMP portal sysfs file in RW mode. +write(2) +3. Write the frame you built in 1. +read(2) +4. 
Read the amount of data you expect to receive for the frame you built. + If you receive different amount of data you expected to receive, + then there was some kind of error. +close(2) +All this process is shown in detail in the function do_smp_func() +and its callers, in the file "expander_conf.c". + +The kernel functionality is implemented in the file +"sas_expander.c". + +The program "expander_conf.c" implements this. It takes one +argument, the sysfs file name of the SMP portal to the +expander, and gives expander information, including routing +tables. + +The SMP portal gives you complete control of the expander, +so please be careful. diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d61662c..7de5fdf 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -209,7 +209,7 @@ config SCSI_LOGGING there should be no noticeable performance impact as long as you have logging turned off. -menu "SCSI Transport Attributes" +menu "SCSI Transports" depends on SCSI config SCSI_SPI_ATTRS @@ -242,6 +242,8 @@ config SCSI_SAS_ATTRS If you wish to export transport-specific information about each attached SAS device to sysfs, say Y. +source "drivers/scsi/libsas/Kconfig" + endmenu menu "SCSI low-level drivers" @@ -431,6 +433,7 @@ config SCSI_AIC7XXX_OLD module will be called aic7xxx_old. source "drivers/scsi/aic7xxx/Kconfig.aic79xx" +source "drivers/scsi/aic94xx/Kconfig" # All the I2O code and drivers do not seem to be 64bit safe. config SCSI_DPT_I2O diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index b2de9bf..83da70d 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o +obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o @@ -68,6 +69,7 @@ obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/ obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ obj-$(CONFIG_SCSI_AACRAID) += aacraid/ obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o +obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ obj-$(CONFIG_SCSI_IPS) += ips.o obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig new file mode 100644 index 0000000..0ed391d --- /dev/null +++ b/drivers/scsi/aic94xx/Kconfig @@ -0,0 +1,41 @@ +# +# Kernel configuration file for aic94xx SAS/SATA driver. +# +# Copyright (c) 2005 Adaptec, Inc. All rights reserved. +# Copyright (c) 2005 Luben Tuikov +# +# This file is licensed under GPLv2. +# +# This file is part of the aic94xx driver. +# +# The aic94xx driver is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# The aic94xx driver is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Aic94xx Driver; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# + +config SCSI_AIC94XX + tristate "Adaptec AIC94xx SAS/SATA support" + depends on PCI + select SCSI_SAS_LIBSAS + help + This driver supports Adaptec's SAS/SATA 3Gb/s 64 bit PCI-X + AIC94xx chip based host adapters. + +config AIC94XX_DEBUG + bool "Compile in debug mode" + default y + depends on SCSI_AIC94XX + help + Compiles the aic94xx driver in debug mode. In debug mode, + the driver prints some messages to the console. diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile new file mode 100644 index 0000000..e6b7012 --- /dev/null +++ b/drivers/scsi/aic94xx/Makefile @@ -0,0 +1,39 @@ +# +# Makefile for Adaptec aic94xx SAS/SATA driver. +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov +# +# This file is licensed under GPLv2. +# +# This file is part of the the aic94xx driver. +# +# The aic94xx driver is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# The aic94xx driver is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with the aic94xx driver; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +ifeq ($(CONFIG_AIC94XX_DEBUG),y) + EXTRA_CFLAGS += -DASD_DEBUG -DASD_ENTER_EXIT +endif + +obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o +aic94xx-y += aic94xx_init.o \ + aic94xx_hwi.o \ + aic94xx_reg.o \ + aic94xx_sds.o \ + aic94xx_seq.o \ + aic94xx_dump.o \ + aic94xx_scb.o \ + aic94xx_dev.o \ + aic94xx_tmf.o \ + aic94xx_task.o diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h new file mode 100644 index 0000000..cb7caf1 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx.h @@ -0,0 +1,114 @@ +/* + * Aic94xx SAS/SATA driver header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * $Id: //depot/aic94xx/aic94xx.h#31 $ + */ + +#ifndef _AIC94XX_H_ +#define _AIC94XX_H_ + +#include +#include +#include + +#define ASD_DRIVER_NAME "aic94xx" +#define ASD_DRIVER_DESCRIPTION "Adaptec aic94xx SAS/SATA driver" + +#define asd_printk(fmt, ...) 
printk(KERN_NOTICE ASD_DRIVER_NAME ": " fmt, ## __VA_ARGS__) + +#ifdef ASD_ENTER_EXIT +#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \ + __FUNCTION__) +#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \ + __FUNCTION__) +#else +#define ENTER +#define EXIT +#endif + +#ifdef ASD_DEBUG +#define ASD_DPRINTK asd_printk +#else +#define ASD_DPRINTK(fmt, ...) +#endif + +/* 2*ITNL timeout + 1 second */ +#define AIC94XX_SCB_TIMEOUT (5*HZ) + +extern kmem_cache_t *asd_dma_token_cache; +extern kmem_cache_t *asd_ascb_cache; +extern char sas_addr_str[2*SAS_ADDR_SIZE + 1]; + +static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr) +{ + int i; + for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2) + snprintf(p, 3, "%02X", sas_addr[i]); + *p = '\0'; +} + +static inline void asd_destringify_sas_addr(u8 *sas_addr, const char *p) +{ + int i; + for (i = 0; i < SAS_ADDR_SIZE; i++) { + u8 h, l; + if (!*p) + break; + h = isdigit(*p) ? *p-'0' : *p-'A'+10; + p++; + l = isdigit(*p) ? *p-'0' : *p-'A'+10; + p++; + sas_addr[i] = (h<<4) | l; + } +} + +struct asd_ha_struct; +struct asd_ascb; + +int asd_read_ocm(struct asd_ha_struct *asd_ha); +int asd_read_flash(struct asd_ha_struct *asd_ha); + +int asd_dev_found(struct domain_device *dev); +void asd_dev_gone(struct domain_device *dev); + +void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id); + +int asd_execute_task(struct sas_task *, int num, unsigned long gfp_flags); + +/* ---------- TMFs ---------- */ +int asd_abort_task(struct sas_task *); +int asd_abort_task_set(struct domain_device *, u8 *lun); +int asd_clear_aca(struct domain_device *, u8 *lun); +int asd_clear_task_set(struct domain_device *, u8 *lun); +int asd_lu_reset(struct domain_device *, u8 *lun); +int asd_query_task(struct sas_task *); + +/* ---------- Adapter and Port management ---------- */ +int asd_clear_nexus_port(struct asd_sas_port *port); +int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha); + +/* ---------- Phy Management ---------- */ +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func); + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c new file mode 100644 index 0000000..6f8901b --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -0,0 +1,353 @@ +/* + * Aic94xx SAS/SATA DDB management + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * $Id: //depot/aic94xx/aic94xx_dev.c#21 $ + */ + +#include "aic94xx.h" +#include "aic94xx_hwi.h" +#include "aic94xx_reg.h" +#include "aic94xx_sas.h" + +#define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \ + (_ha)->hw_prof.max_ddbs) +#define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) +#define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) + +static inline int asd_get_ddb(struct asd_ha_struct *asd_ha) +{ + unsigned long flags; + int ddb, i; + + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); + ddb = FIND_FREE_DDB(asd_ha); + if (ddb >= asd_ha->hw_prof.max_ddbs) { + ddb = -ENOMEM; + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); + goto out; + } + SET_DDB(ddb, asd_ha); + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); + + for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) + asd_ddbsite_write_dword(asd_ha, ddb, i, 0); +out: + return ddb; +} + +#define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag) +#define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr) +#define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head) +#define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type) +#define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, conn_mask) +#define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags) +#define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2) +#define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail) +#define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail) +#define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb) +#define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn) +#define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts) +#define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr) +#define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask) +#define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags) +#define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status) +#define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr) +#define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout) + +static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) +{ + unsigned long flags; + + if (!ddb || ddb >= 0xFFFF) + return; + asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); + CLEAR_DDB(ddb, asd_ha); + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); +} + +static inline void asd_set_ddb_type(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb = (int) (unsigned long) dev->lldd_dev; + + if (dev->dev_type == SATA_PM_PORT) + asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); + else if (dev->tproto) + asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); + else + asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR); +} + +static int asd_init_sata_tag_ddb(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = 
dev->port->ha->lldd_ha;
+ int ddb, i;
+
+ ddb = asd_get_ddb(asd_ha);
+ if (ddb < 0)
+ return ddb;
+
+ for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2)
+ asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
+
+ asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
+ SISTER_DDB, ddb);
+ return 0;
+}
+
+static inline int asd_init_sata(struct domain_device *dev)
+{
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ int ddb = (int) (unsigned long) dev->lldd_dev;
+ u32 qdepth = 0;
+ int res = 0;
+
+ asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
+ if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
+ dev->sata_dev.identify_device &&
+ dev->sata_dev.identify_device[10] != 0) {
+ u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
+ u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
+
+ if (w76 & 0x100) /* NCQ? */
+ qdepth = (w75 & 0x1F) + 1;
+ asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
+ (1<<qdepth)-1);
+ asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
+ }
+ if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
+ dev->dev_type == SATA_PM_PORT) {
+ struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
+ dev->frame_rcvd;
+ asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
+ }
+ asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
+ if (qdepth > 0)
+ res = asd_init_sata_tag_ddb(dev);
+ return res;
+}
+
+static int asd_init_target_ddb(struct domain_device *dev)
+{
+ int ddb, i;
+ struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+ u8 flags = 0;
+
+ ddb = asd_get_ddb(asd_ha);
+ if (ddb < 0)
+ return ddb;
+
+ dev->lldd_dev = (void *) (unsigned long) ddb;
+
+ asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE);
+ asd_ddbsite_write_byte(asd_ha, ddb, 1, 0);
+ asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF);
+ for (i = 0; i < SAS_ADDR_SIZE; i++)
+ asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i,
+ dev->sas_addr[i]);
+ asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF);
+ asd_set_ddb_type(dev);
+ asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
+ if (dev->port->oob_mode != SATA_OOB_MODE) {
+ flags |= OPEN_REQUIRED;
+ if ((dev->dev_type == SATA_DEV) ||
+ (dev->tproto & SAS_PROTO_STP)) {
+ struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
+ if (rps_resp->frame_type == SMP_RESPONSE &&
+ rps_resp->function == SMP_REPORT_PHY_SATA &&
+ rps_resp->result == SMP_RESP_FUNC_ACC) {
+ if (rps_resp->rps.affil_valid)
+ flags |= STP_AFFIL_POL;
+ if (rps_resp->rps.affil_supp)
+ flags |= SUPPORTS_AFFIL;
+ }
+ } else {
+ flags |= CONCURRENT_CONN_SUPP;
+ if (!dev->parent &&
+ (dev->dev_type == EDGE_DEV ||
+ dev->dev_type == FANOUT_DEV))
+ asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
+ 4);
+ else
+ asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
+ dev->pathways);
+ asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
+ }
+ }
+ if (dev->dev_type == SATA_PM)
+ flags |= SATA_MULTIPORT;
+ asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
+
+ flags = 0;
+ if (dev->tproto & SAS_PROTO_STP)
+ flags |= STP_CL_POL_NO_TX;
+ asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
+
+ asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF);
+ asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
+ asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) {
+ i = asd_init_sata(dev);
+ if (i < 0) {
+ asd_free_ddb(asd_ha, ddb);
+ return i;
+ }
+ }
+
+ if (dev->dev_type == SAS_END_DEV) {
+ struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
+ if
(rdev->I_T_nexus_loss_timeout > 0) + asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, + min(rdev->I_T_nexus_loss_timeout, + (u16)ITNL_TIMEOUT_CONST)); + else + asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, + (u16)ITNL_TIMEOUT_CONST); + } + return 0; +} + +static int asd_init_sata_pm_table_ddb(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb, i; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + for (i = 0; i < 32; i += 2) + asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); + + asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, + SISTER_DDB, ddb); + + return 0; +} + +#define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags) +#define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb) + +/** + * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port + * dev: pointer to domain device + * + * For SATA Port Multiplier Ports we need to allocate one SATA Port + * Multiplier Port DDB and depending on whether the target on it + * supports SATA II NCQ, one SATA Tag DDB. + */ +static int asd_init_sata_pm_port_ddb(struct domain_device *dev) +{ + int ddb, i, parent_ddb, pmtable_ddb; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + u8 flags; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + asd_set_ddb_type(dev); + flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET; + asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags); + asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); + asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); + asd_init_sata(dev); + + parent_ddb = (int) (unsigned long) dev->parent->lldd_dev; + asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb); + pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB); + asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb); + + if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) { + i = asd_init_sata_tag_ddb(dev); + if (i < 0) { + asd_free_ddb(asd_ha, ddb); + return i; + } + } + return 0; +} + +static int asd_init_initiator_ddb(struct domain_device *dev) +{ + return -ENODEV; +} + +/** + * asd_init_sata_pm_ddb -- SATA Port Multiplier + * dev: pointer to domain device + * + * For STP and direct-attached SATA Port Multipliers we need + * one target port DDB entry and one SATA PM table DDB entry. 
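+ * If allocating the PM table DDB fails, the target port DDB set up in
+ * the first step is freed again so that no DDB site is leaked.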
+ */ +static int asd_init_sata_pm_ddb(struct domain_device *dev) +{ + int res = 0; + + res = asd_init_target_ddb(dev); + if (res) + goto out; + res = asd_init_sata_pm_table_ddb(dev); + if (res) + asd_free_ddb(dev->port->ha->lldd_ha, + (int) (unsigned long) dev->lldd_dev); +out: + return res; +} + +int asd_dev_found(struct domain_device *dev) +{ + int res = 0; + + switch (dev->dev_type) { + case SATA_PM: + res = asd_init_sata_pm_ddb(dev); + break; + case SATA_PM_PORT: + res = asd_init_sata_pm_port_ddb(dev); + break; + default: + if (dev->tproto) + res = asd_init_target_ddb(dev); + else + res = asd_init_initiator_ddb(dev); + } + return res; +} + +void asd_dev_gone(struct domain_device *dev) +{ + int ddb, sister_ddb; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + + ddb = (int) (unsigned long) dev->lldd_dev; + sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); + + if (sister_ddb != 0xFFFF) + asd_free_ddb(asd_ha, sister_ddb); + asd_free_ddb(asd_ha, ddb); + dev->lldd_dev = NULL; +} diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c new file mode 100644 index 0000000..e6ade59 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dump.c @@ -0,0 +1,959 @@ +/* + * Aic94xx SAS/SATA driver dump interface. + * + * Copyright (C) 2004 Adaptec, Inc. All rights reserved. + * Copyright (C) 2004 David Chaw + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * 2005/07/14/LT Complete overhaul of this file. Update pages, register + * locations, names, etc. Make use of macros. Print more information. + * Print all cseq and lseq mip and mdp. 
+ * + */ + +#include "linux/pci.h" +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_reg_def.h" +#include "aic94xx_sas.h" + +#include "aic94xx_dump.h" + +#ifdef ASD_DEBUG + +#define MD(x) (1 << (x)) +#define MODE_COMMON (1 << 31) +#define MODE_0_7 (0xFF) + +static const struct lseq_cio_regs { + char *name; + u32 offs; + u8 width; + u32 mode; +} LSEQmCIOREGS[] = { + {"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) }, + {"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) }, + {"LmREQMBX", 0x30, 32, MODE_COMMON }, + {"LmRSPMBX", 0x34, 32, MODE_COMMON }, + {"LmMnINT", 0x38, 32, MODE_0_7 }, + {"LmMnINTEN", 0x3C, 32, MODE_0_7 }, + {"LmXMTPRIMD", 0x40, 32, MODE_COMMON }, + {"LmXMTPRIMCS", 0x44, 8, MODE_COMMON }, + {"LmCONSTAT", 0x45, 8, MODE_COMMON }, + {"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) }, + {"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) }, + {"LmMnEXPHDRP", 0x48, 8, MD(0) }, + {"LmMnSASAALIGN", 0x48, 8, MD(1) }, + {"LmMnMSKHDRP", 0x49, 8, MD(0) }, + {"LmMnSTPALIGN", 0x49, 8, MD(1) }, + {"LmMnRCVHDRP", 0x4A, 8, MD(0) }, + {"LmMnXMTHDRP", 0x4A, 8, MD(1) }, + {"LmALIGNMODE", 0x4B, 8, MD(1) }, + {"LmMnEXPRCVCNT", 0x4C, 32, MD(0) }, + {"LmMnXMTCNT", 0x4C, 32, MD(1) }, + {"LmMnCURRTAG", 0x54, 16, MD(0) }, + {"LmMnPREVTAG", 0x56, 16, MD(0) }, + {"LmMnACKOFS", 0x58, 8, MD(1) }, + {"LmMnXFRLVL", 0x59, 8, MD(0)|MD(1) }, + {"LmMnSGDMACTL", 0x5A, 8, MD(0)|MD(1) }, + {"LmMnSGDMASTAT", 0x5B, 8, MD(0)|MD(1) }, + {"LmMnDDMACTL", 0x5C, 8, MD(0)|MD(1) }, + {"LmMnDDMASTAT", 0x5D, 8, MD(0)|MD(1) }, + {"LmMnDDMAMODE", 0x5E, 16, MD(0)|MD(1) }, + {"LmMnPIPECTL", 0x61, 8, MD(0)|MD(1) }, + {"LmMnACTSCB", 0x62, 16, MD(0)|MD(1) }, + {"LmMnSGBHADR", 0x64, 8, MD(0)|MD(1) }, + {"LmMnSGBADR", 0x65, 8, MD(0)|MD(1) }, + {"LmMnSGDCNT", 0x66, 8, MD(0)|MD(1) }, + {"LmMnSGDMADR", 0x68, 32, MD(0)|MD(1) }, + {"LmMnSGDMADR", 0x6C, 32, MD(0)|MD(1) }, + {"LmMnXFRCNT", 0x70, 32, MD(0)|MD(1) }, + {"LmMnXMTCRC", 0x74, 32, MD(1) }, + {"LmCURRTAG", 0x74, 16, MD(0) }, + {"LmPREVTAG", 0x76, 16, MD(0) }, + {"LmMnDPSEL", 0x7B, 8, MD(0)|MD(1) }, + {"LmDPTHSTAT", 0x7C, 8, MODE_COMMON }, + {"LmMnHOLDLVL", 0x7D, 8, MD(0) }, + {"LmMnSATAFS", 0x7E, 8, MD(1) }, + {"LmMnCMPLTSTAT", 0x7F, 8, MD(0)|MD(1) }, + {"LmPRMSTAT0", 0x80, 32, MODE_COMMON }, + {"LmPRMSTAT1", 0x84, 32, MODE_COMMON }, + {"LmGPRMINT", 0x88, 8, MODE_COMMON }, + {"LmMnCURRSCB", 0x8A, 16, MD(0) }, + {"LmPRMICODE", 0x8C, 32, MODE_COMMON }, + {"LmMnRCVCNT", 0x90, 16, MD(0) }, + {"LmMnBUFSTAT", 0x92, 16, MD(0) }, + {"LmMnXMTHDRSIZE",0x92, 8, MD(1) }, + {"LmMnXMTSIZE", 0x93, 8, MD(1) }, + {"LmMnTGTXFRCNT", 0x94, 32, MD(0) }, + {"LmMnEXPROFS", 0x98, 32, MD(0) }, + {"LmMnXMTROFS", 0x98, 32, MD(1) }, + {"LmMnRCVROFS", 0x9C, 32, MD(0) }, + {"LmCONCTL", 0xA0, 16, MODE_COMMON }, + {"LmBITLTIMER", 0xA2, 16, MODE_COMMON }, + {"LmWWNLOW", 0xA8, 32, MODE_COMMON }, + {"LmWWNHIGH", 0xAC, 32, MODE_COMMON }, + {"LmMnFRMERR", 0xB0, 32, MD(0) }, + {"LmMnFRMERREN", 0xB4, 32, MD(0) }, + {"LmAWTIMER", 0xB8, 16, MODE_COMMON }, + {"LmAWTCTL", 0xBA, 8, MODE_COMMON }, + {"LmMnHDRCMPS", 0xC0, 32, MD(0) }, + {"LmMnXMTSTAT", 0xC4, 8, MD(1) }, + {"LmHWTSTATEN", 0xC5, 8, MODE_COMMON }, + {"LmMnRRDYRC", 0xC6, 8, MD(0) }, + {"LmMnRRDYTC", 0xC6, 8, MD(1) }, + {"LmHWTSTAT", 0xC7, 8, MODE_COMMON }, + {"LmMnDATABUFADR",0xC8, 16, MD(0)|MD(1) }, + {"LmDWSSTATUS", 0xCB, 8, MODE_COMMON }, + {"LmMnACTSTAT", 0xCE, 16, MD(0)|MD(1) }, + {"LmMnREQSCB", 0xD2, 16, MD(0)|MD(1) }, + {"LmXXXPRIM", 0xD4, 32, MODE_COMMON }, + {"LmRCVASTAT", 0xD9, 8, MODE_COMMON }, + {"LmINTDIS1", 0xDA, 8, MODE_COMMON }, + 
{"LmPSTORESEL", 0xDB, 8, MODE_COMMON }, + {"LmPSTORE", 0xDC, 32, MODE_COMMON }, + {"LmPRIMSTAT0EN", 0xE0, 32, MODE_COMMON }, + {"LmPRIMSTAT1EN", 0xE4, 32, MODE_COMMON }, + {"LmDONETCTL", 0xF2, 16, MODE_COMMON }, + {NULL, 0, 0, 0 } +}; +/* +static struct lseq_cio_regs LSEQmOOBREGS[] = { + {"OOB_BFLTR" ,0x100, 8, MD(5)}, + {"OOB_INIT_MIN" ,0x102,16, MD(5)}, + {"OOB_INIT_MAX" ,0x104,16, MD(5)}, + {"OOB_INIT_NEG" ,0x106,16, MD(5)}, + {"OOB_SAS_MIN" ,0x108,16, MD(5)}, + {"OOB_SAS_MAX" ,0x10A,16, MD(5)}, + {"OOB_SAS_NEG" ,0x10C,16, MD(5)}, + {"OOB_WAKE_MIN" ,0x10E,16, MD(5)}, + {"OOB_WAKE_MAX" ,0x110,16, MD(5)}, + {"OOB_WAKE_NEG" ,0x112,16, MD(5)}, + {"OOB_IDLE_MAX" ,0x114,16, MD(5)}, + {"OOB_BURST_MAX" ,0x116,16, MD(5)}, + {"OOB_XMIT_BURST" ,0x118, 8, MD(5)}, + {"OOB_SEND_PAIRS" ,0x119, 8, MD(5)}, + {"OOB_INIT_IDLE" ,0x11A, 8, MD(5)}, + {"OOB_INIT_NEGO" ,0x11C, 8, MD(5)}, + {"OOB_SAS_IDLE" ,0x11E, 8, MD(5)}, + {"OOB_SAS_NEGO" ,0x120, 8, MD(5)}, + {"OOB_WAKE_IDLE" ,0x122, 8, MD(5)}, + {"OOB_WAKE_NEGO" ,0x124, 8, MD(5)}, + {"OOB_DATA_KBITS" ,0x126, 8, MD(5)}, + {"OOB_BURST_DATA" ,0x128,32, MD(5)}, + {"OOB_ALIGN_0_DATA" ,0x12C,32, MD(5)}, + {"OOB_ALIGN_1_DATA" ,0x130,32, MD(5)}, + {"OOB_SYNC_DATA" ,0x134,32, MD(5)}, + {"OOB_D10_2_DATA" ,0x138,32, MD(5)}, + {"OOB_PHY_RST_CNT" ,0x13C,32, MD(5)}, + {"OOB_SIG_GEN" ,0x140, 8, MD(5)}, + {"OOB_XMIT" ,0x141, 8, MD(5)}, + {"FUNCTION_MAKS" ,0x142, 8, MD(5)}, + {"OOB_MODE" ,0x143, 8, MD(5)}, + {"CURRENT_STATUS" ,0x144, 8, MD(5)}, + {"SPEED_MASK" ,0x145, 8, MD(5)}, + {"PRIM_COUNT" ,0x146, 8, MD(5)}, + {"OOB_SIGNALS" ,0x148, 8, MD(5)}, + {"OOB_DATA_DET" ,0x149, 8, MD(5)}, + {"OOB_TIME_OUT" ,0x14C, 8, MD(5)}, + {"OOB_TIMER_ENABLE" ,0x14D, 8, MD(5)}, + {"OOB_STATUS" ,0x14E, 8, MD(5)}, + {"HOT_PLUG_DELAY" ,0x150, 8, MD(5)}, + {"RCD_DELAY" ,0x151, 8, MD(5)}, + {"COMSAS_TIMER" ,0x152, 8, MD(5)}, + {"SNTT_DELAY" ,0x153, 8, MD(5)}, + {"SPD_CHNG_DELAY" ,0x154, 8, MD(5)}, + {"SNLT_DELAY" ,0x155, 8, MD(5)}, + {"SNWT_DELAY" ,0x156, 8, MD(5)}, + {"ALIGN_DELAY" ,0x157, 8, MD(5)}, + {"INT_ENABLE_0" ,0x158, 8, MD(5)}, + {"INT_ENABLE_1" ,0x159, 8, MD(5)}, + {"INT_ENABLE_2" ,0x15A, 8, MD(5)}, + {"INT_ENABLE_3" ,0x15B, 8, MD(5)}, + {"OOB_TEST_REG" ,0x15C, 8, MD(5)}, + {"PHY_CONTROL_0" ,0x160, 8, MD(5)}, + {"PHY_CONTROL_1" ,0x161, 8, MD(5)}, + {"PHY_CONTROL_2" ,0x162, 8, MD(5)}, + {"PHY_CONTROL_3" ,0x163, 8, MD(5)}, + {"PHY_OOB_CAL_TX" ,0x164, 8, MD(5)}, + {"PHY_OOB_CAL_RX" ,0x165, 8, MD(5)}, + {"OOB_PHY_CAL_TX" ,0x166, 8, MD(5)}, + {"OOB_PHY_CAL_RX" ,0x167, 8, MD(5)}, + {"PHY_CONTROL_4" ,0x168, 8, MD(5)}, + {"PHY_TEST" ,0x169, 8, MD(5)}, + {"PHY_PWR_CTL" ,0x16A, 8, MD(5)}, + {"PHY_PWR_DELAY" ,0x16B, 8, MD(5)}, + {"OOB_SM_CON" ,0x16C, 8, MD(5)}, + {"ADDR_TRAP_1" ,0x16D, 8, MD(5)}, + {"ADDR_NEXT_1" ,0x16E, 8, MD(5)}, + {"NEXT_ST_1" ,0x16F, 8, MD(5)}, + {"OOB_SM_STATE" ,0x170, 8, MD(5)}, + {"ADDR_TRAP_2" ,0x171, 8, MD(5)}, + {"ADDR_NEXT_2" ,0x172, 8, MD(5)}, + {"NEXT_ST_2" ,0x173, 8, MD(5)}, + {NULL, 0, 0, 0 } +}; +*/ +#define STR_8BIT " %30s[0x%04x]:0x%02x\n" +#define STR_16BIT " %30s[0x%04x]:0x%04x\n" +#define STR_32BIT " %30s[0x%04x]:0x%08x\n" +#define STR_64BIT " %30s[0x%04x]:0x%llx\n" + +#define PRINT_REG_8bit(_ha, _n, _r) asd_printk(STR_8BIT, #_n, _n, \ + asd_read_reg_byte(_ha, _r)) +#define PRINT_REG_16bit(_ha, _n, _r) asd_printk(STR_16BIT, #_n, _n, \ + asd_read_reg_word(_ha, _r)) +#define PRINT_REG_32bit(_ha, _n, _r) asd_printk(STR_32BIT, #_n, _n, \ + asd_read_reg_dword(_ha, _r)) + +#define PRINT_CREG_8bit(_ha, _n) asd_printk(STR_8BIT, #_n, _n, \ + asd_read_reg_byte(_ha, 
C##_n)) +#define PRINT_CREG_16bit(_ha, _n) asd_printk(STR_16BIT, #_n, _n, \ + asd_read_reg_word(_ha, C##_n)) +#define PRINT_CREG_32bit(_ha, _n) asd_printk(STR_32BIT, #_n, _n, \ + asd_read_reg_dword(_ha, C##_n)) + +#define MSTR_8BIT " Mode:%02d %30s[0x%04x]:0x%02x\n" +#define MSTR_16BIT " Mode:%02d %30s[0x%04x]:0x%04x\n" +#define MSTR_32BIT " Mode:%02d %30s[0x%04x]:0x%08x\n" + +#define PRINT_MREG_8bit(_ha, _m, _n, _r) asd_printk(MSTR_8BIT, _m, #_n, _n, \ + asd_read_reg_byte(_ha, _r)) +#define PRINT_MREG_16bit(_ha, _m, _n, _r) asd_printk(MSTR_16BIT, _m, #_n, _n, \ + asd_read_reg_word(_ha, _r)) +#define PRINT_MREG_32bit(_ha, _m, _n, _r) asd_printk(MSTR_32BIT, _m, #_n, _n, \ + asd_read_reg_dword(_ha, _r)) + +/* can also be used for MD when the register is mode aware already */ +#define PRINT_MIS_byte(_ha, _n) asd_printk(STR_8BIT, #_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_byte(_ha, CSEQ_##_n)) +#define PRINT_MIS_word(_ha, _n) asd_printk(STR_16BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_word(_ha, CSEQ_##_n)) +#define PRINT_MIS_dword(_ha, _n) \ + asd_printk(STR_32BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_dword(_ha, CSEQ_##_n)) +#define PRINT_MIS_qword(_ha, _n) \ + asd_printk(STR_64BIT, #_n,CSEQ_##_n-CMAPPEDSCR, \ + (unsigned long long)(((u64)asd_read_reg_dword(_ha, CSEQ_##_n)) \ + | (((u64)asd_read_reg_dword(_ha, (CSEQ_##_n)+4))<<32))) + +#define CMDP_REG(_n, _m) (_m*(CSEQ_PAGE_SIZE*2)+CSEQ_##_n) +#define PRINT_CMDP_word(_ha, _n) \ +asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \ + #_n, \ + asd_read_reg_word(_ha, CMDP_REG(_n, 0)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 1)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 2)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 3)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 4)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 5)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 6)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 7))) + +#define PRINT_CMDP_byte(_ha, _n) \ +asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \ + #_n, \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 0)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 1)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 2)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 3)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 4)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 5)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 6)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 7))) + +static void asd_dump_cseq_state(struct asd_ha_struct *asd_ha) +{ + int mode; + + asd_printk("CSEQ STATE\n"); + + asd_printk("ARP2 REGISTERS\n"); + + PRINT_CREG_32bit(asd_ha, ARP2CTL); + PRINT_CREG_32bit(asd_ha, ARP2INT); + PRINT_CREG_32bit(asd_ha, ARP2INTEN); + PRINT_CREG_8bit(asd_ha, MODEPTR); + PRINT_CREG_8bit(asd_ha, ALTMODE); + PRINT_CREG_8bit(asd_ha, FLAG); + PRINT_CREG_8bit(asd_ha, ARP2INTCTL); + PRINT_CREG_16bit(asd_ha, STACK); + PRINT_CREG_16bit(asd_ha, PRGMCNT); + PRINT_CREG_16bit(asd_ha, ACCUM); + PRINT_CREG_16bit(asd_ha, SINDEX); + PRINT_CREG_16bit(asd_ha, DINDEX); + PRINT_CREG_8bit(asd_ha, SINDIR); + PRINT_CREG_8bit(asd_ha, DINDIR); + PRINT_CREG_8bit(asd_ha, JUMLDIR); + PRINT_CREG_8bit(asd_ha, ARP2HALTCODE); + PRINT_CREG_16bit(asd_ha, CURRADDR); + PRINT_CREG_16bit(asd_ha, LASTADDR); + PRINT_CREG_16bit(asd_ha, NXTLADDR); + + asd_printk("IOP REGISTERS\n"); + + PRINT_REG_32bit(asd_ha, BISTCTL1, CBISTCTL); + PRINT_CREG_32bit(asd_ha, MAPPEDSCR); + + asd_printk("CIO REGISTERS\n"); + + for (mode = 0; mode < 9; mode++) + PRINT_MREG_16bit(asd_ha, mode, MnSCBPTR, CMnSCBPTR(mode)); + PRINT_MREG_16bit(asd_ha, 15, MnSCBPTR, CMnSCBPTR(15)); + + for 
(mode = 0; mode < 9; mode++) + PRINT_MREG_16bit(asd_ha, mode, MnDDBPTR, CMnDDBPTR(mode)); + PRINT_MREG_16bit(asd_ha, 15, MnDDBPTR, CMnDDBPTR(15)); + + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnREQMBX, CMnREQMBX(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnRSPMBX, CMnRSPMBX(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnINT, CMnINT(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnINTEN, CMnINTEN(mode)); + + PRINT_CREG_8bit(asd_ha, SCRATCHPAGE); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_8bit(asd_ha, mode, MnSCRATCHPAGE, + CMnSCRATCHPAGE(mode)); + + PRINT_REG_32bit(asd_ha, CLINKCON, CLINKCON); + PRINT_REG_8bit(asd_ha, CCONMSK, CCONMSK); + PRINT_REG_8bit(asd_ha, CCONEXIST, CCONEXIST); + PRINT_REG_16bit(asd_ha, CCONMODE, CCONMODE); + PRINT_REG_32bit(asd_ha, CTIMERCALC, CTIMERCALC); + PRINT_REG_8bit(asd_ha, CINTDIS, CINTDIS); + + asd_printk("SCRATCH MEMORY\n"); + + asd_printk("MIP 4 >>>>>\n"); + PRINT_MIS_word(asd_ha, Q_EXE_HEAD); + PRINT_MIS_word(asd_ha, Q_EXE_TAIL); + PRINT_MIS_word(asd_ha, Q_DONE_HEAD); + PRINT_MIS_word(asd_ha, Q_DONE_TAIL); + PRINT_MIS_word(asd_ha, Q_SEND_HEAD); + PRINT_MIS_word(asd_ha, Q_SEND_TAIL); + PRINT_MIS_word(asd_ha, Q_DMA2CHIM_HEAD); + PRINT_MIS_word(asd_ha, Q_DMA2CHIM_TAIL); + PRINT_MIS_word(asd_ha, Q_COPY_HEAD); + PRINT_MIS_word(asd_ha, Q_COPY_TAIL); + PRINT_MIS_word(asd_ha, REG0); + PRINT_MIS_word(asd_ha, REG1); + PRINT_MIS_dword(asd_ha, REG2); + PRINT_MIS_byte(asd_ha, LINK_CTL_Q_MAP); + PRINT_MIS_byte(asd_ha, MAX_CSEQ_MODE); + PRINT_MIS_byte(asd_ha, FREE_LIST_HACK_COUNT); + + asd_printk("MIP 5 >>>>\n"); + PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_QUEUE); + PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_COUNT); + PRINT_MIS_word(asd_ha, Q_EST_NEXUS_HEAD); + PRINT_MIS_word(asd_ha, Q_EST_NEXUS_TAIL); + PRINT_MIS_word(asd_ha, NEED_EST_NEXUS_SCB); + PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_HEAD); + PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_TAIL); + PRINT_MIS_byte(asd_ha, EST_NEXUS_SCB_OFFSET); + + asd_printk("MIP 6 >>>>\n"); + PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR0); + PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR1); + PRINT_MIS_word(asd_ha, INT_ROUT_SCBPTR); + PRINT_MIS_byte(asd_ha, INT_ROUT_MODE); + PRINT_MIS_byte(asd_ha, ISR_SCRATCH_FLAGS); + PRINT_MIS_word(asd_ha, ISR_SAVE_SINDEX); + PRINT_MIS_word(asd_ha, ISR_SAVE_DINDEX); + PRINT_MIS_word(asd_ha, Q_MONIRTT_HEAD); + PRINT_MIS_word(asd_ha, Q_MONIRTT_TAIL); + PRINT_MIS_byte(asd_ha, FREE_SCB_MASK); + PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_HEAD); + PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_TAIL); + PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_HEAD); + PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_TAIL); + + asd_printk("MIP 7 >>>>\n"); + PRINT_MIS_qword(asd_ha, EMPTY_REQ_QUEUE); + PRINT_MIS_qword(asd_ha, EMPTY_REQ_COUNT); + PRINT_MIS_word(asd_ha, Q_EMPTY_HEAD); + PRINT_MIS_word(asd_ha, Q_EMPTY_TAIL); + PRINT_MIS_word(asd_ha, NEED_EMPTY_SCB); + PRINT_MIS_byte(asd_ha, EMPTY_REQ_HEAD); + PRINT_MIS_byte(asd_ha, EMPTY_REQ_TAIL); + PRINT_MIS_byte(asd_ha, EMPTY_SCB_OFFSET); + PRINT_MIS_word(asd_ha, PRIMITIVE_DATA); + PRINT_MIS_dword(asd_ha, TIMEOUT_CONST); + + asd_printk("MDP 0 >>>>\n"); + asd_printk("%-20s %6s %6s %6s %6s %6s %6s %6s %6s\n", + "Mode: ", "0", "1", "2", "3", "4", "5", "6", "7"); + PRINT_CMDP_word(asd_ha, LRM_SAVE_SINDEX); + PRINT_CMDP_word(asd_ha, LRM_SAVE_SCBPTR); + PRINT_CMDP_word(asd_ha, Q_LINK_HEAD); + PRINT_CMDP_word(asd_ha, Q_LINK_TAIL); + PRINT_CMDP_byte(asd_ha, LRM_SAVE_SCRPAGE); + + asd_printk("MDP 0 Mode 8 
>>>>\n"); + PRINT_MIS_word(asd_ha, RET_ADDR); + PRINT_MIS_word(asd_ha, RET_SCBPTR); + PRINT_MIS_word(asd_ha, SAVE_SCBPTR); + PRINT_MIS_word(asd_ha, EMPTY_TRANS_CTX); + PRINT_MIS_word(asd_ha, RESP_LEN); + PRINT_MIS_word(asd_ha, TMF_SCBPTR); + PRINT_MIS_word(asd_ha, GLOBAL_PREV_SCB); + PRINT_MIS_word(asd_ha, GLOBAL_HEAD); + PRINT_MIS_word(asd_ha, CLEAR_LU_HEAD); + PRINT_MIS_byte(asd_ha, TMF_OPCODE); + PRINT_MIS_byte(asd_ha, SCRATCH_FLAGS); + PRINT_MIS_word(asd_ha, HSB_SITE); + PRINT_MIS_word(asd_ha, FIRST_INV_SCB_SITE); + PRINT_MIS_word(asd_ha, FIRST_INV_DDB_SITE); + + asd_printk("MDP 1 Mode 8 >>>>\n"); + PRINT_MIS_qword(asd_ha, LUN_TO_CLEAR); + PRINT_MIS_qword(asd_ha, LUN_TO_CHECK); + + asd_printk("MDP 2 Mode 8 >>>>\n"); + PRINT_MIS_qword(asd_ha, HQ_NEW_POINTER); + PRINT_MIS_qword(asd_ha, HQ_DONE_BASE); + PRINT_MIS_dword(asd_ha, HQ_DONE_POINTER); + PRINT_MIS_byte(asd_ha, HQ_DONE_PASS); +} + +#define PRINT_LREG_8bit(_h, _lseq, _n) \ + asd_printk(STR_8BIT, #_n, _n, asd_read_reg_byte(_h, Lm##_n(_lseq))) +#define PRINT_LREG_16bit(_h, _lseq, _n) \ + asd_printk(STR_16BIT, #_n, _n, asd_read_reg_word(_h, Lm##_n(_lseq))) +#define PRINT_LREG_32bit(_h, _lseq, _n) \ + asd_printk(STR_32BIT, #_n, _n, asd_read_reg_dword(_h, Lm##_n(_lseq))) + +#define PRINT_LMIP_byte(_h, _lseq, _n) \ + asd_printk(STR_8BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_byte(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_word(_h, _lseq, _n) \ + asd_printk(STR_16BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_word(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_dword(_h, _lseq, _n) \ + asd_printk(STR_32BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_qword(_h, _lseq, _n) \ + asd_printk(STR_64BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + (unsigned long long)(((unsigned long long) \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) \ + | (((unsigned long long) \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)+4))<<32))) + +static void asd_print_lseq_cio_reg(struct asd_ha_struct *asd_ha, + u32 lseq_cio_addr, int i) +{ + switch (LSEQmCIOREGS[i].width) { + case 8: + asd_printk("%20s[0x%x]: 0x%02x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_byte(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + + break; + case 16: + asd_printk("%20s[0x%x]: 0x%04x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_word(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + + break; + case 32: + asd_printk("%20s[0x%x]: 0x%08x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_dword(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + break; + } +} + +static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 moffs; + int mode; + + asd_printk("LSEQ %d STATE\n", lseq); + + asd_printk("LSEQ%d: ARP2 REGISTERS\n", lseq); + PRINT_LREG_32bit(asd_ha, lseq, ARP2CTL); + PRINT_LREG_32bit(asd_ha, lseq, ARP2INT); + PRINT_LREG_32bit(asd_ha, lseq, ARP2INTEN); + PRINT_LREG_8bit(asd_ha, lseq, MODEPTR); + PRINT_LREG_8bit(asd_ha, lseq, ALTMODE); + PRINT_LREG_8bit(asd_ha, lseq, FLAG); + PRINT_LREG_8bit(asd_ha, lseq, ARP2INTCTL); + PRINT_LREG_16bit(asd_ha, lseq, STACK); + PRINT_LREG_16bit(asd_ha, lseq, PRGMCNT); + PRINT_LREG_16bit(asd_ha, lseq, ACCUM); + PRINT_LREG_16bit(asd_ha, lseq, SINDEX); + PRINT_LREG_16bit(asd_ha, lseq, DINDEX); + PRINT_LREG_8bit(asd_ha, lseq, SINDIR); + PRINT_LREG_8bit(asd_ha, lseq, DINDIR); + PRINT_LREG_8bit(asd_ha, lseq, JUMLDIR); + PRINT_LREG_8bit(asd_ha, lseq, ARP2HALTCODE); + 
PRINT_LREG_16bit(asd_ha, lseq, CURRADDR); + PRINT_LREG_16bit(asd_ha, lseq, LASTADDR); + PRINT_LREG_16bit(asd_ha, lseq, NXTLADDR); + + asd_printk("LSEQ%d: IOP REGISTERS\n", lseq); + + PRINT_LREG_32bit(asd_ha, lseq, MODECTL); + PRINT_LREG_32bit(asd_ha, lseq, DBGMODE); + PRINT_LREG_32bit(asd_ha, lseq, CONTROL); + PRINT_REG_32bit(asd_ha, BISTCTL0, LmBISTCTL0(lseq)); + PRINT_REG_32bit(asd_ha, BISTCTL1, LmBISTCTL1(lseq)); + + asd_printk("LSEQ%d: CIO REGISTERS\n", lseq); + asd_printk("Mode common:\n"); + + for (mode = 0; mode < 8; mode++) { + u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq); + int i; + + for (i = 0; LSEQmCIOREGS[i].name; i++) + if (LSEQmCIOREGS[i].mode == MODE_COMMON) + asd_print_lseq_cio_reg(asd_ha,lseq_cio_addr,i); + } + + asd_printk("Mode unique:\n"); + for (mode = 0; mode < 8; mode++) { + u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq); + int i; + + asd_printk("Mode %d\n", mode); + for (i = 0; LSEQmCIOREGS[i].name; i++) { + if (!(LSEQmCIOREGS[i].mode & (1 << mode))) + continue; + asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i); + } + } + + asd_printk("SCRATCH MEMORY\n"); + + asd_printk("LSEQ%d MIP 0 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_HEAD); + PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER); + PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS); + PRINT_LMIP_qword(asd_ha, lseq, CONNECTION_STATE); + PRINT_LMIP_word(asd_ha, lseq, CONCTL); + PRINT_LMIP_byte(asd_ha, lseq, CONSTAT); + PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES); + PRINT_LMIP_word(asd_ha, lseq, REG1_ISR); + PRINT_LMIP_word(asd_ha, lseq, REG2_ISR); + PRINT_LMIP_word(asd_ha, lseq, REG3_ISR); + PRINT_LMIP_qword(asd_ha, lseq,REG0_ISR); + + asd_printk("LSEQ%d MIP 1 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR0); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR1); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR2); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR3); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE0); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE1); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE2); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE3); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_BUF_AVAIL); + PRINT_LMIP_dword(asd_ha, lseq, TIMEOUT_CONST); + PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_SINDEX); + PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_DINDEX); + + asd_printk("LSEQ%d MIP 2 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR0); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR1); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR2); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR3); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD0); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD1); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD2); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD3); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_BUFS_AVAIL); + + asd_printk("LSEQ%d MIP 3 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TMR_TOUT_CONST); + PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, SRST_ASSERT_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, ONE_MILLISEC_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, TEN_MS_COMINIT_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMEOUT); + + for (mode = 0; mode < 3; mode++) { + 
asd_printk("LSEQ%d MDP 0 MODE %d >>>>\n", lseq, mode); + moffs = mode * LSEQ_MODE_SCRATCH_SIZE; + + asd_printk(STR_16BIT, "RET_ADDR", 0, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + + moffs)); + asd_printk(STR_16BIT, "REG0_MODE", 2, + asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + + moffs)); + asd_printk(STR_16BIT, "MODE_FLAGS", 4, + asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + + moffs)); + asd_printk(STR_16BIT, "RET_ADDR2", 0x6, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + + moffs)); + asd_printk(STR_16BIT, "RET_ADDR1", 0x8, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + + moffs)); + asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB, + asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + + moffs)); + asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC, + asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + + moffs)); + } + + asd_printk("LSEQ%d MDP 0 MODE 5 >>>>\n", lseq); + moffs = LSEQ_MODE5_PAGE0_OFFSET; + asd_printk(STR_16BIT, "RET_ADDR", 0, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + moffs)); + asd_printk(STR_16BIT, "REG0_MODE", 2, + asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + moffs)); + asd_printk(STR_16BIT, "MODE_FLAGS", 4, + asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + moffs)); + asd_printk(STR_16BIT, "RET_ADDR2", 0x6, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + moffs)); + asd_printk(STR_16BIT, "RET_ADDR1", 0x8, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + moffs)); + asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB, + asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + moffs)); + asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC, + asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + moffs)); + + asd_printk("LSEQ%d MDP 0 MODE 0 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_DDB_SITE); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_TRANS_CTX); + PRINT_LMIP_word(asd_ha, lseq, RESP_LEN); + PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_SCB_SITE); + PRINT_LMIP_dword(asd_ha, lseq, INTEN_SAVE); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_FRM_LEN); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_PROTOCOL); + PRINT_LMIP_byte(asd_ha, lseq, RESP_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, LAST_LOADED_SGE); + PRINT_LMIP_byte(asd_ha, lseq, SAVE_SCBPTR); + + asd_printk("LSEQ%d MDP 0 MODE 1 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, Q_XMIT_HEAD); + PRINT_LMIP_word(asd_ha, lseq, M1_EMPTY_TRANS_CTX); + PRINT_LMIP_word(asd_ha, lseq, INI_CONN_TAG); + PRINT_LMIP_byte(asd_ha, lseq, FAILED_OPEN_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, XMIT_REQUEST_TYPE); + PRINT_LMIP_byte(asd_ha, lseq, M1_RESP_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, M1_LAST_LOADED_SGE); + PRINT_LMIP_word(asd_ha, lseq, M1_SAVE_SCBPTR); + + asd_printk("LSEQ%d MDP 0 MODE 2 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, PORT_COUNTER); + PRINT_LMIP_word(asd_ha, lseq, PM_TABLE_PTR); + PRINT_LMIP_word(asd_ha, lseq, SATA_INTERLOCK_TMR_SAVE); + PRINT_LMIP_word(asd_ha, lseq, IP_BITL); + PRINT_LMIP_word(asd_ha, lseq, COPY_SMP_CONN_TAG); + PRINT_LMIP_byte(asd_ha, lseq, P0M2_OFFS1AH); + + asd_printk("LSEQ%d MDP 0 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_MODE); + PRINT_LMIP_word(asd_ha, lseq, Q_LINK_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_ERR); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_SIGNALS); + PRINT_LMIP_byte(asd_ha, lseq, SAS_RESET_MODE); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RESET_RETRY_COUNT); + PRINT_LMIP_byte(asd_ha, lseq, NUM_LINK_RESET_RETRIES); + PRINT_LMIP_word(asd_ha, lseq, OOB_INT_ENABLES); + PRINT_LMIP_word(asd_ha, lseq, 
NOTIFY_TIMER_TIMEOUT); + PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_DOWN_COUNT); + + asd_printk("LSEQ%d MDP 1 MODE 0 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR0); + PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR1); + + asd_printk("LSEQ%d MDP 1 MODE 1 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR0); + PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR1); + + asd_printk("LSEQ%d MDP 1 MODE 2 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, INVALID_DWORD_COUNT); + PRINT_LMIP_dword(asd_ha, lseq, DISPARITY_ERROR_COUNT); + PRINT_LMIP_dword(asd_ha, lseq, LOSS_OF_SYNC_COUNT); + + asd_printk("LSEQ%d MDP 1 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, FRAME_TYPE_MASK); + PRINT_LMIP_dword(asd_ha, lseq, HASHED_SRC_ADDR_MASK_PRINT); + PRINT_LMIP_byte(asd_ha, lseq, NUM_FILL_BYTES_MASK); + PRINT_LMIP_word(asd_ha, lseq, TAG_MASK); + PRINT_LMIP_word(asd_ha, lseq, TARGET_PORT_XFER_TAG); + PRINT_LMIP_dword(asd_ha, lseq, DATA_OFFSET); + + asd_printk("LSEQ%d MDP 2 MODE 0 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMER_TERM_TS); + PRINT_LMIP_byte(asd_ha, lseq, DEVICE_BITS); + PRINT_LMIP_word(asd_ha, lseq, SDB_DDB); + PRINT_LMIP_word(asd_ha, lseq, SDB_NUM_TAGS); + PRINT_LMIP_word(asd_ha, lseq, SDB_CURR_TAG); + + asd_printk("LSEQ%d MDP 2 MODE 1 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, TX_ID_ADDR_FRAME); + PRINT_LMIP_dword(asd_ha, lseq, OPEN_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, SRST_AS_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, LAST_LOADED_SG_EL); + + asd_printk("LSEQ%d MDP 2 MODE 2 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, CLOSE_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, BREAK_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, DWS_RESET_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, MCTL_TIMER_TERM_TS); + + asd_printk("LSEQ%d MDP 2 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, COMINIT_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, RCV_ID_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS); +} + +/** + * asd_dump_ddb_site -- dump a CSEQ DDB site + * @asd_ha: pointer to host adapter structure + * @site_no: site number of interest + */ +void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no) +{ + if (site_no >= asd_ha->hw_prof.max_ddbs) + return; + +#define DDB_FIELDB(__name) \ + asd_ddbsite_read_byte(asd_ha, site_no, \ + offsetof(struct asd_ddb_ssp_smp_target_port, __name)) +#define DDB2_FIELDB(__name) \ + asd_ddbsite_read_byte(asd_ha, site_no, \ + offsetof(struct asd_ddb_stp_sata_target_port, __name)) +#define DDB_FIELDW(__name) \ + asd_ddbsite_read_word(asd_ha, site_no, \ + offsetof(struct asd_ddb_ssp_smp_target_port, __name)) + +#define DDB_FIELDD(__name) \ + asd_ddbsite_read_dword(asd_ha, site_no, \ + offsetof(struct asd_ddb_ssp_smp_target_port, __name)) + + asd_printk("DDB: 0x%02x\n", site_no); + asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type)); + asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate)); + asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag))); + asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head))); + asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended)); + asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type)); + asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def)); + asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features)); + 
asd_printk("Pathway Blocked Count: 0x%02x\n", + DDB_FIELDB(pathway_blocked_count)); + asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time)); + asd_printk("more_compat_features: 0x%08x\n", + DDB_FIELDD(more_compat_features)); + asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask)); + asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags)); + asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2)); + asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail)); + asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail)); + asd_printk("Active Task Count: 0x%04x\n", + DDB_FIELDW(active_task_count)); + asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason)); + asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout)); + asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp)); +} + +void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) +{ +#define DDB0_FIELDB(__name) \ + asd_ddbsite_read_byte(asd_ha, 0, \ + offsetof(struct asd_ddb_seq_shared, __name)) +#define DDB0_FIELDW(__name) \ + asd_ddbsite_read_word(asd_ha, 0, \ + offsetof(struct asd_ddb_seq_shared, __name)) + +#define DDB0_FIELDD(__name) \ + asd_ddbsite_read_dword(asd_ha,0 , \ + offsetof(struct asd_ddb_seq_shared, __name)) + +#define DDB0_FIELDA(__name, _o) \ + asd_ddbsite_read_byte(asd_ha, 0, \ + offsetof(struct asd_ddb_seq_shared, __name)+_o) + + + asd_printk("DDB: 0\n"); + asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head)); + asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail)); + asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt)); + asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head)); + asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail)); + asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock)); + asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag)); + asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt)); + asd_printk("est_nexus_buf_thresh:%04x\n", + DDB0_FIELDW(est_nexus_buf_thresh)); + asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active)); + asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up)); + asd_printk("port_map_by_links:%02x %02x %02x %02x " + "%02x %02x %02x %02x\n", + DDB0_FIELDA(port_map_by_links, 0), + DDB0_FIELDA(port_map_by_links, 1), + DDB0_FIELDA(port_map_by_links, 2), + DDB0_FIELDA(port_map_by_links, 3), + DDB0_FIELDA(port_map_by_links, 4), + DDB0_FIELDA(port_map_by_links, 5), + DDB0_FIELDA(port_map_by_links, 6), + DDB0_FIELDA(port_map_by_links, 7)); +} + +static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no) +{ + +#define SCB_FIELDB(__name) \ + asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \ + + offsetof(struct initiate_ssp_task, __name)) +#define SCB_FIELDW(__name) \ + asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \ + + offsetof(struct initiate_ssp_task, __name)) +#define SCB_FIELDD(__name) \ + asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \ + + offsetof(struct initiate_ssp_task, __name)) + + asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len)); + asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type)); + asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag)); + asd_printk("Target Port Xfer Tag: 0x%04x.\n", + SCB_FIELDW(ssp_frame.tptt)); + asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs)); + asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count)); +} + +/** + * asd_dump_scb_sites -- dump currently used CSEQ SCB sites + * 
@asd_ha: pointer to host adapter struct + */ +void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) +{ + u16 site_no; + + for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) { + u8 opcode; + + if (!SCB_SITE_VALID(site_no)) + continue; + + /* We are only interested in SCB sites currently used. + */ + opcode = asd_scbsite_read_byte(asd_ha, site_no, + offsetof(struct scb_header, + opcode)); + if (opcode == 0xFF) + continue; + + asd_printk("\nSCB: 0x%x\n", site_no); + asd_dump_scb_site(asd_ha, site_no); + } +} + +/** + * ads_dump_seq_state -- dump CSEQ and LSEQ states + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of LSEQs of interest + */ +void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + + asd_dump_cseq_state(asd_ha); + + if (lseq_mask != 0) + for_each_sequencer(lseq_mask, lseq_mask, lseq) + asd_dump_lseq_state(asd_ha, lseq); +} + +void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl) +{ + unsigned long flags; + int i; + + switch ((dl->status_block[1] & 0x70) >> 3) { + case SAS_PROTO_STP: + ASD_DPRINTK("STP proto device-to-host FIS:\n"); + break; + default: + case SAS_PROTO_SSP: + ASD_DPRINTK("SAS proto IDENTIFY:\n"); + break; + } + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + for (i = 0; i < phy->sas_phy.frame_rcvd_size; i+=4) + ASD_DPRINTK("%02x: %02x %02x %02x %02x\n", + i, + phy->frame_rcvd[i], + phy->frame_rcvd[i+1], + phy->frame_rcvd[i+2], + phy->frame_rcvd[i+3]); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); +} + +static inline void asd_dump_scb(struct asd_ascb *ascb, int ind) +{ + asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ind, ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); +} + +void asd_dump_scb_list(struct asd_ascb *ascb, int num) +{ + int i = 0; + + asd_printk("dumping %d scbs:\n", num); + + asd_dump_scb(ascb, i++); + --num; + + if (num > 0 && !list_empty(&ascb->list)) { + struct list_head *el; + + list_for_each(el, &ascb->list) { + struct asd_ascb *s = list_entry(el, struct asd_ascb, + list); + asd_dump_scb(s, i++); + if (--num <= 0) + break; + } + } +} + +#endif /* ASD_DEBUG */ diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h new file mode 100644 index 0000000..0c388e7 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dump.h @@ -0,0 +1,52 @@ +/* + * Aic94xx SAS/SATA driver dump header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_DUMP_H_ +#define _AIC94XX_DUMP_H_ + +#ifdef ASD_DEBUG + +void asd_dump_ddb_0(struct asd_ha_struct *asd_ha); +void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no); +void asd_dump_scb_sites(struct asd_ha_struct *asd_ha); +void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask); +void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl); +void asd_dump_scb_list(struct asd_ascb *ascb, int num); +#else /* ASD_DEBUG */ + +static inline void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) { } +static inline void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, + u16 site_no) { } +static inline void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) { } +static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha, + u8 lseq_mask) { } +static inline void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl) { } +static inline void asd_dump_scb_list(struct asd_ascb *ascb, int num) { } +#endif /* ASD_DEBUG */ + +#endif /* _AIC94XX_DUMP_H_ */ diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c new file mode 100644 index 0000000..075cea8 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -0,0 +1,1376 @@ +/* + * Aic94xx SAS/SATA driver hardware interface. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" +#include "aic94xx_dump.h" + +u32 MBAR0_SWB_SIZE; + +/* ---------- Initialization ---------- */ + +static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha) +{ + extern char sas_addr_str[]; + /* If the user has specified a WWN it overrides other settings + */ + if (sas_addr_str[0] != '\0') + asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr, + sas_addr_str); + else if (asd_ha->hw_prof.sas_addr[0] != 0) + asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr); +} + +static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha) +{ + int i; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0) + continue; + /* Set a phy's address only if it has none. 
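+ * The adapter-wide SAS address from hw_prof is copied into each
+ * remaining phy descriptor.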
+ */ + ASD_DPRINTK("setting phy%d addr to %llx\n", i, + SAS_ADDR(asd_ha->hw_prof.sas_addr)); + memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, + asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); + } +} + +/* ---------- PHY initialization ---------- */ + +static void asd_init_phy_identify(struct asd_phy *phy) +{ + phy->identify_frame = phy->id_frm_tok->vaddr; + + memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); + + phy->identify_frame->dev_type = SAS_END_DEV; + if (phy->sas_phy.role & PHY_ROLE_INITIATOR) + phy->identify_frame->initiator_bits = phy->sas_phy.iproto; + if (phy->sas_phy.role & PHY_ROLE_TARGET) + phy->identify_frame->target_bits = phy->sas_phy.tproto; + memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr, + SAS_ADDR_SIZE); + phy->identify_frame->phy_id = phy->sas_phy.id; +} + +static int asd_init_phy(struct asd_phy *phy) +{ + struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->enabled = 1; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTO_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = PHY_LINKRATE_NONE; + + phy->id_frm_tok = asd_alloc_coherent(asd_ha, + sizeof(*phy->identify_frame), + GFP_KERNEL); + if (!phy->id_frm_tok) { + asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id); + return -ENOMEM; + } else + asd_init_phy_identify(phy); + + memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd)); + + return 0; +} + +static int asd_init_phys(struct asd_ha_struct *asd_ha) +{ + u8 i; + u8 phy_mask = asd_ha->hw_prof.enabled_phys; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + struct asd_phy *phy = &asd_ha->phys[i]; + + phy->phy_desc = &asd_ha->hw_prof.phy_desc[i]; + + phy->sas_phy.enabled = 0; + phy->sas_phy.id = i; + phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0]; + phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0]; + phy->sas_phy.ha = &asd_ha->sas_ha; + phy->sas_phy.lldd_phy = phy; + } + + /* Now enable and initialize only the enabled phys. */ + for_each_phy(phy_mask, phy_mask, i) { + int err = asd_init_phy(&asd_ha->phys[i]); + if (err) + return err; + } + + return 0; +} + +/* ---------- Sliding windows ---------- */ + +static int asd_init_sw(struct asd_ha_struct *asd_ha) +{ + struct pci_dev *pcidev = asd_ha->pcidev; + int err; + u32 v; + + /* Unlock MBARs */ + err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v); + if (err) { + asd_printk("couldn't access conf. space of %s\n", + pci_name(pcidev)); + goto Err; + } + if (v) + err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v); + if (err) { + asd_printk("couldn't write to MBAR_KEY of %s\n", + pci_name(pcidev)); + goto Err; + } + + /* Set sliding windows A, B and C to point to proper internal + * memory regions. 
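+ * SWA is pointed at REG_BASE_ADDR, SWB at REG_BASE_ADDR_CSEQCIO and
+ * SWC at REG_BASE_ADDR_EXSI; the same bases are recorded in
+ * io_handle[0] so the register accessors can translate an offset to
+ * the window that maps it.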
+ */ + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR); + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB, + REG_BASE_ADDR_CSEQCIO); + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI); + asd_ha->io_handle[0].swa_base = REG_BASE_ADDR; + asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO; + asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI; + MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80; + if (!asd_ha->iospace) { + /* MBAR1 will point to OCM (On Chip Memory) */ + pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR); + asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR; + } + spin_lock_init(&asd_ha->iolock); +Err: + return err; +} + +/* ---------- SCB initialization ---------- */ + +/** + * asd_init_scbs - manually allocate the first SCB. + * @asd_ha: pointer to host adapter structure + * + * This allocates the very first SCB which would be sent to the + * sequencer for execution. Its bus address is written to + * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of + * the _next_ scb to be DMA-ed to the host adapter is read from the last + * SCB DMA-ed to the host adapter, we have to always stay one step + * ahead of the sequencer and keep one SCB already allocated. + */ +static int asd_init_scbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int bitmap_bytes; + + /* allocate the index array and bitmap */ + asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs; + asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits* + sizeof(void *), GFP_KERNEL); + if (!asd_ha->seq.tc_index_array) + return -ENOMEM; + + bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8; + bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); + asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); + if (!asd_ha->seq.tc_index_bitmap) + return -ENOMEM; + + spin_lock_init(&seq->tc_index_lock); + + seq->next_scb.size = sizeof(struct scb); + seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL, + &seq->next_scb.dma_handle); + if (!seq->next_scb.vaddr) { + kfree(asd_ha->seq.tc_index_bitmap); + kfree(asd_ha->seq.tc_index_array); + asd_ha->seq.tc_index_bitmap = NULL; + asd_ha->seq.tc_index_array = NULL; + return -ENOMEM; + } + + seq->pending = 0; + spin_lock_init(&seq->pend_q_lock); + INIT_LIST_HEAD(&seq->pend_q); + + return 0; +} + +static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha) +{ + asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE; + asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE; + ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n", + asd_ha->hw_prof.max_scbs, + asd_ha->hw_prof.max_ddbs); +} + +/* ---------- Done List initialization ---------- */ + +static void asd_dl_tasklet_handler(unsigned long); + +static int asd_init_dl(struct asd_ha_struct *asd_ha) +{ + asd_ha->seq.actual_dl + = asd_alloc_coherent(asd_ha, + ASD_DL_SIZE * sizeof(struct done_list_struct), + GFP_KERNEL); + if (!asd_ha->seq.actual_dl) + return -ENOMEM; + asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr; + asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE; + asd_ha->seq.dl_next = 0; + tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler, + (unsigned long) asd_ha); + + return 0; +} + +/* ---------- EDB and ESCB init ---------- */ + +static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, unsigned int gfp_flags) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags); + if 
(!seq->edb_arr) + return -ENOMEM; + + for (i = 0; i < seq->num_edbs; i++) { + seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE, + gfp_flags); + if (!seq->edb_arr[i]) + goto Err_unroll; + memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE); + } + + ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs); + + return 0; + +Err_unroll: + for (i-- ; i >= 0; i--) + asd_free_coherent(asd_ha, seq->edb_arr[i]); + kfree(seq->edb_arr); + seq->edb_arr = NULL; + + return -ENOMEM; +} + +static int asd_alloc_escbs(struct asd_ha_struct *asd_ha, + unsigned int gfp_flags) +{ + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *escb; + int i, escbs; + + seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr), + gfp_flags); + if (!seq->escb_arr) + return -ENOMEM; + + escbs = seq->num_escbs; + escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags); + if (!escb) { + asd_printk("couldn't allocate list of escbs\n"); + goto Err; + } + seq->num_escbs -= escbs; /* subtract what was not allocated */ + ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs); + + for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next, + struct asd_ascb, + list)) { + seq->escb_arr[i] = escb; + escb->scb->header.opcode = EMPTY_SCB; + } + + return 0; +Err: + kfree(seq->escb_arr); + seq->escb_arr = NULL; + return -ENOMEM; + +} + +static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i, k, z = 0; + + for (i = 0; i < seq->num_escbs; i++) { + struct asd_ascb *ascb = seq->escb_arr[i]; + struct empty_scb *escb = &ascb->scb->escb; + + ascb->edb_index = z; + + escb->num_valid = ASD_EDBS_PER_SCB; + + for (k = 0; k < ASD_EDBS_PER_SCB; k++) { + struct sg_el *eb = &escb->eb[k]; + struct asd_dma_tok *edb = seq->edb_arr[z++]; + + memset(eb, 0, sizeof(*eb)); + eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); + eb->size = cpu_to_le32(((u32) edb->size)); + } + } +} + +/** + * asd_init_escbs -- allocate and initialize empty scbs + * @asd_ha: pointer to host adapter structure + * + * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers. + * They transport sense data, etc. + */ +static int asd_init_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int err = 0; + + /* Allocate two empty data buffers (edb) per sequencer. */ + int edbs = 2*(1+asd_ha->hw_prof.num_phys); + + seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB; + seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB; + + err = asd_alloc_edbs(asd_ha, GFP_KERNEL); + if (err) { + asd_printk("couldn't allocate edbs\n"); + return err; + } + + err = asd_alloc_escbs(asd_ha, GFP_KERNEL); + if (err) { + asd_printk("couldn't allocate escbs\n"); + return err; + } + + asd_assign_edbs2escbs(asd_ha); + /* In order to insure that normal SCBs do not overfill sequencer + * memory and leave no space for escbs (halting condition), + * we increment pending here by the number of escbs. However, + * escbs are never pending. + */ + seq->pending = seq->num_escbs; + seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2; + + return 0; +} + +/* ---------- HW initialization ---------- */ + +/** + * asd_chip_hardrst -- hard reset the chip + * @asd_ha: pointer to host adapter structure + * + * This takes 16 cycles and is synchronous to CFCLK, which runs + * at 200 MHz, so this should take at most 80 nanoseconds. 
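+ * The polling loop below still waits for HARDRSTDET in CHIMINT for up
+ * to 100 microseconds (100 iterations of udelay(1)) before giving up
+ * with -ENODEV.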
+ */
+int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
+{
+ int i;
+ int count = 100;
+ u32 reg;
+
+ for (i = 0 ; i < 4 ; i++) {
+ asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
+ }
+
+ do {
+ udelay(1);
+ reg = asd_read_reg_dword(asd_ha, CHIMINT);
+ if (reg & HARDRSTDET) {
+ asd_write_reg_dword(asd_ha, CHIMINT,
+ HARDRSTDET|PORRSTDET);
+ return 0;
+ }
+ } while (--count > 0);
+
+ return -ENODEV;
+}
+
+/**
+ * asd_init_chip -- initialize the chip
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Hard resets the chip, disables HA interrupts, downloads the sequencer
+ * microcode and starts the sequencers. The caller has to explicitly
+ * enable HA interrupts with asd_enable_ints(asd_ha).
+ */
+static int asd_init_chip(struct asd_ha_struct *asd_ha)
+{
+ int err;
+
+ err = asd_chip_hardrst(asd_ha);
+ if (err) {
+ asd_printk("couldn't hard reset %s\n",
+ pci_name(asd_ha->pcidev));
+ goto out;
+ }
+
+ asd_disable_ints(asd_ha);
+
+ err = asd_init_seqs(asd_ha);
+ if (err) {
+ asd_printk("couldn't init seqs for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto out;
+ }
+
+ err = asd_start_seqs(asd_ha);
+ if (err) {
+ asd_printk("couldn't start seqs for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto out;
+ }
+out:
+ return err;
+}
+
+#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))
+
+static int max_devs = 0;
+module_param_named(max_devs, max_devs, int, S_IRUGO);
+MODULE_PARM_DESC(max_devs, "\n"
+ "\tMaximum number of SAS devices to support (not LUs).\n"
+ "\tDefault: 2176, Maximum: 65663.\n");
+
+static int max_cmnds = 0;
+module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
+MODULE_PARM_DESC(max_cmnds, "\n"
+ "\tMaximum number of commands queueable.\n"
+ "\tDefault: 512, Maximum: 66047.\n");
+
+static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
+{
+ unsigned long dma_addr = OCM_BASE_ADDR;
+ u32 d;
+
+ dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
+ asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
+ d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+ d |= 4;
+ asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+ asd_ha->hw_prof.max_ddbs += MAX_DEVS;
+}
+
+static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
+{
+ dma_addr_t dma_handle;
+ unsigned long dma_addr;
+ u32 d;
+ int size;
+
+ asd_extend_devctx_ocm(asd_ha);
+
+ asd_ha->hw_prof.ddb_ext = NULL;
+ if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
+ max_devs = asd_ha->hw_prof.max_ddbs;
+ return 0;
+ }
+
+ size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;
+
+ asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
+ if (!asd_ha->hw_prof.ddb_ext) {
+ asd_printk("couldn't allocate memory for %d devices\n",
+ max_devs);
+ max_devs = asd_ha->hw_prof.max_ddbs;
+ return -ENOMEM;
+ }
+ dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
+ dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
+ dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
+ dma_handle = (dma_addr_t) dma_addr;
+ asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
+ d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+ d &= ~4;
+ asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+
+ asd_ha->hw_prof.max_ddbs = max_devs;
+
+ return 0;
+}
+
+static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
+{
+ dma_addr_t dma_handle;
+ unsigned long dma_addr;
+ u32 d;
+ int size;
+
+ asd_ha->hw_prof.scb_ext = NULL;
+ if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
+ max_cmnds = asd_ha->hw_prof.max_scbs;
+ return 0;
+ }
+
+ size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;
+
+ asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
+ if (!asd_ha->hw_prof.scb_ext) {
+ asd_printk("couldn't allocate memory for %d commands\n",
+ max_cmnds);
+ max_cmnds = asd_ha->hw_prof.max_scbs;
+ return -ENOMEM;
+ }
+ dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
+ dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
+ dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
+ dma_handle = (dma_addr_t) dma_addr;
+ asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
+ d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+ d &= ~1;
+ asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+
+ asd_ha->hw_prof.max_scbs = max_cmnds;
+
+ return 0;
+}
+
+/**
+ * asd_init_ctxmem -- initialize context memory
+ * @asd_ha: pointer to host adapter structure
+ *
+ * This function sets the maximum number of SCBs and
+ * DDBs which can be used by the sequencer. This is normally
+ * 512 and 128 respectively. If support for more SCBs or more DDBs
+ * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
+ * initialized here to extend context memory to point to host memory,
+ * thus allowing unlimited support for SCBs and DDBs -- only limited
+ * by host memory.
+ */
+static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
+{
+ int bitmap_bytes;
+
+ asd_get_max_scb_ddb(asd_ha);
+ asd_extend_devctx(asd_ha);
+ asd_extend_cmdctx(asd_ha);
+
+ /* The kernel wants bitmaps to be unsigned long sized. */
+ bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
+ bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
+ asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
+ if (!asd_ha->hw_prof.ddb_bitmap)
+ return -ENOMEM;
+ spin_lock_init(&asd_ha->hw_prof.ddb_lock);
+
+ return 0;
+}
+
+int asd_init_hw(struct asd_ha_struct *asd_ha)
+{
+ int err;
+ u32 v;
+
+ err = asd_init_sw(asd_ha);
+ if (err)
+ return err;
+
+ err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
+ if (err) {
+ asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
+ pci_name(asd_ha->pcidev));
+ return err;
+ }
+ err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
+ v | SC_TMR_DIS);
+ if (err) {
+ asd_printk("couldn't disable split completion timer of %s\n",
+ pci_name(asd_ha->pcidev));
+ return err;
+ }
+
+ err = asd_read_ocm(asd_ha);
+ if (err) {
+ asd_printk("couldn't read ocm(%d)\n", err);
+ /* While suspicious, it is not an error that we
+ * couldn't read the OCM. */
+ }
+
+ err = asd_read_flash(asd_ha);
+ if (err) {
+ asd_printk("couldn't read flash(%d)\n", err);
+ /* While suspicious, it is not an error that we
+ * couldn't read FLASH memory.
+ */ + } + + asd_init_ctxmem(asd_ha); + + asd_get_user_sas_addr(asd_ha); + if (!asd_ha->hw_prof.sas_addr[0]) { + asd_printk("No SAS Address provided for %s\n", + pci_name(asd_ha->pcidev)); + err = -ENODEV; + goto Out; + } + + asd_propagate_sas_addr(asd_ha); + + err = asd_init_phys(asd_ha); + if (err) { + asd_printk("couldn't initialize phys for %s\n", + pci_name(asd_ha->pcidev)); + goto Out; + } + + err = asd_init_scbs(asd_ha); + if (err) { + asd_printk("couldn't initialize scbs for %s\n", + pci_name(asd_ha->pcidev)); + goto Out; + } + + err = asd_init_dl(asd_ha); + if (err) { + asd_printk("couldn't initialize the done list:%d\n", + err); + goto Out; + } + + err = asd_init_escbs(asd_ha); + if (err) { + asd_printk("couldn't initialize escbs\n"); + goto Out; + } + + err = asd_init_chip(asd_ha); + if (err) { + asd_printk("couldn't init the chip\n"); + goto Out; + } +Out: + return err; +} + +/* ---------- Chip reset ---------- */ + +/** + * asd_chip_reset -- reset the host adapter, etc + * @asd_ha: pointer to host adapter structure of interest + * + * Called from the ISR. Hard reset the chip. Let everything + * timeout. This should be no different than hot-unplugging the + * host adapter. Once everything times out we'll init the chip with + * a call to asd_init_chip() and enable interrupts with asd_enable_ints(). + * XXX finish. + */ +static void asd_chip_reset(struct asd_ha_struct *asd_ha) +{ + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + + ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev)); + asd_chip_hardrst(asd_ha); + sas_ha->notify_ha_event(sas_ha, HAE_RESET); +} + +/* ---------- Done List Routines ---------- */ + +static void asd_dl_tasklet_handler(unsigned long data) +{ + struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data; + struct asd_seq_data *seq = &asd_ha->seq; + unsigned long flags; + + while (1) { + struct done_list_struct *dl = &seq->dl[seq->dl_next]; + struct asd_ascb *ascb; + + if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle) + break; + + /* find the aSCB */ + spin_lock_irqsave(&seq->tc_index_lock, flags); + ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index)); + spin_unlock_irqrestore(&seq->tc_index_lock, flags); + if (unlikely(!ascb)) { + ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n"); + goto next_1; + } else if (ascb->scb->header.opcode == EMPTY_SCB) { + goto out; + } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { + goto next_1; + } + spin_lock_irqsave(&seq->pend_q_lock, flags); + list_del_init(&ascb->list); + seq->pending--; + spin_unlock_irqrestore(&seq->pend_q_lock, flags); + out: + ascb->tasklet_complete(ascb, dl); + + next_1: + seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1); + if (!seq->dl_next) + seq->dl_toggle ^= DL_TOGGLE_MASK; + } +} + +/* ---------- Interrupt Service Routines ---------- */ + +/** + * asd_process_donelist_isr -- schedule processing of done list entries + * @asd_ha: pointer to host adapter structure + */ +static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) +{ + tasklet_schedule(&asd_ha->seq.dl_tasklet); +} + +/** + * asd_com_sas_isr -- process device communication interrupt (COMINT) + * @asd_ha: pointer to host adapter structure + */ +static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha) +{ + u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); + + /* clear COMSTAT int */ + asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF); + + if (comstat & CSBUFPERR) { + asd_printk("%s: command/status buffer dma parity error\n", + pci_name(asd_ha->pcidev)); + } else if (comstat & CSERR) { 
+ int i; + u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); + dmaerr &= 0xFF; + asd_printk("%s: command/status dma error, DMAERR: 0x%02x, " + "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n", + pci_name(asd_ha->pcidev), + dmaerr, + asd_read_reg_dword(asd_ha, CSDMAADR), + asd_read_reg_dword(asd_ha, CSDMAADR+4)); + asd_printk("CSBUFFER:\n"); + for (i = 0; i < 8; i++) { + asd_printk("%08x %08x %08x %08x\n", + asd_read_reg_dword(asd_ha, CSBUFFER), + asd_read_reg_dword(asd_ha, CSBUFFER+4), + asd_read_reg_dword(asd_ha, CSBUFFER+8), + asd_read_reg_dword(asd_ha, CSBUFFER+12)); + } + asd_dump_seq_state(asd_ha, 0); + } else if (comstat & OVLYERR) { + u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); + dmaerr = (dmaerr >> 8) & 0xFF; + asd_printk("%s: overlay dma error:0x%x\n", + pci_name(asd_ha->pcidev), + dmaerr); + } + asd_chip_reset(asd_ha); +} + +static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) +{ + static const char *halt_code[256] = { + "UNEXPECTED_INTERRUPT0", + "UNEXPECTED_INTERRUPT1", + "UNEXPECTED_INTERRUPT2", + "UNEXPECTED_INTERRUPT3", + "UNEXPECTED_INTERRUPT4", + "UNEXPECTED_INTERRUPT5", + "UNEXPECTED_INTERRUPT6", + "UNEXPECTED_INTERRUPT7", + "UNEXPECTED_INTERRUPT8", + "UNEXPECTED_INTERRUPT9", + "UNEXPECTED_INTERRUPT10", + [11 ... 19] = "unknown[11,19]", + "NO_FREE_SCB_AVAILABLE", + "INVALID_SCB_OPCODE", + "INVALID_MBX_OPCODE", + "INVALID_ATA_STATE", + "ATA_QUEUE_FULL", + "ATA_TAG_TABLE_FAULT", + "ATA_TAG_MASK_FAULT", + "BAD_LINK_QUEUE_STATE", + "DMA2CHIM_QUEUE_ERROR", + "EMPTY_SCB_LIST_FULL", + "unknown[30]", + "IN_USE_SCB_ON_FREE_LIST", + "BAD_OPEN_WAIT_STATE", + "INVALID_STP_AFFILIATION", + "unknown[34]", + "EXEC_QUEUE_ERROR", + "TOO_MANY_EMPTIES_NEEDED", + "EMPTY_REQ_QUEUE_ERROR", + "Q_MONIRTT_MGMT_ERROR", + "TARGET_MODE_FLOW_ERROR", + "DEVICE_QUEUE_NOT_FOUND", + "START_IRTT_TIMER_ERROR", + "ABORT_TASK_ILLEGAL_REQ", + [43 ... 
255] = "unknown[43,255]"
+ };
+
+ if (dchstatus & CSEQINT) {
+ u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);
+
+ if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
+ asd_printk("%s: CSEQ arp2int:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ arp2int);
+ } else if (arp2int & ARP2HALTC)
+ asd_printk("%s: CSEQ halted: %s\n",
+ pci_name(asd_ha->pcidev),
+ halt_code[(arp2int>>16)&0xFF]);
+ else
+ asd_printk("%s: CARP2INT:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ arp2int);
+ }
+ if (dchstatus & LSEQINT_MASK) {
+ int lseq;
+ u8 lseq_mask = dchstatus & LSEQINT_MASK;
+
+ for_each_sequencer(lseq_mask, lseq_mask, lseq) {
+ u32 arp2int = asd_read_reg_dword(asd_ha,
+ LmARP2INT(lseq));
+ if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
+ | ARP2CIOPERR)) {
+ asd_printk("%s: LSEQ%d arp2int:0x%x\n",
+ pci_name(asd_ha->pcidev),
+ lseq, arp2int);
+ /* XXX we should only do lseq reset */
+ } else if (arp2int & ARP2HALTC)
+ asd_printk("%s: LSEQ%d halted: %s\n",
+ pci_name(asd_ha->pcidev),
+ lseq, halt_code[(arp2int>>16)&0xFF]);
+ else
+ asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
+ pci_name(asd_ha->pcidev), lseq,
+ arp2int);
+ }
+ }
+ asd_chip_reset(asd_ha);
+}
+
+/**
+ * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
+ * @asd_ha: pointer to host adapter structure
+ */
+static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
+{
+ u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
+
+ if (dchstatus & CFIFTOERR) {
+ asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
+ asd_chip_reset(asd_ha);
+ } else
+ asd_arp2_err(asd_ha, dchstatus);
+}
+
+/**
+ * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
+ * @asd_ha: pointer to host adapter structure
+ */
+static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
+{
+ u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);
+
+ if (!(stat0r & ASIERR)) {
+ asd_printk("hmm, EXSI interrupted but no error?\n");
+ return;
+ }
+
+ if (stat0r & ASIFMTERR) {
+ asd_printk("ASI SEEPROM format error for %s\n",
+ pci_name(asd_ha->pcidev));
+ } else if (stat0r & ASISEECHKERR) {
+ u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
+ asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
+ stat1r & CHECKSUM_MASK,
+ pci_name(asd_ha->pcidev));
+ } else {
+ u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);
+
+ if (!(statr & CPI2ASIMSTERR_MASK)) {
+ ASD_DPRINTK("hmm, ASIERR?\n");
+ return;
+ } else {
+ u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
+ u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);
+
+ asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
+ "count: 0x%x, byteen: 0x%x, targerr: 0x%x "
+ "master id: 0x%x, master err: 0x%x\n",
+ pci_name(asd_ha->pcidev),
+ addr, data,
+ (statr & CPI2ASIBYTECNT_MASK) >> 16,
+ (statr & CPI2ASIBYTEEN_MASK) >> 12,
+ (statr & CPI2ASITARGERR_MASK) >> 8,
+ (statr & CPI2ASITARGMID_MASK) >> 4,
+ (statr & CPI2ASIMSTERR_MASK));
+ }
+ }
+ asd_chip_reset(asd_ha);
+}
+
+/**
+ * asd_hst_pcix_isr -- process host interface interrupts
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Asserted on PCIX errors: target abort, etc.
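+ * The handler below decodes the PCI status, PCI-X status and ECC
+ * status registers; except for the received and unexpected split
+ * completion cases, which only log and clear the condition, it ends
+ * in a chip reset.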
+ */ +static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) +{ + u16 status; + u32 pcix_status; + u32 ecc_status; + + pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status); + pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status); + pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status); + + if (status & PCI_STATUS_DETECTED_PARITY) + asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_REC_MASTER_ABORT) + asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_REC_TARGET_ABORT) + asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_PARITY) + asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev)); + else if (pcix_status & RCV_SCE) { + asd_printk("received split completion error for %s\n", + pci_name(asd_ha->pcidev)); + pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); + /* XXX: Abort task? */ + return; + } else if (pcix_status & UNEXP_SC) { + asd_printk("unexpected split completion for %s\n", + pci_name(asd_ha->pcidev)); + pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); + /* ignore */ + return; + } else if (pcix_status & SC_DISCARD) + asd_printk("split completion discarded for %s\n", + pci_name(asd_ha->pcidev)); + else if (ecc_status & UNCOR_ECCERR) + asd_printk("uncorrectable ECC error for %s\n", + pci_name(asd_ha->pcidev)); + asd_chip_reset(asd_ha); +} + +/** + * asd_hw_isr -- host adapter interrupt service routine + * @irq: ignored + * @dev_id: pointer to host adapter structure + * @regs: ignored + * + * The ISR processes done list entries and level 3 error handling. + */ +irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + struct asd_ha_struct *asd_ha = dev_id; + u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT); + + if (!chimint) + return IRQ_NONE; + + asd_write_reg_dword(asd_ha, CHIMINT, chimint); + (void) asd_read_reg_dword(asd_ha, CHIMINT); + + if (chimint & DLAVAIL) + asd_process_donelist_isr(asd_ha); + if (chimint & COMINT) + asd_com_sas_isr(asd_ha); + if (chimint & DEVINT) + asd_dch_sas_isr(asd_ha); + if (chimint & INITERR) + asd_rbi_exsi_isr(asd_ha); + if (chimint & HOSTERR) + asd_hst_pcix_isr(asd_ha); + + return IRQ_HANDLED; +} + +/* ---------- SCB handling ---------- */ + +static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, + unsigned int gfp_flags) +{ + extern kmem_cache_t *asd_ascb_cache; + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *ascb; + unsigned long flags; + + ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags); + + if (ascb) { + memset(ascb, 0, sizeof(*ascb)); + ascb->dma_scb.size = sizeof(struct scb); + ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, + gfp_flags, + &ascb->dma_scb.dma_handle); + if (!ascb->dma_scb.vaddr) { + kmem_cache_free(asd_ascb_cache, ascb); + return NULL; + } + memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb)); + asd_init_ascb(asd_ha, ascb); + + spin_lock_irqsave(&seq->tc_index_lock, flags); + ascb->tc_index = asd_tc_index_get(seq, ascb); + spin_unlock_irqrestore(&seq->tc_index_lock, flags); + if (ascb->tc_index == -1) + goto undo; + + ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index); + } + + return ascb; +undo: + dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, + ascb->dma_scb.dma_handle); + kmem_cache_free(asd_ascb_cache, ascb); + ASD_DPRINTK("no index for ascb\n"); + return NULL; +} + +/** + * asd_ascb_alloc_list -- allocate a list of aSCBs + * @asd_ha: pointer to host 
adapter structure + * @num: pointer to integer number of aSCBs + * @gfp_flags: GFP_ flags. + * + * This is the only function which is used to allocate aSCBs. + * It can allocate one or many. If more than one, then they form + * a linked list in two ways: by their list field of the ascb struct + * and by the next_scb field of the scb_header. + * + * Returns NULL if no memory was available, else pointer to a list + * of ascbs. When this function returns, @num would be the number + * of SCBs which were not able to be allocated, 0 if all requested + * were able to be allocated. + */ +struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct + *asd_ha, int *num, + unsigned int gfp_flags) +{ + struct asd_ascb *first = NULL; + + for ( ; *num > 0; --*num) { + struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags); + + if (!ascb) + break; + else if (!first) + first = ascb; + else { + struct asd_ascb *last = list_entry(first->list.prev, + struct asd_ascb, + list); + list_add_tail(&ascb->list, &first->list); + last->scb->header.next_scb = + cpu_to_le64(((u64)ascb->dma_scb.dma_handle)); + } + } + + return first; +} + +/** + * asd_swap_head_scb -- swap the head scb + * @asd_ha: pointer to host adapter structure + * @ascb: pointer to the head of an ascb list + * + * The sequencer knows the DMA address of the next SCB to be DMAed to + * the host adapter, from initialization or from the last list DMAed. + * seq->next_scb keeps the address of this SCB. The sequencer will + * DMA to the host adapter this list of SCBs. But the head (first + * element) of this list is not known to the sequencer. Here we swap + * the head of the list with the known SCB (memcpy()). + * Only one memcpy() is required per list so it is in our interest + * to keep the list of SCB as long as possible so that the ratio + * of number of memcpy calls to the number of SCB DMA-ed is as small + * as possible. + * + * LOCKING: called with the pending list lock held. + */ +static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha, + struct asd_ascb *ascb) +{ + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *last = list_entry(ascb->list.prev, + struct asd_ascb, + list); + struct asd_dma_tok t = ascb->dma_scb; + + memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb)); + ascb->dma_scb = seq->next_scb; + ascb->scb = ascb->dma_scb.vaddr; + seq->next_scb = t; + last->scb->header.next_scb = + cpu_to_le64(((u64)seq->next_scb.dma_handle)); +} + +/** + * asd_start_timers -- (add and) start timers of SCBs + * @list: pointer to struct list_head of the scbs + * @to: timeout in jiffies + * + * If an SCB in the @list has no timer function, assign the default + * one, then start the timer of the SCB. This function is + * intended to be called from asd_post_ascb_list(), just prior to + * posting the SCBs to the sequencer. + */ +static inline void asd_start_scb_timers(struct list_head *list) +{ + struct asd_ascb *ascb; + list_for_each_entry(ascb, list, list) { + if (!ascb->uldd_timer) { + ascb->timer.data = (unsigned long) ascb; + ascb->timer.function = asd_ascb_timedout; + ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; + add_timer(&ascb->timer); + } + } +} + +/** + * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter + * @asd_ha: pointer to a host adapter structure + * @ascb: pointer to the first aSCB in the list + * @num: number of aSCBs in the list (to be posted) + * + * See queueing comment in asd_post_escb_list(). 
+ *
+ * Additional note on queuing: In order to minimize the ratio of memcpy()
+ * to the number of ascbs sent, we try to batch-send as many ascbs as possible
+ * in one go.
+ * Two cases are possible:
+ * A) can_queue >= num,
+ * B) can_queue < num.
+ * Case A: we can send the whole batch at once. Increment "pending"
+ * in the beginning of this function, when it is checked, in order to
+ * eliminate races when this function is called by multiple processes.
+ * Case B: should never happen if the managing layer considers
+ * lldd_queue_size.
+ */
+int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
+ int num)
+{
+ unsigned long flags;
+ LIST_HEAD(list);
+ int can_queue;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
+ if (can_queue >= num)
+ asd_ha->seq.pending += num;
+ else
+ can_queue = 0;
+
+ if (!can_queue) {
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+ asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
+ return -SAS_QUEUE_FULL;
+ }
+
+ asd_swap_head_scb(asd_ha, ascb);
+
+ __list_add(&list, ascb->list.prev, &ascb->list);
+
+ asd_start_scb_timers(&list);
+
+ asd_ha->seq.scbpro += num;
+ list_splice_init(&list, asd_ha->seq.pend_q.prev);
+ asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+ return 0;
+}
+
+/**
+ * asd_post_escb_list -- post a list of 1 or more empty SCBs
+ * @asd_ha: pointer to a host adapter structure
+ * @ascb: pointer to the first empty SCB in the list
+ * @num: number of aSCBs in the list (to be posted)
+ *
+ * This is essentially the same as asd_post_ascb_list, but we do not
+ * increment pending, add those to the pending list or get indexes.
+ * See asd_init_escbs() and asd_init_post_escbs().
+ *
+ * Since sending a list of ascbs is a superset of sending a single
+ * ascb, this function exists to generalize this. More specifically,
+ * when sending a list of those, we want to do only a _single_
+ * memcpy() at swap head, as opposed to for each ascb sent (in the
+ * case of sending them one by one). That is, we want to minimize the
+ * ratio of memcpy() operations to the number of ascbs sent. The same
+ * logic applies to asd_post_ascb_list().
+ */
+int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
+ int num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ asd_swap_head_scb(asd_ha, ascb);
+ asd_ha->seq.scbpro += num;
+ asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+ return 0;
+}
+
+/* ---------- LED ---------- */
+
+/**
+ * asd_turn_led -- turn on/off an LED
+ * @asd_ha: pointer to host adapter structure
+ * @phy_id: the PHY id whose LED we want to manipulate
+ * @op: 1 to turn on, 0 to turn off
+ */
+void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
+{
+ if (phy_id < ASD_MAX_PHYS) {
+ u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
+ if (op)
+ v |= LEDPOL;
+ else
+ v &= ~LEDPOL;
+ asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
+ }
+}
+
+/**
+ * asd_control_led -- enable/disable an LED on the board
+ * @asd_ha: pointer to host adapter structure
+ * @phy_id: integer, the phy id
+ * @op: integer, 1 to enable, 0 to disable the LED
+ *
+ * First we output enable the LED, then we set the source
+ * to be an external module.
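+ * For example, for phy_id 3 the code below sets (op == 1) or clears
+ * (op == 0) bit (1 << 3), first in GPIOOER and then in GPIOCNFGR.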
+ */ +void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op) +{ + if (phy_id < ASD_MAX_PHYS) { + u32 v; + + v = asd_read_reg_dword(asd_ha, GPIOOER); + if (op) + v |= (1 << phy_id); + else + v &= ~(1 << phy_id); + asd_write_reg_dword(asd_ha, GPIOOER, v); + + v = asd_read_reg_dword(asd_ha, GPIOCNFGR); + if (op) + v |= (1 << phy_id); + else + v &= ~(1 << phy_id); + asd_write_reg_dword(asd_ha, GPIOCNFGR, v); + } +} + +/* ---------- PHY enable ---------- */ + +static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id) +{ + struct asd_phy *phy = &asd_ha->phys[phy_id]; + + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY), + HOTPLUG_DELAY_TIMEOUT); + + /* Get defaults from manuf. sector */ + /* XXX we need defaults for those in case MS is broken. */ + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0), + phy->phy_desc->phy_control_0); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1), + phy->phy_desc->phy_control_1); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2), + phy->phy_desc->phy_control_2); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3), + phy->phy_desc->phy_control_3); + + asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id), + ASD_COMINIT_TIMEOUT); + + asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id), + phy->id_frm_tok->dma_handle); + + asd_control_led(asd_ha, phy_id, 1); + + return 0; +} + +int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask) +{ + u8 phy_m; + u8 i; + int num = 0, k; + struct asd_ascb *ascb; + struct asd_ascb *ascb_list; + + if (!phy_mask) { + asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__); + return 0; + } + + for_each_phy(phy_mask, phy_m, i) { + num++; + asd_enable_phy(asd_ha, i); + } + + k = num; + ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL); + if (!ascb_list) { + asd_printk("no memory for control phy ascb list\n"); + return -ENOMEM; + } + num -= k; + + ascb = ascb_list; + for_each_phy(phy_mask, phy_m, i) { + asd_build_control_phy(ascb, i, ENABLE_PHY); + ascb = list_entry(ascb->list.next, struct asd_ascb, list); + } + ASD_DPRINTK("posting %d control phy scbs\n", num); + k = asd_post_ascb_list(asd_ha, ascb_list, num); + if (k) + asd_ascb_free_list(ascb_list); + + return k; +} diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h new file mode 100644 index 0000000..c7d5053 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h @@ -0,0 +1,397 @@ +/* + * Aic94xx SAS/SATA driver hardware interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_HWI_H_ +#define _AIC94XX_HWI_H_ + +#include +#include +#include + +#include + +#include "aic94xx.h" +#include "aic94xx_sas.h" + +/* Define ASD_MAX_PHYS to the maximum phys ever. Currently 8. */ +#define ASD_MAX_PHYS 8 +#define ASD_PCBA_SN_SIZE 12 + +/* Those are to be further named properly, the "RAZORx" part, and + * subsequently included in include/linux/pci_ids.h. + */ +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR10 0x410 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR12 0x412 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR1E 0x41E +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR30 0x430 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR32 0x432 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3E 0x43E +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3F 0x43F + +struct asd_ha_addrspace { + void __iomem *addr; + unsigned long start; /* pci resource start */ + unsigned long len; /* pci resource len */ + unsigned long flags; /* pci resource flags */ + + /* addresses internal to the host adapter */ + u32 swa_base; /* mmspace 1 (MBAR1) uses this only */ + u32 swb_base; + u32 swc_base; +}; + +struct bios_struct { + int present; + u8 maj; + u8 min; + u32 bld; +}; + +struct unit_element_struct { + u16 num; + u16 size; + void *area; +}; + +struct flash_struct { + u32 bar; + int present; + int wide; + u8 manuf; + u8 dev_id; + u8 sec_prot; + + u32 dir_offs; +}; + +struct asd_phy_desc { + /* From CTRL-A settings, then set to what is appropriate */ + u8 sas_addr[SAS_ADDR_SIZE]; + u8 max_sas_lrate; + u8 min_sas_lrate; + u8 max_sata_lrate; + u8 min_sata_lrate; + u8 flags; +#define ASD_CRC_DIS 1 +#define ASD_SATA_SPINUP_HOLD 2 + + u8 phy_control_0; /* mode 5 reg 0x160 */ + u8 phy_control_1; /* mode 5 reg 0x161 */ + u8 phy_control_2; /* mode 5 reg 0x162 */ + u8 phy_control_3; /* mode 5 reg 0x163 */ +}; + +struct asd_dma_tok { + void *vaddr; + dma_addr_t dma_handle; + size_t size; +}; + +struct hw_profile { + struct bios_struct bios; + struct unit_element_struct ue; + struct flash_struct flash; + + u8 sas_addr[SAS_ADDR_SIZE]; + char pcba_sn[ASD_PCBA_SN_SIZE+1]; + + u8 enabled_phys; /* mask of enabled phys */ + struct asd_phy_desc phy_desc[ASD_MAX_PHYS]; + u32 max_scbs; /* absolute sequencer scb queue size */ + struct asd_dma_tok *scb_ext; + u32 max_ddbs; + struct asd_dma_tok *ddb_ext; + + spinlock_t ddb_lock; + void *ddb_bitmap; + + int num_phys; /* ENABLEABLE */ + int max_phys; /* REPORTED + ENABLEABLE */ + + unsigned addr_range; /* max # of addrs; max # of possible ports */ + unsigned port_name_base; + unsigned dev_name_base; + unsigned sata_name_base; +}; + +struct asd_ascb { + struct list_head list; + struct asd_ha_struct *ha; + + struct scb *scb; /* equals dma_scb->vaddr */ + struct asd_dma_tok dma_scb; + struct asd_dma_tok *sg_arr; + + void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *); + u8 uldd_timer:1; + + /* internally generated command */ + struct timer_list timer; + struct completion completion; + u8 tag_valid:1; + __be16 tag; /* error recovery only */ + + /* If this is an Empty SCB, index of first edb in seq->edb_arr. */ + int edb_index; + + /* Used by the timer timeout function. 
*/ + int tc_index; + + void *uldd_task; +}; + +#define ASD_DL_SIZE_BITS 0x8 +#define ASD_DL_SIZE (1<<(2+ASD_DL_SIZE_BITS)) +#define ASD_DEF_DL_TOGGLE 0x01 + +struct asd_seq_data { + spinlock_t pend_q_lock; + u16 scbpro; + int pending; + struct list_head pend_q; + int can_queue; /* per adapter */ + struct asd_dma_tok next_scb; /* next scb to be delivered to CSEQ */ + + spinlock_t tc_index_lock; + void **tc_index_array; + void *tc_index_bitmap; + int tc_index_bitmap_bits; + + struct tasklet_struct dl_tasklet; + struct done_list_struct *dl; /* array of done list entries, equals */ + struct asd_dma_tok *actual_dl; /* actual_dl->vaddr */ + int dl_toggle; + int dl_next; + + int num_edbs; + struct asd_dma_tok **edb_arr; + int num_escbs; + struct asd_ascb **escb_arr; /* array of pointers to escbs */ +}; + +/* This is the Host Adapter structure. It describes the hardware + * SAS adapter. + */ +struct asd_ha_struct { + struct pci_dev *pcidev; + const char *name; + + struct sas_ha_struct sas_ha; + + u8 revision_id; + + int iospace; + spinlock_t iolock; + struct asd_ha_addrspace io_handle[2]; + + struct hw_profile hw_prof; + + struct asd_phy phys[ASD_MAX_PHYS]; + struct asd_sas_port ports[ASD_MAX_PHYS]; + + struct dma_pool *scb_pool; + + struct asd_seq_data seq; /* sequencer related */ +}; + +/* ---------- Common macros ---------- */ + +#define ASD_BUSADDR_LO(__dma_handle) ((u32)(__dma_handle)) +#define ASD_BUSADDR_HI(__dma_handle) (((sizeof(dma_addr_t))==8) \ + ? ((u32)((__dma_handle) >> 32)) \ + : ((u32)0)) + +#define dev_to_asd_ha(__dev) pci_get_drvdata(to_pci_dev(__dev)) +#define SCB_SITE_VALID(__site_no) (((__site_no) & 0xF0FF) != 0x00FF \ + && ((__site_no) & 0xF0FF) > 0x001F) +/* For each bit set in __lseq_mask, set __lseq to equal the bit + * position of the set bit and execute the statement following. + * __mc is the temporary mask, used as a mask "counter". 
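+ * E.g. for __lseq_mask == 0x05 the controlled statement runs twice,
+ * with __lseq == 0 and __lseq == 2.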
+ */
+#define for_each_sequencer(__lseq_mask, __mc, __lseq) \
+ for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
+ if (((__mc) & 1))
+#define for_each_phy(__lseq_mask, __mc, __lseq) \
+ for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
+ if (((__mc) & 1))
+
+#define PHY_ENABLED(_HA, _I) ((_HA)->hw_prof.enabled_phys & (1<<(_I)))
+
+/* ---------- DMA allocs ---------- */
+
+static inline struct asd_dma_tok *asd_dmatok_alloc(unsigned int flags)
+{
+ return kmem_cache_alloc(asd_dma_token_cache, flags);
+}
+
+static inline void asd_dmatok_free(struct asd_dma_tok *token)
+{
+ kmem_cache_free(asd_dma_token_cache, token);
+}
+
+static inline struct asd_dma_tok *asd_alloc_coherent(struct asd_ha_struct *
+ asd_ha, size_t size,
+ unsigned int flags)
+{
+ struct asd_dma_tok *token = asd_dmatok_alloc(flags);
+ if (token) {
+ token->size = size;
+ token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev,
+ token->size,
+ &token->dma_handle,
+ flags);
+ if (!token->vaddr) {
+ asd_dmatok_free(token);
+ token = NULL;
+ }
+ }
+ return token;
+}
+
+static inline void asd_free_coherent(struct asd_ha_struct *asd_ha,
+ struct asd_dma_tok *token)
+{
+ if (token) {
+ dma_free_coherent(&asd_ha->pcidev->dev, token->size,
+ token->vaddr, token->dma_handle);
+ asd_dmatok_free(token);
+ }
+}
+
+static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
+ struct asd_ascb *ascb)
+{
+ INIT_LIST_HEAD(&ascb->list);
+ ascb->scb = ascb->dma_scb.vaddr;
+ ascb->ha = asd_ha;
+ ascb->timer.function = NULL;
+ init_timer(&ascb->timer);
+ ascb->tc_index = -1;
+ init_completion(&ascb->completion);
+}
+
+/* Must be called with the tc_index_lock held!
+ */
+static inline void asd_tc_index_release(struct asd_seq_data *seq, int index)
+{
+ seq->tc_index_array[index] = NULL;
+ clear_bit(index, seq->tc_index_bitmap);
+}
+
+/* Must be called with the tc_index_lock held!
+ */
+static inline int asd_tc_index_get(struct asd_seq_data *seq, void *ptr)
+{
+ int index;
+
+ index = find_first_zero_bit(seq->tc_index_bitmap,
+ seq->tc_index_bitmap_bits);
+ if (index == seq->tc_index_bitmap_bits)
+ return -1;
+
+ seq->tc_index_array[index] = ptr;
+ set_bit(index, seq->tc_index_bitmap);
+
+ return index;
+}
+
+/* Must be called with the tc_index_lock held!
+ */
+static inline void *asd_tc_index_find(struct asd_seq_data *seq, int index)
+{
+ return seq->tc_index_array[index];
+}
+
+/**
+ * asd_ascb_free -- free a single aSCB after it has completed
+ * @ascb: pointer to the aSCB of interest
+ *
+ * This frees an aSCB after it has been executed/completed by
+ * the sequencer.
+ */
+static inline void asd_ascb_free(struct asd_ascb *ascb)
+{
+ if (ascb) {
+ struct asd_ha_struct *asd_ha = ascb->ha;
+ unsigned long flags;
+
+ BUG_ON(!list_empty(&ascb->list));
+ spin_lock_irqsave(&ascb->ha->seq.tc_index_lock, flags);
+ asd_tc_index_release(&ascb->ha->seq, ascb->tc_index);
+ spin_unlock_irqrestore(&ascb->ha->seq.tc_index_lock, flags);
+ dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
+ ascb->dma_scb.dma_handle);
+ kmem_cache_free(asd_ascb_cache, ascb);
+ }
+}
+
+/**
+ * asd_ascb_list_free -- free a list of ascbs
+ * @ascb_list: a list of ascbs
+ *
+ * This function will free a list of ascbs allocated by asd_ascb_alloc_list.
+ * It is used when, say, the scb queueing function returned QUEUE_FULL,
+ * and we do not need the ascbs any more.
+ */ +static inline void asd_ascb_free_list(struct asd_ascb *ascb_list) +{ + LIST_HEAD(list); + struct list_head *n, *pos; + + __list_add(&list, ascb_list->list.prev, &ascb_list->list); + list_for_each_safe(pos, n, &list) { + list_del_init(pos); + asd_ascb_free(list_entry(pos, struct asd_ascb, list)); + } +} + +/* ---------- Function declarations ---------- */ + +int asd_init_hw(struct asd_ha_struct *asd_ha); +irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs); + + +struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct + *asd_ha, int *num, + unsigned int gfp_mask); + +int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num); +int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num); + +int asd_init_post_escbs(struct asd_ha_struct *asd_ha); +void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc); +void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op); +void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op); +int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask); +void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, + u8 subfunc); + +void asd_ascb_timedout(unsigned long data); +int asd_chip_hardrst(struct asd_ha_struct *asd_ha); + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c new file mode 100644 index 0000000..3ec2e46 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -0,0 +1,860 @@ +/* + * Aic94xx SAS/SATA driver initialization. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" + +/* The format is "version.release.patchlevel" */ +#define ASD_DRIVER_VERSION "1.0.2" + +static int use_msi = 0; +module_param_named(use_msi, use_msi, int, S_IRUGO); +MODULE_PARM_DESC(use_msi, "\n" + "\tEnable(1) or disable(0) using PCI MSI.\n" + "\tDefault: 0"); + +static int lldd_max_execute_num = 0; +module_param_named(collector, lldd_max_execute_num, int, S_IRUGO); +MODULE_PARM_DESC(collector, "\n" + "\tIf greater than one, tells the SAS Layer to run in Task Collector\n" + "\tMode. 
If 1 or 0, tells the SAS Layer to run in Direct Mode.\n" + "\tThe aic94xx SAS LLDD supports both modes.\n" + "\tDefault: 0 (Direct Mode).\n"); + +char sas_addr_str[2*SAS_ADDR_SIZE + 1] = ""; + +static struct scsi_transport_template *aic94xx_transport_template; + +static struct scsi_host_template aic94xx_sht = { + .module = THIS_MODULE, + /* .name is initialized */ + .name = "aic94xx", + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .slave_destroy = sas_slave_destroy, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = 1, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, +}; + +static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha) +{ + int err, i; + struct asd_ha_addrspace *io_handle; + + asd_ha->iospace = 0; + for (i = 0; i < 3; i += 2) { + io_handle = &asd_ha->io_handle[i==0?0:1]; + io_handle->start = pci_resource_start(asd_ha->pcidev, i); + io_handle->len = pci_resource_len(asd_ha->pcidev, i); + io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); + err = -ENODEV; + if (!io_handle->start || !io_handle->len) { + asd_printk("MBAR%d start or length for %s is 0.\n", + i==0?0:1, pci_name(asd_ha->pcidev)); + goto Err; + } + err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); + if (err) { + asd_printk("couldn't reserve memory region for %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + if (io_handle->flags & IORESOURCE_CACHEABLE) + io_handle->addr = ioremap(io_handle->start, + io_handle->len); + else + io_handle->addr = ioremap_nocache(io_handle->start, + io_handle->len); + if (!io_handle->addr) { + asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1, + pci_name(asd_ha->pcidev)); + goto Err_unreq; + } + } + + return 0; +Err_unreq: + pci_release_region(asd_ha->pcidev, i); +Err: + if (i > 0) { + io_handle = &asd_ha->io_handle[0]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 0); + } + return err; +} + +static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha) +{ + struct asd_ha_addrspace *io_handle; + + io_handle = &asd_ha->io_handle[1]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 2); + + io_handle = &asd_ha->io_handle[0]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 0); +} + +static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha) +{ + int i = PCI_IOBAR_OFFSET, err; + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; + + asd_ha->iospace = 1; + io_handle->start = pci_resource_start(asd_ha->pcidev, i); + io_handle->len = pci_resource_len(asd_ha->pcidev, i); + io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); + io_handle->addr = (void __iomem *) io_handle->start; + if (!io_handle->start || !io_handle->len) { + asd_printk("couldn't get IO ports for %s\n", + pci_name(asd_ha->pcidev)); + return -ENODEV; + } + err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); + if (err) { + asd_printk("couldn't reserve io space for %s\n", + pci_name(asd_ha->pcidev)); + } + + return err; +} + +static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha) +{ + pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET); +} + +static int __devinit asd_map_ha(struct asd_ha_struct *asd_ha) +{ + int err; + u16 cmd_reg; + + err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg); + if (err) { + asd_printk("couldn't read command 
register of %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + + err = -ENODEV; + if (cmd_reg & PCI_COMMAND_MEMORY) { + if ((err = asd_map_memio(asd_ha))) + goto Err; + } else if (cmd_reg & PCI_COMMAND_IO) { + if ((err = asd_map_ioport(asd_ha))) + goto Err; + asd_printk("%s ioport mapped -- upgrade your hardware\n", + pci_name(asd_ha->pcidev)); + } else { + asd_printk("no proper device access to %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + + return 0; +Err: + return err; +} + +static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha) +{ + if (asd_ha->iospace) + asd_unmap_ioport(asd_ha); + else + asd_unmap_memio(asd_ha); +} + +static const char *asd_dev_rev[30] = { + [0] = "A0", + [1] = "A1", + [8] = "B0", +}; + +static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha) +{ + int err, i; + + err = pci_read_config_byte(asd_ha->pcidev, PCI_REVISION_ID, + &asd_ha->revision_id); + if (err) { + asd_printk("couldn't read REVISION ID register of %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + err = -ENODEV; + if (asd_ha->revision_id < AIC9410_DEV_REV_B0) { + asd_printk("%s is revision %s (%X), which is not supported\n", + pci_name(asd_ha->pcidev), + asd_dev_rev[asd_ha->revision_id], + asd_ha->revision_id); + goto Err; + } + /* Provide some sane default values. */ + asd_ha->hw_prof.max_scbs = 512; + asd_ha->hw_prof.max_ddbs = 128; + asd_ha->hw_prof.num_phys = ASD_MAX_PHYS; + /* All phys are enabled, by default. */ + asd_ha->hw_prof.enabled_phys = 0xFF; + for (i = 0; i < ASD_MAX_PHYS; i++) { + asd_ha->hw_prof.phy_desc[i].max_sas_lrate = PHY_LINKRATE_3; + asd_ha->hw_prof.phy_desc[i].min_sas_lrate = PHY_LINKRATE_1_5; + asd_ha->hw_prof.phy_desc[i].max_sata_lrate= PHY_LINKRATE_1_5; + asd_ha->hw_prof.phy_desc[i].min_sata_lrate= PHY_LINKRATE_1_5; + } + + return 0; +Err: + return err; +} + +static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha) +{ + int err = asd_common_setup(asd_ha); + + if (err) + return err; + + asd_ha->hw_prof.addr_range = 8; + asd_ha->hw_prof.port_name_base = 0; + asd_ha->hw_prof.dev_name_base = 8; + asd_ha->hw_prof.sata_name_base = 16; + + return 0; +} + +static int __devinit asd_aic9405_setup(struct asd_ha_struct *asd_ha) +{ + int err = asd_common_setup(asd_ha); + + if (err) + return err; + + asd_ha->hw_prof.addr_range = 4; + asd_ha->hw_prof.port_name_base = 0; + asd_ha->hw_prof.dev_name_base = 4; + asd_ha->hw_prof.sata_name_base = 8; + + return 0; +} + +static ssize_t asd_show_dev_rev(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", + asd_dev_rev[asd_ha->revision_id]); +} +static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); + +static ssize_t asd_show_dev_bios_build(struct device *dev, + struct device_attribute *attr,char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld); +} +static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL); + +static ssize_t asd_show_dev_pcba_sn(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn); +} +static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL); + +static void asd_create_dev_attrs(struct asd_ha_struct *asd_ha) +{ + device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); + 
device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); +} + +static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) +{ + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); +} + +/* The first entry, 0, is used for dynamic ids, the rest for devices + * we know about. + */ +static struct asd_pcidev_struct { + const char * name; + int (*setup)(struct asd_ha_struct *asd_ha); +} asd_pcidev_data[] = { + /* Id 0 is used for dynamic ids. */ + { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter", + .setup = asd_aic9410_setup + }, + { .name = "Adaptec AIC-9410W SAS/SATA Host Adapter", + .setup = asd_aic9410_setup + }, + { .name = "Adaptec AIC-9405W SAS/SATA Host Adapter", + .setup = asd_aic9405_setup + }, +}; + +static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha) +{ + asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool", + &asd_ha->pcidev->dev, + sizeof(struct scb), + 8, 0); + if (!asd_ha->scb_pool) { + asd_printk("couldn't create scb pool\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * asd_free_edbs -- free empty data buffers + * asd_ha: pointer to host adapter structure + */ +static inline void asd_free_edbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_edbs; i++) + asd_free_coherent(asd_ha, seq->edb_arr[i]); + kfree(seq->edb_arr); + seq->edb_arr = NULL; +} + +static inline void asd_free_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_escbs; i++) { + if (!list_empty(&seq->escb_arr[i]->list)) + list_del_init(&seq->escb_arr[i]->list); + + asd_ascb_free(seq->escb_arr[i]); + } + kfree(seq->escb_arr); + seq->escb_arr = NULL; +} + +static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha) +{ + int i; + + if (asd_ha->hw_prof.ddb_ext) + asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext); + if (asd_ha->hw_prof.scb_ext) + asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext); + + if (asd_ha->hw_prof.ddb_bitmap) + kfree(asd_ha->hw_prof.ddb_bitmap); + asd_ha->hw_prof.ddb_bitmap = NULL; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + struct asd_phy *phy = &asd_ha->phys[i]; + + asd_free_coherent(asd_ha, phy->id_frm_tok); + } + if (asd_ha->seq.escb_arr) + asd_free_escbs(asd_ha); + if (asd_ha->seq.edb_arr) + asd_free_edbs(asd_ha); + if (asd_ha->hw_prof.ue.area) { + kfree(asd_ha->hw_prof.ue.area); + asd_ha->hw_prof.ue.area = NULL; + } + if (asd_ha->seq.tc_index_array) { + kfree(asd_ha->seq.tc_index_array); + kfree(asd_ha->seq.tc_index_bitmap); + asd_ha->seq.tc_index_array = NULL; + asd_ha->seq.tc_index_bitmap = NULL; + } + if (asd_ha->seq.actual_dl) { + asd_free_coherent(asd_ha, asd_ha->seq.actual_dl); + asd_ha->seq.actual_dl = NULL; + asd_ha->seq.dl = NULL; + } + if (asd_ha->seq.next_scb.vaddr) { + dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr, + asd_ha->seq.next_scb.dma_handle); + asd_ha->seq.next_scb.vaddr = NULL; + } + dma_pool_destroy(asd_ha->scb_pool); + asd_ha->scb_pool = NULL; +} + +kmem_cache_t *asd_dma_token_cache; +kmem_cache_t *asd_ascb_cache; + +static int asd_create_global_caches(void) +{ + if (!asd_dma_token_cache) { + asd_dma_token_cache + = kmem_cache_create(ASD_DRIVER_NAME "_dma_token", + sizeof(struct asd_dma_tok), + 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!asd_dma_token_cache) { + asd_printk("couldn't create dma token cache\n"); + return -ENOMEM; + } + 
} + + if (!asd_ascb_cache) { + asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb", + sizeof(struct asd_ascb), + 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!asd_ascb_cache) { + asd_printk("couldn't create ascb cache\n"); + goto Err; + } + } + + return 0; +Err: + kmem_cache_destroy(asd_dma_token_cache); + asd_dma_token_cache = NULL; + return -ENOMEM; +} + +static void asd_destroy_global_caches(void) +{ + if (asd_dma_token_cache) + kmem_cache_destroy(asd_dma_token_cache); + asd_dma_token_cache = NULL; + + if (asd_ascb_cache) + kmem_cache_destroy(asd_ascb_cache); + asd_ascb_cache = NULL; +} + +static int asd_register_sas_ha(struct asd_ha_struct *asd_ha) +{ + int i; + struct asd_sas_phy **sas_phys = + kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_phy), GFP_KERNEL); + struct asd_sas_port **sas_ports = + kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_port), GFP_KERNEL); + + if (!sas_phys || !sas_ports) { + kfree(sas_phys); + kfree(sas_ports); + return -ENOMEM; + } + + asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name; + asd_ha->sas_ha.lldd_module = THIS_MODULE; + asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0]; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + sas_phys[i] = &asd_ha->phys[i].sas_phy; + sas_ports[i] = &asd_ha->ports[i]; + } + + asd_ha->sas_ha.sas_phy = sas_phys; + asd_ha->sas_ha.sas_port= sas_ports; + asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; + + asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue; + + return sas_register_ha(&asd_ha->sas_ha); +} + +static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha) +{ + int err; + + err = sas_unregister_ha(&asd_ha->sas_ha); + + sas_remove_host(asd_ha->sas_ha.core.shost); + scsi_remove_host(asd_ha->sas_ha.core.shost); + scsi_host_put(asd_ha->sas_ha.core.shost); + + kfree(asd_ha->sas_ha.sas_phy); + kfree(asd_ha->sas_ha.sas_port); + + return err; +} + +static int __devinit asd_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct asd_pcidev_struct *asd_dev; + unsigned asd_id = (unsigned) id->driver_data; + struct asd_ha_struct *asd_ha; + struct Scsi_Host *shost; + int err; + + if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) { + asd_printk("wrong driver_data in PCI table\n"); + return -ENODEV; + } + + if ((err = pci_enable_device(dev))) { + asd_printk("couldn't enable device %s\n", pci_name(dev)); + return err; + } + + pci_set_master(dev); + + err = -ENOMEM; + + shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *)); + if (!shost) + goto Err; + + asd_dev = &asd_pcidev_data[asd_id]; + + asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL); + if (!asd_ha) { + asd_printk("out of memory\n"); + goto Err; + } + asd_ha->pcidev = dev; + asd_ha->sas_ha.pcidev = asd_ha->pcidev; + asd_ha->sas_ha.lldd_ha = asd_ha; + + asd_ha->name = asd_dev->name; + asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev)); + + SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha; + asd_ha->sas_ha.core.shost = shost; + shost->transportt = aic94xx_transport_template; + shost->max_id = ~0; + shost->max_lun = ~0; + shost->max_cmd_len = 16; + + err = scsi_add_host(shost, &dev->dev); + if (err) { + scsi_host_put(shost); + goto Err_free; + } + + + + err = asd_dev->setup(asd_ha); + if (err) + goto Err_free; + + err = -ENODEV; + if (!pci_set_dma_mask(dev, DMA_64BIT_MASK) + && !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK)) + ; + else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK) + && !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) + ; + else { + asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); + goto Err_free; + } + + pci_set_drvdata(dev, asd_ha); + 
+ err = asd_map_ha(asd_ha);
+ if (err)
+ goto Err_free;
+
+ err = asd_create_ha_caches(asd_ha);
+ if (err)
+ goto Err_unmap;
+
+ err = asd_init_hw(asd_ha);
+ if (err)
+ goto Err_free_cache;
+
+ asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled "
+ "phys, flash %s, BIOS %s%d\n",
+ pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr),
+ asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys,
+ asd_ha->hw_prof.num_phys,
+ asd_ha->hw_prof.flash.present ? "present" : "not present",
+ asd_ha->hw_prof.bios.present ? "build " : "not present",
+ asd_ha->hw_prof.bios.bld);
+
+ if (use_msi)
+ pci_enable_msi(asd_ha->pcidev);
+
+ err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, SA_SHIRQ,
+ ASD_DRIVER_NAME, asd_ha);
+ if (err) {
+ asd_printk("couldn't get irq %d for %s\n",
+ asd_ha->pcidev->irq, pci_name(asd_ha->pcidev));
+ goto Err_irq;
+ }
+ asd_enable_ints(asd_ha);
+
+ err = asd_init_post_escbs(asd_ha);
+ if (err) {
+ asd_printk("couldn't post escbs for %s\n",
+ pci_name(asd_ha->pcidev));
+ goto Err_escbs;
+ }
+ ASD_DPRINTK("escbs posted\n");
+
+ asd_create_dev_attrs(asd_ha);
+
+ err = asd_register_sas_ha(asd_ha);
+ if (err)
+ goto Err_reg_sas;
+
+ err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
+ if (err) {
+ asd_printk("couldn't enable phys, err:%d\n", err);
+ goto Err_en_phys;
+ }
+ ASD_DPRINTK("enabled phys\n");
+ /* give the phy enabling interrupt event time to come in (1s
+ * is empirically about all it takes) */
+ ssleep(1);
+ /* Wait for discovery to finish */
+ scsi_flush_work(asd_ha->sas_ha.core.shost);
+
+ return 0;
+Err_en_phys:
+ asd_unregister_sas_ha(asd_ha);
+Err_reg_sas:
+ asd_remove_dev_attrs(asd_ha);
+Err_escbs:
+ asd_disable_ints(asd_ha);
+ free_irq(dev->irq, asd_ha);
+Err_irq:
+ if (use_msi)
+ pci_disable_msi(dev);
+ asd_chip_hardrst(asd_ha);
+Err_free_cache:
+ asd_destroy_ha_caches(asd_ha);
+Err_unmap:
+ asd_unmap_ha(asd_ha);
+Err_free:
+ kfree(asd_ha);
+ scsi_remove_host(shost);
+Err:
+ pci_disable_device(dev);
+ return err;
+}
+
+static void asd_free_queues(struct asd_ha_struct *asd_ha)
+{
+ unsigned long flags;
+ LIST_HEAD(pending);
+ struct list_head *n, *pos;
+
+ spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
+ asd_ha->seq.pending = 0;
+ list_splice_init(&asd_ha->seq.pend_q, &pending);
+ spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
+
+ if (!list_empty(&pending))
+ ASD_DPRINTK("Uh-oh! Pending is not empty!\n");
+
+ list_for_each_safe(pos, n, &pending) {
+ struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
+ list_del_init(pos);
+ ASD_DPRINTK("freeing from pending\n");
+ asd_ascb_free(ascb);
+ }
+}
+
+static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
+{
+ u8 phy_mask = asd_ha->hw_prof.enabled_phys;
+ u8 i;
+
+ for_each_phy(phy_mask, phy_mask, i) {
+ asd_turn_led(asd_ha, i, 0);
+ asd_control_led(asd_ha, i, 0);
+ }
+}
+
+static void __devexit asd_pci_remove(struct pci_dev *dev)
+{
+ struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
+
+ if (!asd_ha)
+ return;
+
+ asd_unregister_sas_ha(asd_ha);
+
+ asd_disable_ints(asd_ha);
+
+ asd_remove_dev_attrs(asd_ha);
+
+ /* XXX more here as needed */
+
+ free_irq(dev->irq, asd_ha);
+ if (use_msi)
+ pci_disable_msi(asd_ha->pcidev);
+ asd_turn_off_leds(asd_ha);
+ asd_chip_hardrst(asd_ha);
+ asd_free_queues(asd_ha);
+ asd_destroy_ha_caches(asd_ha);
+ asd_unmap_ha(asd_ha);
+ kfree(asd_ha);
+ pci_disable_device(dev);
+ return;
+}
+
+static ssize_t asd_version_show(struct device_driver *driver, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
+}
+static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
+
+static void asd_create_driver_attrs(struct device_driver *driver)
+{
+ driver_create_file(driver, &driver_attr_version);
+}
+
+static void asd_remove_driver_attrs(struct device_driver *driver)
+{
+ driver_remove_file(driver, &driver_attr_version);
+}
+
+static struct sas_domain_function_template aic94xx_transport_functions = {
+ .lldd_port_formed = asd_update_port_links,
+
+ .lldd_dev_found = asd_dev_found,
+ .lldd_dev_gone = asd_dev_gone,
+
+ .lldd_execute_task = asd_execute_task,
+
+ .lldd_abort_task = asd_abort_task,
+ .lldd_abort_task_set = asd_abort_task_set,
+ .lldd_clear_aca = asd_clear_aca,
+ .lldd_clear_task_set = asd_clear_task_set,
+ .lldd_I_T_nexus_reset = NULL,
+ .lldd_lu_reset = asd_lu_reset,
+ .lldd_query_task = asd_query_task,
+
+ .lldd_clear_nexus_port = asd_clear_nexus_port,
+ .lldd_clear_nexus_ha = asd_clear_nexus_ha,
+
+ .lldd_control_phy = asd_control_phy,
+};
+
+static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR10),
+ 0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR12),
+ 0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR1E),
+ 0, 0, 1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR30),
+ 0, 0, 2},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR32),
+ 0, 0, 2},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3E),
+ 0, 0, 2},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3F),
+ 0, 0, 2},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, aic94xx_pci_table);
+
+static struct pci_driver aic94xx_pci_driver = {
+ .name = ASD_DRIVER_NAME,
+ .id_table = aic94xx_pci_table,
+ .probe = asd_pci_probe,
+ .remove = __devexit_p(asd_pci_remove),
+};
+
+static int __init aic94xx_init(void)
+{
+ int err;
+
+
+ asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION,
+ ASD_DRIVER_VERSION);
+
+ err = asd_create_global_caches();
+ if (err)
+ return err;
+
+ aic94xx_transport_template =
+ sas_domain_attach_transport(&aic94xx_transport_functions);
+ if (!aic94xx_transport_template) {
+ err = -ENOMEM;
+ goto out_destroy_caches;
+ }
+
+ err = pci_register_driver(&aic94xx_pci_driver);
+ if (err)
+ goto out_release_transport;
+
+ asd_create_driver_attrs(&aic94xx_pci_driver.driver);
+
+ return err;
+
+ out_release_transport:
+
sas_release_transport(aic94xx_transport_template); + out_destroy_caches: + asd_destroy_global_caches(); + + return err; +} + +static void __exit aic94xx_exit(void) +{ + asd_remove_driver_attrs(&aic94xx_pci_driver.driver); + pci_unregister_driver(&aic94xx_pci_driver); + sas_release_transport(aic94xx_transport_template); + asd_destroy_global_caches(); + asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION, + ASD_DRIVER_VERSION); +} + +module_init(aic94xx_init); +module_exit(aic94xx_exit); + +MODULE_AUTHOR("Luben Tuikov "); +MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(ASD_DRIVER_VERSION); diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c new file mode 100644 index 0000000..f210dac --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg.c @@ -0,0 +1,332 @@ +/* + * Aic94xx SAS/SATA driver register access. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include "aic94xx_reg.h" +#include "aic94xx.h" + +/* Writing to device address space. + * Offset comes before value to remind that the operation of + * this function is *offs = val. + */ +static inline void asd_write_byte(struct asd_ha_struct *asd_ha, + unsigned long offs, u8 val) +{ + if (unlikely(asd_ha->iospace)) + outb(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writeb(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +static inline void asd_write_word(struct asd_ha_struct *asd_ha, + unsigned long offs, u16 val) +{ + if (unlikely(asd_ha->iospace)) + outw(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writew(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +static inline void asd_write_dword(struct asd_ha_struct *asd_ha, + unsigned long offs, u32 val) +{ + if (unlikely(asd_ha->iospace)) + outl(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writel(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +/* Reading from device address space. 
+ */ +static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u8 val; + if (unlikely(asd_ha->iospace)) + val = inb((unsigned long) asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readb(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static inline u16 asd_read_word(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u16 val; + if (unlikely(asd_ha->iospace)) + val = inw((unsigned long)asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readw(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static inline u32 asd_read_dword(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u32 val; + if (unlikely(asd_ha->iospace)) + val = inl((unsigned long) asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readl(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static inline u32 asd_mem_offs_swa(void) +{ + return 0; +} + +static inline u32 asd_mem_offs_swc(void) +{ + return asd_mem_offs_swa() + MBAR0_SWA_SIZE; +} + +static inline u32 asd_mem_offs_swb(void) +{ + return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20; +} + +/* We know that the register wanted is in the range + * of the sliding window. + */ +#define ASD_READ_SW(ww, type, ord) \ +static inline type asd_read_##ww##_##ord (struct asd_ha_struct *asd_ha,\ + u32 reg) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ + u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\ + return asd_read_##ord (asd_ha, (unsigned long) map_offs); \ +} + +#define ASD_WRITE_SW(ww, type, ord) \ +static inline void asd_write_##ww##_##ord (struct asd_ha_struct *asd_ha,\ + u32 reg, type val) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ + u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\ + asd_write_##ord (asd_ha, (unsigned long) map_offs, val); \ +} + +ASD_READ_SW(swa, u8, byte); +ASD_READ_SW(swa, u16, word); +ASD_READ_SW(swa, u32, dword); + +ASD_READ_SW(swb, u8, byte); +ASD_READ_SW(swb, u16, word); +ASD_READ_SW(swb, u32, dword); + +ASD_READ_SW(swc, u8, byte); +ASD_READ_SW(swc, u16, word); +ASD_READ_SW(swc, u32, dword); + +ASD_WRITE_SW(swa, u8, byte); +ASD_WRITE_SW(swa, u16, word); +ASD_WRITE_SW(swa, u32, dword); + +ASD_WRITE_SW(swb, u8, byte); +ASD_WRITE_SW(swb, u16, word); +ASD_WRITE_SW(swb, u32, dword); + +ASD_WRITE_SW(swc, u8, byte); +ASD_WRITE_SW(swc, u16, word); +ASD_WRITE_SW(swc, u32, dword); + +/* + * A word about sliding windows: + * MBAR0 is divided into sliding windows A, C and B, in that order. + * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes. + * SWC starts at offset 0x58 of MBAR0, up to 0x60, with size 0x8 bytes. + * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F. + * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0. + * See asd_init_sw() in aic94xx_hwi.c + * + * We map the most common registers we'd access of the internal 4GB + * host adapter memory space. If a register/internal memory location + * is wanted which is not mapped, we slide SWB, by paging it, + * see asd_move_swb() in aic94xx_reg.c. 
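 *
 * A worked example of the dispatch (a sketch; the concrete window bases are
 * assumptions, since at run time they depend on how asd_init_sw() set up the
 * PCI_CONF_MBAR0_SWA/SWB/SWC windows):
 *
 *	asd_write_reg_dword(asd_ha, COMSTAT, val);
 *		COMSTAT is REG_BASE_ADDR + 0x04; if swa_base covers
 *		REG_BASE_ADDR this lands inside SWA and goes straight out
 *		through asd_write_swa_dword().
 *
 *	asd_write_reg_dword(asd_ha, LmARP2CTL(0), val);
 *		LmARP2CTL(0) is 0xB8020000, outside SWA and SWC.  Unless SWB
 *		already happens to cover it, asd_move_swb() first re-bases
 *		SWB to (reg & ~(MBAR0_SWB_SIZE - 1)) and the write is then
 *		issued at MBAR0 offset asd_mem_offs_swb() + (reg - swb_base).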
+ */ + +/** + * asd_move_swb -- move sliding window B + * @asd_ha: pointer to host adapter structure + * @reg: register desired to be within range of the new window + */ +static inline void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg) +{ + u32 base = reg & ~(MBAR0_SWB_SIZE-1); + pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base); + asd_ha->io_handle[0].swb_base = base; +} + +static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val) +{ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); + if (io_handle->swa_base <= reg + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) + asd_write_swa_byte (asd_ha, reg,val); + else if (io_handle->swb_base <= reg + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) + asd_write_swb_byte (asd_ha, reg, val); + else if (io_handle->swc_base <= reg + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) + asd_write_swc_byte (asd_ha, reg, val); + else { + /* Ok, we have to move SWB */ + asd_move_swb(asd_ha, reg); + asd_write_swb_byte (asd_ha, reg, val); + } +} + +#define ASD_WRITE_REG(type, ord) \ +void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\ +{ \ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \ + unsigned long flags; \ + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \ + spin_lock_irqsave(&asd_ha->iolock, flags); \ + if (io_handle->swa_base <= reg \ + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \ + asd_write_swa_##ord (asd_ha, reg,val); \ + else if (io_handle->swb_base <= reg \ + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \ + asd_write_swb_##ord (asd_ha, reg, val); \ + else if (io_handle->swc_base <= reg \ + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \ + asd_write_swc_##ord (asd_ha, reg, val); \ + else { \ + /* Ok, we have to move SWB */ \ + asd_move_swb(asd_ha, reg); \ + asd_write_swb_##ord (asd_ha, reg, val); \ + } \ + spin_unlock_irqrestore(&asd_ha->iolock, flags); \ +} + +ASD_WRITE_REG(u8, byte); +ASD_WRITE_REG(u16,word); +ASD_WRITE_REG(u32,dword); + +static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg) +{ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; + u8 val; + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); + if (io_handle->swa_base <= reg + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) + val = asd_read_swa_byte (asd_ha, reg); + else if (io_handle->swb_base <= reg + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) + val = asd_read_swb_byte (asd_ha, reg); + else if (io_handle->swc_base <= reg + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) + val = asd_read_swc_byte (asd_ha, reg); + else { + /* Ok, we have to move SWB */ + asd_move_swb(asd_ha, reg); + val = asd_read_swb_byte (asd_ha, reg); + } + return val; +} + +#define ASD_READ_REG(type, ord) \ +type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg) \ +{ \ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \ + type val; \ + unsigned long flags; \ + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \ + spin_lock_irqsave(&asd_ha->iolock, flags); \ + if (io_handle->swa_base <= reg \ + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \ + val = asd_read_swa_##ord (asd_ha, reg); \ + else if (io_handle->swb_base <= reg \ + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \ + val = asd_read_swb_##ord (asd_ha, reg); \ + else if (io_handle->swc_base <= reg \ + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \ + val = asd_read_swc_##ord (asd_ha, reg); \ + else { \ + /* Ok, we have to move SWB */ \ + asd_move_swb(asd_ha, reg); \ 
+ val = asd_read_swb_##ord (asd_ha, reg); \ + } \ + spin_unlock_irqrestore(&asd_ha->iolock, flags); \ + return val; \ +} + +ASD_READ_REG(u8, byte); +ASD_READ_REG(u16,word); +ASD_READ_REG(u32,dword); + +/** + * asd_read_reg_string -- read a string of bytes from io space memory + * @asd_ha: pointer to host adapter structure + * @dst: pointer to a destination buffer where data will be written to + * @offs: start offset (register) to read from + * @count: number of bytes to read + */ +void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst, + u32 offs, int count) +{ + u8 *p = dst; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->iolock, flags); + for ( ; count > 0; count--, offs++, p++) + *p = __asd_read_reg_byte(asd_ha, offs); + spin_unlock_irqrestore(&asd_ha->iolock, flags); +} + +/** + * asd_write_reg_string -- write a string of bytes to io space memory + * @asd_ha: pointer to host adapter structure + * @src: pointer to source buffer where data will be read from + * @offs: start offset (register) to write to + * @count: number of bytes to write + */ +void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src, + u32 offs, int count) +{ + u8 *p = src; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->iolock, flags); + for ( ; count > 0; count--, offs++, p++) + __asd_write_reg_byte(asd_ha, offs, *p); + spin_unlock_irqrestore(&asd_ha->iolock, flags); +} diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h new file mode 100644 index 0000000..2279307 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg.h @@ -0,0 +1,302 @@ +/* + * Aic94xx SAS/SATA driver hardware registers definitions. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_REG_H_ +#define _AIC94XX_REG_H_ + +#include +#include "aic94xx_hwi.h" + +/* Values */ +#define AIC9410_DEV_REV_B0 0x8 + +/* MBAR0, SWA, SWB, SWC, internal memory space addresses */ +#define REG_BASE_ADDR 0xB8000000 +#define REG_BASE_ADDR_CSEQCIO 0xB8002000 +#define REG_BASE_ADDR_EXSI 0xB8042800 + +#define MBAR0_SWA_SIZE 0x58 +extern u32 MBAR0_SWB_SIZE; +#define MBAR0_SWC_SIZE 0x8 + +/* MBAR1, points to On Chip Memory */ +#define OCM_BASE_ADDR 0xA0000000 +#define OCM_MAX_SIZE 0x20000 + +/* Smallest address possible to reference */ +#define ALL_BASE_ADDR OCM_BASE_ADDR + +/* PCI configuration space registers */ +#define PCI_IOBAR_OFFSET 4 + +#define PCI_CONF_MBAR1 0x6C +#define PCI_CONF_MBAR0_SWA 0x70 +#define PCI_CONF_MBAR0_SWB 0x74 +#define PCI_CONF_MBAR0_SWC 0x78 +#define PCI_CONF_MBAR_KEY 0x7C +#define PCI_CONF_FLSH_BAR 0xB8 + +#include "aic94xx_reg_def.h" + +u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg); +u16 asd_read_reg_word(struct asd_ha_struct *asd_ha, u32 reg); +u32 asd_read_reg_dword(struct asd_ha_struct *asd_ha, u32 reg); + +void asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val); +void asd_write_reg_word(struct asd_ha_struct *asd_ha, u32 reg, u16 val); +void asd_write_reg_dword(struct asd_ha_struct *asd_ha, u32 reg, u32 val); + +void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst, + u32 offs, int count); +void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src, + u32 offs, int count); + +#define ASD_READ_OCM(type, ord, S) \ +static inline type asd_read_ocm_##ord (struct asd_ha_struct *asd_ha, \ + u32 offs) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \ + type val = read##S (io_handle->addr + (unsigned long) offs); \ + rmb(); \ + return val; \ +} + +ASD_READ_OCM(u8, byte, b); +ASD_READ_OCM(u16,word, w); +ASD_READ_OCM(u32,dword,l); + +#define ASD_WRITE_OCM(type, ord, S) \ +static inline void asd_write_ocm_##ord (struct asd_ha_struct *asd_ha, \ + u32 offs, type val) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \ + write##S (val, io_handle->addr + (unsigned long) offs); \ + return; \ +} + +ASD_WRITE_OCM(u8, byte, b); +ASD_WRITE_OCM(u16,word, w); +ASD_WRITE_OCM(u32,dword,l); + +#define ASD_DDBSITE_READ(type, ord) \ +static inline type asd_ddbsite_read_##ord (struct asd_ha_struct *asd_ha, \ + u16 ddb_site_no, \ + u16 offs) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \ + asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \ + return asd_read_reg_##ord (asd_ha, CTXACCESS); \ +} + +ASD_DDBSITE_READ(u32, dword); +ASD_DDBSITE_READ(u16, word); + +static inline u8 asd_ddbsite_read_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, + u16 offs) +{ + if (offs & 1) + return asd_ddbsite_read_word(asd_ha, ddb_site_no, + offs & ~1) >> 8; + else + return asd_ddbsite_read_word(asd_ha, ddb_site_no, + offs) & 0xFF; +} + + +#define ASD_DDBSITE_WRITE(type, ord) \ +static inline void asd_ddbsite_write_##ord (struct asd_ha_struct *asd_ha, \ + u16 ddb_site_no, \ + u16 offs, type val) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \ + asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \ + asd_write_reg_##ord (asd_ha, CTXACCESS, val); \ +} + +ASD_DDBSITE_WRITE(u32, dword); +ASD_DDBSITE_WRITE(u16, word); + +static inline void 
asd_ddbsite_write_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, + u16 offs, u8 val) +{ + u16 base = offs & ~1; + u16 rval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base); + if (offs & 1) + rval = (val << 8) | (rval & 0xFF); + else + rval = (rval & 0xFF00) | val; + asd_ddbsite_write_word(asd_ha, ddb_site_no, base, rval); +} + + +#define ASD_SCBSITE_READ(type, ord) \ +static inline type asd_scbsite_read_##ord (struct asd_ha_struct *asd_ha, \ + u16 scb_site_no, \ + u16 offs) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \ + asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \ + return asd_read_reg_##ord (asd_ha, CTXACCESS); \ +} + +ASD_SCBSITE_READ(u32, dword); +ASD_SCBSITE_READ(u16, word); + +static inline u8 asd_scbsite_read_byte(struct asd_ha_struct *asd_ha, + u16 scb_site_no, + u16 offs) +{ + if (offs & 1) + return asd_scbsite_read_word(asd_ha, scb_site_no, + offs & ~1) >> 8; + else + return asd_scbsite_read_word(asd_ha, scb_site_no, + offs) & 0xFF; +} + + +#define ASD_SCBSITE_WRITE(type, ord) \ +static inline void asd_scbsite_write_##ord (struct asd_ha_struct *asd_ha, \ + u16 scb_site_no, \ + u16 offs, type val) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \ + asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \ + asd_write_reg_##ord (asd_ha, CTXACCESS, val); \ +} + +ASD_SCBSITE_WRITE(u32, dword); +ASD_SCBSITE_WRITE(u16, word); + +static inline void asd_scbsite_write_byte(struct asd_ha_struct *asd_ha, + u16 scb_site_no, + u16 offs, u8 val) +{ + u16 base = offs & ~1; + u16 rval = asd_scbsite_read_word(asd_ha, scb_site_no, base); + if (offs & 1) + rval = (val << 8) | (rval & 0xFF); + else + rval = (rval & 0xFF00) | val; + asd_scbsite_write_word(asd_ha, scb_site_no, base, rval); +} + +/** + * asd_ddbsite_update_word -- atomically update a word in a ddb site + * @asd_ha: pointer to host adapter structure + * @ddb_site_no: the DDB site number + * @offs: the offset into the DDB + * @oldval: old value found in that offset + * @newval: the new value to replace it + * + * This function is used when the sequencers are running and we need to + * update a DDB site atomically without expensive pausing and upausing + * of the sequencers and accessing the DDB site through the CIO bus. + * + * Return 0 on success; -EFAULT on parity error; -EAGAIN if the old value + * is different than the current value at that offset. 
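 *
 * A minimal usage sketch (asd_update_ddb_field() is a hypothetical helper,
 * shown only to illustrate the retry contract implied by the return codes
 * above):
 *
 *	static int asd_update_ddb_field(struct asd_ha_struct *asd_ha,
 *					u16 site, u16 offs, u16 newval)
 *	{
 *		int res;
 *
 *		do {
 *			u16 cur = asd_ddbsite_read_word(asd_ha, site, offs);
 *			res = asd_ddbsite_update_word(asd_ha, site, offs,
 *						      cur, newval);
 *		} while (res == -EAGAIN);
 *
 *		return res;
 *	}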
+ */ +static inline int asd_ddbsite_update_word(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, u16 offs, + u16 oldval, u16 newval) +{ + u8 done; + u16 oval = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs); + if (oval != oldval) + return -EAGAIN; + asd_write_reg_word(asd_ha, AOLDDATA, oldval); + asd_write_reg_word(asd_ha, ANEWDATA, newval); + do { + done = asd_read_reg_byte(asd_ha, ATOMICSTATCTL); + } while (!(done & ATOMICDONE)); + if (done & ATOMICERR) + return -EFAULT; /* parity error */ + else if (done & ATOMICWIN) + return 0; /* success */ + else + return -EAGAIN; /* oldval different than current value */ +} + +static inline int asd_ddbsite_update_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, u16 offs, + u8 _oldval, u8 _newval) +{ + u16 base = offs & ~1; + u16 oval; + u16 nval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base); + if (offs & 1) { + if ((nval >> 8) != _oldval) + return -EAGAIN; + nval = (_newval << 8) | (nval & 0xFF); + oval = (_oldval << 8) | (nval & 0xFF); + } else { + if ((nval & 0xFF) != _oldval) + return -EAGAIN; + nval = (nval & 0xFF00) | _newval; + oval = (nval & 0xFF00) | _oldval; + } + return asd_ddbsite_update_word(asd_ha, ddb_site_no, base, oval, nval); +} + +static inline void asd_write_reg_addr(struct asd_ha_struct *asd_ha, u32 reg, + dma_addr_t dma_handle) +{ + asd_write_reg_dword(asd_ha, reg, ASD_BUSADDR_LO(dma_handle)); + asd_write_reg_dword(asd_ha, reg+4, ASD_BUSADDR_HI(dma_handle)); +} + +static inline u32 asd_get_cmdctx_size(struct asd_ha_struct *asd_ha) +{ + /* DCHREVISION returns 0, possibly broken */ + u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE; + return ctxmemsize ? 65536 : 32768; +} + +static inline u32 asd_get_devctx_size(struct asd_ha_struct *asd_ha) +{ + u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE; + return ctxmemsize ? 8192 : 4096; +} + +static inline void asd_disable_ints(struct asd_ha_struct *asd_ha) +{ + asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); +} + +static inline void asd_enable_ints(struct asd_ha_struct *asd_ha) +{ + /* Enable COM SAS interrupt on errors, COMSTAT */ + asd_write_reg_dword(asd_ha, COMSTATEN, + EN_CSBUFPERR | EN_CSERR | EN_OVLYERR); + /* Enable DCH SAS CFIFTOERR */ + asd_write_reg_dword(asd_ha, DCHSTATUS, EN_CFIFTOERR); + /* Enable Host Device interrupts */ + asd_write_reg_dword(asd_ha, CHIMINTEN, SET_CHIMINTEN); +} + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h new file mode 100644 index 0000000..b79f45f --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h @@ -0,0 +1,2398 @@ +/* + * Aic94xx SAS/SATA driver hardware registers defintions. + * + * Copyright (C) 2004 Adaptec, Inc. All rights reserved. + * Copyright (C) 2004 David Chaw + * Copyright (C) 2005 Luben Tuikov + * + * Luben Tuikov: Some register value updates to make it work with the window + * agnostic register r/w functions. Some register corrections, sizes, + * etc. + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $ + * + */ + +#ifndef _ADP94XX_REG_DEF_H_ +#define _ADP94XX_REG_DEF_H_ + +/* + * Common definitions. + */ +#define CSEQ_MODE_PAGE_SIZE 0x200 /* CSEQ mode page size */ +#define LmSEQ_MODE_PAGE_SIZE 0x200 /* LmSEQ mode page size */ +#define LmSEQ_HOST_REG_SIZE 0x4000 /* LmSEQ Host Register size */ + +/********************* COM_SAS registers definition *************************/ + +/* The base is REG_BASE_ADDR, defined in aic94xx_reg.h. + */ + +/* + * CHIM Registers, Address Range : (0x00-0xFF) + */ +#define COMBIST (REG_BASE_ADDR + 0x00) + +/* bits 31:24 */ +#define L7BLKRST 0x80000000 +#define L6BLKRST 0x40000000 +#define L5BLKRST 0x20000000 +#define L4BLKRST 0x10000000 +#define L3BLKRST 0x08000000 +#define L2BLKRST 0x04000000 +#define L1BLKRST 0x02000000 +#define L0BLKRST 0x01000000 +#define LmBLKRST 0xFF000000 +#define LmBLKRST_COMBIST(phyid) (1 << (24 + phyid)) + +#define OCMBLKRST 0x00400000 +#define CTXMEMBLKRST 0x00200000 +#define CSEQBLKRST 0x00100000 +#define EXSIBLKRST 0x00040000 +#define DPIBLKRST 0x00020000 +#define DFIFBLKRST 0x00010000 +#define HARDRST 0x00000200 +#define COMBLKRST 0x00000100 +#define FRCDFPERR 0x00000080 +#define FRCCIOPERR 0x00000020 +#define FRCBISTERR 0x00000010 +#define COMBISTEN 0x00000004 +#define COMBISTDONE 0x00000002 /* ro */ +#define COMBISTFAIL 0x00000001 /* ro */ + +#define COMSTAT (REG_BASE_ADDR + 0x04) + +#define REQMBXREAD 0x00000040 +#define RSPMBXAVAIL 0x00000020 +#define CSBUFPERR 0x00000008 +#define OVLYERR 0x00000004 +#define CSERR 0x00000002 +#define OVLYDMADONE 0x00000001 + +#define COMSTAT_MASK (REQMBXREAD | RSPMBXAVAIL | \ + CSBUFPERR | OVLYERR | CSERR |\ + OVLYDMADONE) + +#define COMSTATEN (REG_BASE_ADDR + 0x08) + +#define EN_REQMBXREAD 0x00000040 +#define EN_RSPMBXAVAIL 0x00000020 +#define EN_CSBUFPERR 0x00000008 +#define EN_OVLYERR 0x00000004 +#define EN_CSERR 0x00000002 +#define EN_OVLYDONE 0x00000001 + +#define SCBPRO (REG_BASE_ADDR + 0x0C) + +#define SCBCONS_MASK 0xFFFF0000 +#define SCBPRO_MASK 0x0000FFFF + +#define CHIMREQMBX (REG_BASE_ADDR + 0x10) + +#define CHIMRSPMBX (REG_BASE_ADDR + 0x14) + +#define CHIMINT (REG_BASE_ADDR + 0x18) + +#define EXT_INT0 0x00000800 +#define EXT_INT1 0x00000400 +#define PORRSTDET 0x00000200 +#define HARDRSTDET 0x00000100 +#define DLAVAILQ 0x00000080 /* ro */ +#define HOSTERR 0x00000040 +#define INITERR 0x00000020 +#define DEVINT 0x00000010 +#define COMINT 0x00000008 +#define DEVTIMER2 0x00000004 +#define DEVTIMER1 0x00000002 +#define DLAVAIL 0x00000001 + +#define CHIMINT_MASK (HOSTERR | INITERR | DEVINT | COMINT |\ + DEVTIMER2 | DEVTIMER1 | DLAVAIL) + +#define DEVEXCEPT_MASK (HOSTERR | INITERR | DEVINT | COMINT) + +#define CHIMINTEN (REG_BASE_ADDR + 0x1C) + +#define RST_EN_EXT_INT1 0x01000000 +#define RST_EN_EXT_INT0 0x00800000 +#define RST_EN_HOSTERR 0x00400000 +#define RST_EN_INITERR 0x00200000 +#define RST_EN_DEVINT 0x00100000 +#define RST_EN_COMINT 0x00080000 +#define RST_EN_DEVTIMER2 0x00040000 +#define RST_EN_DEVTIMER1 0x00020000 +#define RST_EN_DLAVAIL 0x00010000 +#define SET_EN_EXT_INT1 0x00000100 +#define SET_EN_EXT_INT0 0x00000080 +#define SET_EN_HOSTERR 0x00000040 +#define SET_EN_INITERR 0x00000020 +#define SET_EN_DEVINT 0x00000010 +#define SET_EN_COMINT 0x00000008 
+#define SET_EN_DEVTIMER2 0x00000004 +#define SET_EN_DEVTIMER1 0x00000002 +#define SET_EN_DLAVAIL 0x00000001 + +#define RST_CHIMINTEN (RST_EN_HOSTERR | RST_EN_INITERR | \ + RST_EN_DEVINT | RST_EN_COMINT | \ + RST_EN_DEVTIMER2 | RST_EN_DEVTIMER1 |\ + RST_EN_DLAVAIL) + +#define SET_CHIMINTEN (SET_EN_HOSTERR | SET_EN_INITERR |\ + SET_EN_DEVINT | SET_EN_COMINT |\ + SET_EN_DLAVAIL) + +#define OVLYDMACTL (REG_BASE_ADDR + 0x20) + +#define OVLYADR_MASK 0x07FF0000 +#define OVLYLSEQ_MASK 0x0000FF00 +#define OVLYCSEQ 0x00000080 +#define OVLYHALTERR 0x00000040 +#define PIOCMODE 0x00000020 +#define RESETOVLYDMA 0x00000008 /* wo */ +#define STARTOVLYDMA 0x00000004 +#define STOPOVLYDMA 0x00000002 /* wo */ +#define OVLYDMAACT 0x00000001 /* ro */ + +#define OVLYDMACNT (REG_BASE_ADDR + 0x24) + +#define OVLYDOMAIN1 0x20000000 /* ro */ +#define OVLYDOMAIN0 0x10000000 +#define OVLYBUFADR_MASK 0x007F0000 +#define OVLYDMACNT_MASK 0x00003FFF + +#define OVLYDMAADR (REG_BASE_ADDR + 0x28) + +#define DMAERR (REG_BASE_ADDR + 0x30) + +#define OVLYERRSTAT_MASK 0x0000FF00 /* ro */ +#define CSERRSTAT_MASK 0x000000FF /* ro */ + +#define SPIODATA (REG_BASE_ADDR + 0x34) + +/* 0x38 - 0x3C are reserved */ + +#define T1CNTRLR (REG_BASE_ADDR + 0x40) + +#define T1DONE 0x00010000 /* ro */ +#define TIMER64 0x00000400 +#define T1ENABLE 0x00000200 +#define T1RELOAD 0x00000100 +#define T1PRESCALER_MASK 0x00000003 + +#define T1CMPR (REG_BASE_ADDR + 0x44) + +#define T1CNTR (REG_BASE_ADDR + 0x48) + +#define T2CNTRLR (REG_BASE_ADDR + 0x4C) + +#define T2DONE 0x00010000 /* ro */ +#define T2ENABLE 0x00000200 +#define T2RELOAD 0x00000100 +#define T2PRESCALER_MASK 0x00000003 + +#define T2CMPR (REG_BASE_ADDR + 0x50) + +#define T2CNTR (REG_BASE_ADDR + 0x54) + +/* 0x58h - 0xFCh are reserved */ + +/* + * DCH_SAS Registers, Address Range : (0x800-0xFFF) + */ +#define CMDCTXBASE (REG_BASE_ADDR + 0x800) + +#define DEVCTXBASE (REG_BASE_ADDR + 0x808) + +#define CTXDOMAIN (REG_BASE_ADDR + 0x810) + +#define DEVCTXDOMAIN1 0x00000008 /* ro */ +#define DEVCTXDOMAIN0 0x00000004 +#define CMDCTXDOMAIN1 0x00000002 /* ro */ +#define CMDCTXDOMAIN0 0x00000001 + +#define DCHCTL (REG_BASE_ADDR + 0x814) + +#define OCMBISTREPAIR 0x00080000 +#define OCMBISTEN 0x00040000 +#define OCMBISTDN 0x00020000 /* ro */ +#define OCMBISTFAIL 0x00010000 /* ro */ +#define DDBBISTEN 0x00004000 +#define DDBBISTDN 0x00002000 /* ro */ +#define DDBBISTFAIL 0x00001000 /* ro */ +#define SCBBISTEN 0x00000400 +#define SCBBISTDN 0x00000200 /* ro */ +#define SCBBISTFAIL 0x00000100 /* ro */ + +#define MEMSEL_MASK 0x000000E0 +#define MEMSEL_CCM_LSEQ 0x00000000 +#define MEMSEL_CCM_IOP 0x00000020 +#define MEMSEL_CCM_SASCTL 0x00000040 +#define MEMSEL_DCM_LSEQ 0x00000060 +#define MEMSEL_DCM_IOP 0x00000080 +#define MEMSEL_OCM 0x000000A0 + +#define FRCERR 0x00000010 +#define AUTORLS 0x00000001 + +#define DCHREVISION (REG_BASE_ADDR + 0x818) + +#define DCHREVISION_MASK 0x000000FF + +#define DCHSTATUS (REG_BASE_ADDR + 0x81C) + +#define EN_CFIFTOERR 0x00020000 +#define CFIFTOERR 0x00000200 +#define CSEQINT 0x00000100 /* ro */ +#define LSEQ7INT 0x00000080 /* ro */ +#define LSEQ6INT 0x00000040 /* ro */ +#define LSEQ5INT 0x00000020 /* ro */ +#define LSEQ4INT 0x00000010 /* ro */ +#define LSEQ3INT 0x00000008 /* ro */ +#define LSEQ2INT 0x00000004 /* ro */ +#define LSEQ1INT 0x00000002 /* ro */ +#define LSEQ0INT 0x00000001 /* ro */ + +#define LSEQINT_MASK (LSEQ7INT | LSEQ6INT | LSEQ5INT |\ + LSEQ4INT | LSEQ3INT | LSEQ2INT |\ + LSEQ1INT | LSEQ0INT) + +#define DCHDFIFDEBUG (REG_BASE_ADDR + 0x820) +#define ENFAIRMST 
0x00FF0000 +#define DISWRMST9 0x00000200 +#define DISWRMST8 0x00000100 +#define DISRDMST 0x000000FF + +#define ATOMICSTATCTL (REG_BASE_ADDR + 0x824) +/* 8 bit wide */ +#define AUTOINC 0x80 +#define ATOMICERR 0x04 +#define ATOMICWIN 0x02 +#define ATOMICDONE 0x01 + + +#define ALTCIOADR (REG_BASE_ADDR + 0x828) +/* 16 bit; bits 8:0 define CIO addr space of CSEQ */ + +#define ASCBPTR (REG_BASE_ADDR + 0x82C) +/* 16 bit wide */ + +#define ADDBPTR (REG_BASE_ADDR + 0x82E) +/* 16 bit wide */ + +#define ANEWDATA (REG_BASE_ADDR + 0x830) +/* 16 bit */ + +#define AOLDDATA (REG_BASE_ADDR + 0x834) +/* 16 bit */ + +#define CTXACCESS (REG_BASE_ADDR + 0x838) +/* 32 bit */ + +/* 0x83Ch - 0xFFCh are reserved */ + +/* + * ARP2 External Processor Registers, Address Range : (0x00-0x1F) + */ +#define ARP2CTL 0x00 + +#define FRCSCRPERR 0x00040000 +#define FRCARP2PERR 0x00020000 +#define FRCARP2ILLOPC 0x00010000 +#define ENWAITTO 0x00008000 +#define PERRORDIS 0x00004000 +#define FAILDIS 0x00002000 +#define CIOPERRDIS 0x00001000 +#define BREAKEN3 0x00000800 +#define BREAKEN2 0x00000400 +#define BREAKEN1 0x00000200 +#define BREAKEN0 0x00000100 +#define EPAUSE 0x00000008 +#define PAUSED 0x00000004 /* ro */ +#define STEP 0x00000002 +#define ARP2RESET 0x00000001 /* wo */ + +#define ARP2INT 0x04 + +#define HALTCODE_MASK 0x00FF0000 /* ro */ +#define ARP2WAITTO 0x00000100 +#define ARP2HALTC 0x00000080 +#define ARP2ILLOPC 0x00000040 +#define ARP2PERR 0x00000020 +#define ARP2CIOPERR 0x00000010 +#define ARP2BREAK3 0x00000008 +#define ARP2BREAK2 0x00000004 +#define ARP2BREAK1 0x00000002 +#define ARP2BREAK0 0x00000001 + +#define ARP2INTEN 0x08 + +#define EN_ARP2WAITTO 0x00000100 +#define EN_ARP2HALTC 0x00000080 +#define EN_ARP2ILLOPC 0x00000040 +#define EN_ARP2PERR 0x00000020 +#define EN_ARP2CIOPERR 0x00000010 +#define EN_ARP2BREAK3 0x00000008 +#define EN_ARP2BREAK2 0x00000004 +#define EN_ARP2BREAK1 0x00000002 +#define EN_ARP2BREAK0 0x00000001 + +#define ARP2BREAKADR01 0x0C + +#define BREAKADR1_MASK 0x0FFF0000 +#define BREAKADR0_MASK 0x00000FFF + +#define ARP2BREAKADR23 0x10 + +#define BREAKADR3_MASK 0x0FFF0000 +#define BREAKADR2_MASK 0x00000FFF + +/* 0x14h - 0x1Ch are reserved */ + +/* + * ARP2 Registers, Address Range : (0x00-0x1F) + * The definitions have the same address offset for CSEQ and LmSEQ + * CIO Bus Registers. + */ +#define MODEPTR 0x00 + +#define DSTMODE 0xF0 +#define SRCMODE 0x0F + +#define ALTMODE 0x01 + +#define ALTDMODE 0xF0 +#define ALTSMODE 0x0F + +#define ATOMICXCHG 0x02 + +#define FLAG 0x04 + +#define INTCODE_MASK 0xF0 +#define ALTMODEV2 0x04 +#define CARRY_INT 0x02 +#define CARRY 0x01 + +#define ARP2INTCTL 0x05 + +#define PAUSEDIS 0x80 +#define RSTINTCTL 0x40 +#define POPALTMODE 0x08 +#define ALTMODEV 0x04 +#define INTMASK 0x02 +#define IRET 0x01 + +#define STACK 0x06 + +#define FUNCTION1 0x07 + +#define PRGMCNT 0x08 + +#define ACCUM 0x0A + +#define SINDEX 0x0C + +#define DINDEX 0x0E + +#define ALLONES 0x10 + +#define ALLZEROS 0x11 + +#define SINDIR 0x12 + +#define DINDIR 0x13 + +#define JUMLDIR 0x14 + +#define ARP2HALTCODE 0x15 + +#define CURRADDR 0x16 + +#define LASTADDR 0x18 + +#define NXTLADDR 0x1A + +#define DBGPORTPTR 0x1C + +#define DBGPORT 0x1D + +/* + * CIO Registers. + * The definitions have the same address offset for CSEQ and LmSEQ + * CIO Bus Registers. 
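 *
 * For example, the SCB pointer of a mode page sits at CIO offset MnSCBPTR
 * (0x20) on every sequencer: the central sequencer reaches it through
 * CMnSCBPTR(Mode), i.e. CSEQm_CIO_REG(Mode, MnSCBPTR), and a link sequencer
 * reaches the same offset through LmSEQ_PHY_REG(Mode, LinkNum, MnSCBPTR)
 * (macros defined further below).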
+ */ +#define MnSCBPTR 0x20 + +#define MnDDBPTR 0x22 + +#define SCRATCHPAGE 0x24 + +#define MnSCRATCHPAGE 0x25 + +#define SCRATCHPAGESV 0x26 + +#define MnSCRATCHPAGESV 0x27 + +#define MnDMAERRS 0x46 + +#define MnSGDMAERRS 0x47 + +#define MnSGBUF 0x53 + +#define MnSGDMASTAT 0x5b + +#define MnDDMACTL 0x5c /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDDMASTAT 0x5d /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDDMAMODE 0x5e /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDMAENG 0x60 + +#define MnPIPECTL 0x61 + +#define MnSGBADR 0x65 + +#define MnSCB_SITE 0x100 + +#define MnDDB_SITE 0x180 + +/* + * The common definitions below have the same address offset for both + * CSEQ and LmSEQ. + */ +#define BISTCTL0 0x4C + +#define BISTCTL1 0x50 + +#define MAPPEDSCR 0x800 + +/* + * CSEQ Host Register, Address Range : (0x000-0xFFC) + */ +#define CSEQ_HOST_REG_BASE_ADR 0xB8001000 + +#define CARP2CTL (CSEQ_HOST_REG_BASE_ADR + ARP2CTL) + +#define CARP2INT (CSEQ_HOST_REG_BASE_ADR + ARP2INT) + +#define CARP2INTEN (CSEQ_HOST_REG_BASE_ADR + ARP2INTEN) + +#define CARP2BREAKADR01 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR01) + +#define CARP2BREAKADR23 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR23) + +#define CBISTCTL (CSEQ_HOST_REG_BASE_ADR + BISTCTL1) + +#define CSEQRAMBISTEN 0x00000040 +#define CSEQRAMBISTDN 0x00000020 /* ro */ +#define CSEQRAMBISTFAIL 0x00000010 /* ro */ +#define CSEQSCRBISTEN 0x00000004 +#define CSEQSCRBISTDN 0x00000002 /* ro */ +#define CSEQSCRBISTFAIL 0x00000001 /* ro */ + +#define CMAPPEDSCR (CSEQ_HOST_REG_BASE_ADR + MAPPEDSCR) + +/* + * CSEQ CIO Bus Registers, Address Range : (0x0000-0x1FFC) + * 16 modes, each mode is 512 bytes. + * Unless specified, the register should valid for all modes. + */ +#define CSEQ_CIO_REG_BASE_ADR REG_BASE_ADDR_CSEQCIO + +#define CSEQm_CIO_REG(Mode, Reg) \ + (CSEQ_CIO_REG_BASE_ADR + \ + ((u32) (Mode) * CSEQ_MODE_PAGE_SIZE) + (u32) (Reg)) + +#define CMODEPTR (CSEQ_CIO_REG_BASE_ADR + MODEPTR) + +#define CALTMODE (CSEQ_CIO_REG_BASE_ADR + ALTMODE) + +#define CATOMICXCHG (CSEQ_CIO_REG_BASE_ADR + ATOMICXCHG) + +#define CFLAG (CSEQ_CIO_REG_BASE_ADR + FLAG) + +#define CARP2INTCTL (CSEQ_CIO_REG_BASE_ADR + ARP2INTCTL) + +#define CSTACK (CSEQ_CIO_REG_BASE_ADR + STACK) + +#define CFUNCTION1 (CSEQ_CIO_REG_BASE_ADR + FUNCTION1) + +#define CPRGMCNT (CSEQ_CIO_REG_BASE_ADR + PRGMCNT) + +#define CACCUM (CSEQ_CIO_REG_BASE_ADR + ACCUM) + +#define CSINDEX (CSEQ_CIO_REG_BASE_ADR + SINDEX) + +#define CDINDEX (CSEQ_CIO_REG_BASE_ADR + DINDEX) + +#define CALLONES (CSEQ_CIO_REG_BASE_ADR + ALLONES) + +#define CALLZEROS (CSEQ_CIO_REG_BASE_ADR + ALLZEROS) + +#define CSINDIR (CSEQ_CIO_REG_BASE_ADR + SINDIR) + +#define CDINDIR (CSEQ_CIO_REG_BASE_ADR + DINDIR) + +#define CJUMLDIR (CSEQ_CIO_REG_BASE_ADR + JUMLDIR) + +#define CARP2HALTCODE (CSEQ_CIO_REG_BASE_ADR + ARP2HALTCODE) + +#define CCURRADDR (CSEQ_CIO_REG_BASE_ADR + CURRADDR) + +#define CLASTADDR (CSEQ_CIO_REG_BASE_ADR + LASTADDR) + +#define CNXTLADDR (CSEQ_CIO_REG_BASE_ADR + NXTLADDR) + +#define CDBGPORTPTR (CSEQ_CIO_REG_BASE_ADR + DBGPORTPTR) + +#define CDBGPORT (CSEQ_CIO_REG_BASE_ADR + DBGPORT) + +#define CSCRATCHPAGE (CSEQ_CIO_REG_BASE_ADR + SCRATCHPAGE) + +#define CMnSCBPTR(Mode) CSEQm_CIO_REG(Mode, MnSCBPTR) + +#define CMnDDBPTR(Mode) CSEQm_CIO_REG(Mode, MnDDBPTR) + +#define CMnSCRATCHPAGE(Mode) CSEQm_CIO_REG(Mode, MnSCRATCHPAGE) + +#define CLINKCON (CSEQ_CIO_REG_BASE_ADR + 0x28) + +#define CCIOAACESS (CSEQ_CIO_REG_BASE_ADR + 0x2C) + +/* mode 0-7 */ +#define MnREQMBX 0x30 +#define CMnREQMBX(Mode) CSEQm_CIO_REG(Mode, 0x30) + +/* 
mode 8 */ +#define CSEQCON CSEQm_CIO_REG(8, 0x30) + +/* mode 0-7 */ +#define MnRSPMBX 0x34 +#define CMnRSPMBX(Mode) CSEQm_CIO_REG(Mode, 0x34) + +/* mode 8 */ +#define CSEQCOMCTL CSEQm_CIO_REG(8, 0x34) + +/* mode 8 */ +#define CSEQCOMSTAT CSEQm_CIO_REG(8, 0x35) + +/* mode 8 */ +#define CSEQCOMINTEN CSEQm_CIO_REG(8, 0x36) + +/* mode 8 */ +#define CSEQCOMDMACTL CSEQm_CIO_REG(8, 0x37) + +#define CSHALTERR 0x10 +#define RESETCSDMA 0x08 /* wo */ +#define STARTCSDMA 0x04 +#define STOPCSDMA 0x02 /* wo */ +#define CSDMAACT 0x01 /* ro */ + +/* mode 0-7 */ +#define MnINT 0x38 +#define CMnINT(Mode) CSEQm_CIO_REG(Mode, 0x38) + +#define CMnREQMBXE 0x02 +#define CMnRSPMBXF 0x01 +#define CMnINT_MASK 0x00000003 + +/* mode 8 */ +#define CSEQREQMBX CSEQm_CIO_REG(8, 0x38) + +/* mode 0-7 */ +#define MnINTEN 0x3C +#define CMnINTEN(Mode) CSEQm_CIO_REG(Mode, 0x3C) + +#define EN_CMnRSPMBXF 0x01 + +/* mode 8 */ +#define CSEQRSPMBX CSEQm_CIO_REG(8, 0x3C) + +/* mode 8 */ +#define CSDMAADR CSEQm_CIO_REG(8, 0x40) + +/* mode 8 */ +#define CSDMACNT CSEQm_CIO_REG(8, 0x48) + +/* mode 8 */ +#define CSEQDLCTL CSEQm_CIO_REG(8, 0x4D) + +#define DONELISTEND 0x10 +#define DONELISTSIZE_MASK 0x0F +#define DONELISTSIZE_8ELEM 0x01 +#define DONELISTSIZE_16ELEM 0x02 +#define DONELISTSIZE_32ELEM 0x03 +#define DONELISTSIZE_64ELEM 0x04 +#define DONELISTSIZE_128ELEM 0x05 +#define DONELISTSIZE_256ELEM 0x06 +#define DONELISTSIZE_512ELEM 0x07 +#define DONELISTSIZE_1024ELEM 0x08 +#define DONELISTSIZE_2048ELEM 0x09 +#define DONELISTSIZE_4096ELEM 0x0A +#define DONELISTSIZE_8192ELEM 0x0B +#define DONELISTSIZE_16384ELEM 0x0C + +/* mode 8 */ +#define CSEQDLOFFS CSEQm_CIO_REG(8, 0x4E) + +/* mode 11 */ +#define CM11INTVEC0 CSEQm_CIO_REG(11, 0x50) + +/* mode 11 */ +#define CM11INTVEC1 CSEQm_CIO_REG(11, 0x52) + +/* mode 11 */ +#define CM11INTVEC2 CSEQm_CIO_REG(11, 0x54) + +#define CCONMSK (CSEQ_CIO_REG_BASE_ADR + 0x60) + +#define CCONEXIST (CSEQ_CIO_REG_BASE_ADR + 0x61) + +#define CCONMODE (CSEQ_CIO_REG_BASE_ADR + 0x62) + +#define CTIMERCALC (CSEQ_CIO_REG_BASE_ADR + 0x64) + +#define CINTDIS (CSEQ_CIO_REG_BASE_ADR + 0x68) + +/* mode 8, 32x32 bits, 128 bytes of mapped buffer */ +#define CSBUFFER CSEQm_CIO_REG(8, 0x80) + +#define CSCRATCH (CSEQ_CIO_REG_BASE_ADR + 0x1C0) + +/* mode 0-8 */ +#define CMnSCRATCH(Mode) CSEQm_CIO_REG(Mode, 0x1E0) + +/* + * CSEQ Mapped Instruction RAM Page, Address Range : (0x0000-0x1FFC) + */ +#define CSEQ_RAM_REG_BASE_ADR 0xB8004000 + +/* + * The common definitions below have the same address offset for all the Link + * sequencers. 
+ */ +#define MODECTL 0x40 + +#define DBGMODE 0x44 + +#define CONTROL 0x48 +#define LEDTIMER 0x00010000 +#define LEDTIMERS_10us 0x00000000 +#define LEDTIMERS_1ms 0x00000800 +#define LEDTIMERS_100ms 0x00001000 +#define LEDMODE_TXRX 0x00000000 +#define LEDMODE_CONNECTED 0x00000200 +#define LEDPOL 0x00000100 + +#define LSEQRAM 0x1000 + +/* + * LmSEQ Host Registers, Address Range : (0x0000-0x3FFC) + */ +#define LSEQ0_HOST_REG_BASE_ADR 0xB8020000 +#define LSEQ1_HOST_REG_BASE_ADR 0xB8024000 +#define LSEQ2_HOST_REG_BASE_ADR 0xB8028000 +#define LSEQ3_HOST_REG_BASE_ADR 0xB802C000 +#define LSEQ4_HOST_REG_BASE_ADR 0xB8030000 +#define LSEQ5_HOST_REG_BASE_ADR 0xB8034000 +#define LSEQ6_HOST_REG_BASE_ADR 0xB8038000 +#define LSEQ7_HOST_REG_BASE_ADR 0xB803C000 + +#define LmARP2CTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2CTL) + +#define LmARP2INT(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2INT) + +#define LmARP2INTEN(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2INTEN) + +#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + DBGMODE) + +#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + CONTROL) + +#define LmARP2BREAKADR01(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2BREAKADR01) + +#define LmARP2BREAKADR23(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2BREAKADR23) + +#define LmMODECTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + MODECTL) + +#define LmAUTODISCI 0x08000000 +#define LmDSBLBITLT 0x04000000 +#define LmDSBLANTT 0x02000000 +#define LmDSBLCRTT 0x01000000 +#define LmDSBLCONT 0x00000100 +#define LmPRIMODE 0x00000080 +#define LmDSBLHOLD 0x00000040 +#define LmDISACK 0x00000020 +#define LmBLIND48 0x00000010 +#define LmRCVMODE_MASK 0x0000000C +#define LmRCVMODE_PLD 0x00000000 +#define LmRCVMODE_HPC 0x00000004 + +#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + DBGMODE) + +#define LmFRCPERR 0x80000000 +#define LmMEMSEL_MASK 0x30000000 +#define LmFRCRBPERR 0x00000000 +#define LmFRCTBPERR 0x10000000 +#define LmFRCSGBPERR 0x20000000 +#define LmFRCARBPERR 0x30000000 +#define LmRCVIDW 0x00080000 +#define LmINVDWERR 0x00040000 +#define LmRCVDISP 0x00004000 +#define LmDISPERR 0x00002000 +#define LmDSBLDSCR 0x00000800 +#define LmDSBLSCR 0x00000400 +#define LmFRCNAK 0x00000200 +#define LmFRCROFS 0x00000100 +#define LmFRCCRC 0x00000080 +#define LmFRMTYPE_MASK 0x00000070 +#define LmSG_DATA 0x00000000 +#define LmSG_COMMAND 0x00000010 +#define LmSG_TASK 0x00000020 +#define LmSG_TGTXFER 0x00000030 +#define LmSG_RESPONSE 0x00000040 +#define LmSG_IDENADDR 0x00000050 +#define LmSG_OPENADDR 0x00000060 +#define LmDISCRCGEN 0x00000008 +#define LmDISCRCCHK 0x00000004 +#define LmSSXMTFRM 0x00000002 +#define LmSSRCVFRM 0x00000001 + +#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + CONTROL) + +#define LmSTEPXMTFRM 0x00000002 +#define LmSTEPRCVFRM 0x00000001 + +#define LmBISTCTL0(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + BISTCTL0) + +#define ARBBISTEN 0x40000000 +#define ARBBISTDN 0x20000000 /* ro */ +#define ARBBISTFAIL 0x10000000 /* ro */ +#define TBBISTEN 0x00000400 +#define TBBISTDN 0x00000200 /* ro */ +#define TBBISTFAIL 0x00000100 /* ro */ +#define RBBISTEN 0x00000040 +#define RBBISTDN 
0x00000020 /* ro */ +#define RBBISTFAIL 0x00000010 /* ro */ +#define SGBISTEN 0x00000004 +#define SGBISTDN 0x00000002 /* ro */ +#define SGBISTFAIL 0x00000001 /* ro */ + +#define LmBISTCTL1(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) +\ + BISTCTL1) + +#define LmRAMPAGE1 0x00000200 +#define LmRAMPAGE0 0x00000100 +#define LmIMEMBISTEN 0x00000040 +#define LmIMEMBISTDN 0x00000020 /* ro */ +#define LmIMEMBISTFAIL 0x00000010 /* ro */ +#define LmSCRBISTEN 0x00000004 +#define LmSCRBISTDN 0x00000002 /* ro */ +#define LmSCRBISTFAIL 0x00000001 /* ro */ +#define LmRAMPAGE (LmRAMPAGE1 + LmRAMPAGE0) +#define LmRAMPAGE_LSHIFT 0x8 + +#define LmSCRATCH(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\ + MAPPEDSCR) + +#define LmSEQRAM(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\ + LSEQRAM) + +/* + * LmSEQ CIO Bus Register, Address Range : (0x0000-0xFFC) + * 8 modes, each mode is 512 bytes. + * Unless specified, the register should valid for all modes. + */ +#define LmSEQ_CIOBUS_REG_BASE 0x2000 + +#define LmSEQ_PHY_BASE(Mode, LinkNum) \ + (LSEQ0_HOST_REG_BASE_ADR + \ + (LmSEQ_HOST_REG_SIZE * (u32) (LinkNum)) + \ + LmSEQ_CIOBUS_REG_BASE + \ + ((u32) (Mode) * LmSEQ_MODE_PAGE_SIZE)) + +#define LmSEQ_PHY_REG(Mode, LinkNum, Reg) \ + (LmSEQ_PHY_BASE(Mode, LinkNum) + (u32) (Reg)) + +#define LmMODEPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, MODEPTR) + +#define LmALTMODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALTMODE) + +#define LmATOMICXCHG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ATOMICXCHG) + +#define LmFLAG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FLAG) + +#define LmARP2INTCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2INTCTL) + +#define LmSTACK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, STACK) + +#define LmFUNCTION1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FUNCTION1) + +#define LmPRGMCNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, PRGMCNT) + +#define LmACCUM(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ACCUM) + +#define LmSINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDEX) + +#define LmDINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDEX) + +#define LmALLONES(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLONES) + +#define LmALLZEROS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLZEROS) + +#define LmSINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDIR) + +#define LmDINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDIR) + +#define LmJUMLDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, JUMLDIR) + +#define LmARP2HALTCODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2HALTCODE) + +#define LmCURRADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, CURRADDR) + +#define LmLASTADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, LASTADDR) + +#define LmNXTLADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, NXTLADDR) + +#define LmDBGPORTPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORTPTR) + +#define LmDBGPORT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORT) + +#define LmSCRATCHPAGE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SCRATCHPAGE) + +#define LmMnSCRATCHPAGE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, \ + MnSCRATCHPAGE) + +#define LmTIMERCALC(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x28) + +#define LmREQMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x30) + +#define LmRSPMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x34) + +#define LmMnINT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x38) + +#define CTXMEMSIZE 0x80000000 /* ro */ +#define LmACKREQ 0x08000000 +#define LmNAKREQ 0x04000000 +#define LmMnXMTERR 0x02000000 +#define LmM5OOBSVC 0x01000000 +#define LmHWTINT 0x00800000 +#define LmMnCTXDONE 0x00100000 +#define LmM2REQMBXF 0x00080000 +#define LmM2RSPMBXE 0x00040000 +#define LmMnDMAERR 0x00020000 +#define 
LmRCVPRIM 0x00010000 +#define LmRCVERR 0x00008000 +#define LmADDRRCV 0x00004000 +#define LmMnHDRMISS 0x00002000 +#define LmMnWAITSCB 0x00001000 +#define LmMnRLSSCB 0x00000800 +#define LmMnSAVECTX 0x00000400 +#define LmMnFETCHSG 0x00000200 +#define LmMnLOADCTX 0x00000100 +#define LmMnCFGICL 0x00000080 +#define LmMnCFGSATA 0x00000040 +#define LmMnCFGEXPSATA 0x00000020 +#define LmMnCFGCMPLT 0x00000010 +#define LmMnCFGRBUF 0x00000008 +#define LmMnSAVETTR 0x00000004 +#define LmMnCFGRDAT 0x00000002 +#define LmMnCFGHDR 0x00000001 + +#define LmMnINTEN(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x3C) + +#define EN_LmACKREQ 0x08000000 +#define EN_LmNAKREQ 0x04000000 +#define EN_LmMnXMTERR 0x02000000 +#define EN_LmM5OOBSVC 0x01000000 +#define EN_LmHWTINT 0x00800000 +#define EN_LmMnCTXDONE 0x00100000 +#define EN_LmM2REQMBXF 0x00080000 +#define EN_LmM2RSPMBXE 0x00040000 +#define EN_LmMnDMAERR 0x00020000 +#define EN_LmRCVPRIM 0x00010000 +#define EN_LmRCVERR 0x00008000 +#define EN_LmADDRRCV 0x00004000 +#define EN_LmMnHDRMISS 0x00002000 +#define EN_LmMnWAITSCB 0x00001000 +#define EN_LmMnRLSSCB 0x00000800 +#define EN_LmMnSAVECTX 0x00000400 +#define EN_LmMnFETCHSG 0x00000200 +#define EN_LmMnLOADCTX 0x00000100 +#define EN_LmMnCFGICL 0x00000080 +#define EN_LmMnCFGSATA 0x00000040 +#define EN_LmMnCFGEXPSATA 0x00000020 +#define EN_LmMnCFGCMPLT 0x00000010 +#define EN_LmMnCFGRBUF 0x00000008 +#define EN_LmMnSAVETTR 0x00000004 +#define EN_LmMnCFGRDAT 0x00000002 +#define EN_LmMnCFGHDR 0x00000001 + +#define LmM0INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmMnCFGRBUF | \ + EN_LmMnSAVETTR | EN_LmMnCFGRDAT | \ + EN_LmMnCFGHDR | EN_LmRCVERR | \ + EN_LmADDRRCV | EN_LmMnHDRMISS | \ + EN_LmMnRLSSCB | EN_LmMnSAVECTX | \ + EN_LmMnFETCHSG | EN_LmMnLOADCTX | \ + EN_LmHWTINT | EN_LmMnCTXDONE | \ + EN_LmRCVPRIM | EN_LmMnCFGSATA | \ + EN_LmMnCFGEXPSATA | EN_LmMnDMAERR) + +#define LmM1INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmADDRRCV | \ + EN_LmMnRLSSCB | EN_LmMnSAVECTX | \ + EN_LmMnFETCHSG | EN_LmMnLOADCTX | \ + EN_LmMnXMTERR | EN_LmHWTINT | \ + EN_LmMnCTXDONE | EN_LmRCVPRIM | \ + EN_LmRCVERR | EN_LmMnDMAERR) + +#define LmM2INTEN_MASK (EN_LmADDRRCV | EN_LmHWTINT | \ + EN_LmM2REQMBXF | EN_LmRCVPRIM | \ + EN_LmRCVERR) + +#define LmM5INTEN_MASK (EN_LmADDRRCV | EN_LmM5OOBSVC | \ + EN_LmHWTINT | EN_LmRCVPRIM | \ + EN_LmRCVERR) + +#define LmXMTPRIMD(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x40) + +#define LmXMTPRIMCS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x44) + +#define LmCONSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x45) + +#define LmMnDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x46) + +#define LmMnSGDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x47) + +#define LmM0EXPHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x48) + +#define LmM1SASALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x48) +#define SAS_ALIGN_DEFAULT 0xFF + +#define LmM0MSKHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x49) + +#define LmM1STPALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x49) +#define STP_ALIGN_DEFAULT 0x1F + +#define LmM0RCVHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4A) + +#define LmM1XMTHDRP(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4A) + +#define LmM0ICLADR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4B) + +#define LmM1ALIGNMODE(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4B) + +#define LmDISALIGN 0x20 +#define LmROTSTPALIGN 0x10 +#define LmSTPALIGN 0x08 +#define LmROTNOTIFY 0x04 +#define LmDUALALIGN 0x02 +#define LmROTALIGN 0x01 + +#define LmM0EXPRCVNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4C) + +#define LmM1XMTCNT(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4C) + +#define LmMnBUFSTAT(LinkNum, Mode) 
LmSEQ_PHY_REG(Mode, LinkNum, 0x4E) + +#define LmMnBUFPERR 0x01 + +/* mode 0-1 */ +#define LmMnXFRLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x59) + +#define LmMnXFRLVL_128 0x05 +#define LmMnXFRLVL_256 0x04 +#define LmMnXFRLVL_512 0x03 +#define LmMnXFRLVL_1024 0x02 +#define LmMnXFRLVL_1536 0x01 +#define LmMnXFRLVL_2048 0x00 + + /* mode 0-1 */ +#define LmMnSGDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5A) + +#define LmMnRESETSG 0x04 +#define LmMnSTOPSG 0x02 +#define LmMnSTARTSG 0x01 + +/* mode 0-1 */ +#define LmMnSGDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5B) + +/* mode 0-1 */ +#define LmMnDDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5C) + +#define LmMnFLUSH 0x40 /* wo */ +#define LmMnRLSRTRY 0x20 /* wo */ +#define LmMnDISCARD 0x10 /* wo */ +#define LmMnRESETDAT 0x08 /* wo */ +#define LmMnSUSDAT 0x04 /* wo */ +#define LmMnSTOPDAT 0x02 /* wo */ +#define LmMnSTARTDAT 0x01 /* wo */ + +/* mode 0-1 */ +#define LmMnDDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5D) + +#define LmMnDPEMPTY 0x80 +#define LmMnFLUSHING 0x40 +#define LmMnDDMAREQ 0x20 +#define LmMnHDMAREQ 0x10 +#define LmMnDATFREE 0x08 +#define LmMnDATSUS 0x04 +#define LmMnDATACT 0x02 +#define LmMnDATEN 0x01 + +/* mode 0-1 */ +#define LmMnDDMAMODE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5E) + +#define LmMnDMATYPE_NORMAL 0x0000 +#define LmMnDMATYPE_HOST_ONLY_TX 0x0001 +#define LmMnDMATYPE_DEVICE_ONLY_TX 0x0002 +#define LmMnDMATYPE_INVALID 0x0003 +#define LmMnDMATYPE_MASK 0x0003 + +#define LmMnDMAWRAP 0x0004 +#define LmMnBITBUCKET 0x0008 +#define LmMnDISHDR 0x0010 +#define LmMnSTPCRC 0x0020 +#define LmXTEST 0x0040 +#define LmMnDISCRC 0x0080 +#define LmMnENINTLK 0x0100 +#define LmMnADDRFRM 0x0400 +#define LmMnENXMTCRC 0x0800 + +/* mode 0-1 */ +#define LmMnXFRCNT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x70) + +/* mode 0-1 */ +#define LmMnDPSEL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7B) +#define LmMnDPSEL_MASK 0x07 +#define LmMnEOLPRE 0x40 +#define LmMnEOSPRE 0x80 + +/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */ +/* Receive Mode n = 0 */ +#define LmMnHRADDR 0x00 +#define LmMnHBYTECNT 0x01 +#define LmMnHREWIND 0x02 +#define LmMnDWADDR 0x03 +#define LmMnDSPACECNT 0x04 +#define LmMnDFRMSIZE 0x05 + +/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */ +/* Transmit Mode n = 1 */ +#define LmMnHWADDR 0x00 +#define LmMnHSPACECNT 0x01 +/* #define LmMnHREWIND 0x02 */ +#define LmMnDRADDR 0x03 +#define LmMnDBYTECNT 0x04 +/* #define LmMnDFRMSIZE 0x05 */ + +/* mode 0-1 */ +#define LmMnDPACC(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x78) +#define LmMnDPACC_MASK 0x00FFFFFF + +/* mode 0-1 */ +#define LmMnHOLDLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7D) + +#define LmPRMSTAT0(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x80) +#define LmPRMSTAT0BYTE0 0x80 +#define LmPRMSTAT0BYTE1 0x81 +#define LmPRMSTAT0BYTE2 0x82 +#define LmPRMSTAT0BYTE3 0x83 + +#define LmFRAMERCVD 0x80000000 +#define LmXFRRDYRCVD 0x40000000 +#define LmUNKNOWNP 0x20000000 +#define LmBREAK 0x10000000 +#define LmDONE 0x08000000 +#define LmOPENACPT 0x04000000 +#define LmOPENRJCT 0x02000000 +#define LmOPENRTRY 0x01000000 +#define LmCLOSERV1 0x00800000 +#define LmCLOSERV0 0x00400000 +#define LmCLOSENORM 0x00200000 +#define LmCLOSECLAF 0x00100000 +#define LmNOTIFYRV2 0x00080000 +#define LmNOTIFYRV1 0x00040000 +#define LmNOTIFYRV0 0x00020000 +#define LmNOTIFYSPIN 0x00010000 +#define LmBROADRV4 0x00008000 +#define LmBROADRV3 0x00004000 +#define LmBROADRV2 0x00002000 +#define LmBROADRV1 
0x00001000 +#define LmBROADSES 0x00000800 +#define LmBROADRVCH1 0x00000400 +#define LmBROADRVCH0 0x00000200 +#define LmBROADCH 0x00000100 +#define LmAIPRVWP 0x00000080 +#define LmAIPWP 0x00000040 +#define LmAIPWD 0x00000020 +#define LmAIPWC 0x00000010 +#define LmAIPRV2 0x00000008 +#define LmAIPRV1 0x00000004 +#define LmAIPRV0 0x00000002 +#define LmAIPNRML 0x00000001 + +#define LmBROADCAST_MASK (LmBROADCH | LmBROADRVCH0 | \ + LmBROADRVCH1) + +#define LmPRMSTAT1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x84) +#define LmPRMSTAT1BYTE0 0x84 +#define LmPRMSTAT1BYTE1 0x85 +#define LmPRMSTAT1BYTE2 0x86 +#define LmPRMSTAT1BYTE3 0x87 + +#define LmFRMRCVDSTAT 0x80000000 +#define LmBREAK_DET 0x04000000 +#define LmCLOSE_DET 0x02000000 +#define LmDONE_DET 0x01000000 +#define LmXRDY 0x00040000 +#define LmSYNCSRST 0x00020000 +#define LmSYNC 0x00010000 +#define LmXHOLD 0x00008000 +#define LmRRDY 0x00004000 +#define LmHOLD 0x00002000 +#define LmROK 0x00001000 +#define LmRIP 0x00000800 +#define LmCRBLK 0x00000400 +#define LmACK 0x00000200 +#define LmNAK 0x00000100 +#define LmHARDRST 0x00000080 +#define LmERROR 0x00000040 +#define LmRERR 0x00000020 +#define LmPMREQP 0x00000010 +#define LmPMREQS 0x00000008 +#define LmPMACK 0x00000004 +#define LmPMNAK 0x00000002 +#define LmDMAT 0x00000001 + +/* mode 1 */ +#define LmMnSATAFS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7E) +#define LmMnXMTSIZE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x93) + +/* mode 0 */ +#define LmMnFRMERR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xB0) + +#define LmACRCERR 0x00000800 +#define LmPHYOVRN 0x00000400 +#define LmOBOVRN 0x00000200 +#define LmMnZERODATA 0x00000100 +#define LmSATAINTLK 0x00000080 +#define LmMnCRCERR 0x00000020 +#define LmRRDYOVRN 0x00000010 +#define LmMISSSOAF 0x00000008 +#define LmMISSSOF 0x00000004 +#define LmMISSEOAF 0x00000002 +#define LmMISSEOF 0x00000001 + +#define LmFRMERREN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xB4) + +#define EN_LmACRCERR 0x00000800 +#define EN_LmPHYOVRN 0x00000400 +#define EN_LmOBOVRN 0x00000200 +#define EN_LmMnZERODATA 0x00000100 +#define EN_LmSATAINTLK 0x00000080 +#define EN_LmFRMBAD 0x00000040 +#define EN_LmMnCRCERR 0x00000020 +#define EN_LmRRDYOVRN 0x00000010 +#define EN_LmMISSSOAF 0x00000008 +#define EN_LmMISSSOF 0x00000004 +#define EN_LmMISSEOAF 0x00000002 +#define EN_LmMISSEOF 0x00000001 + +#define LmFRMERREN_MASK (EN_LmSATAINTLK | EN_LmMnCRCERR | \ + EN_LmRRDYOVRN | EN_LmMISSSOF | \ + EN_LmMISSEOAF | EN_LmMISSEOF | \ + EN_LmACRCERR | LmPHYOVRN | \ + EN_LmOBOVRN | EN_LmMnZERODATA) + +#define LmHWTSTATEN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC5) + +#define EN_LmDONETO 0x80 +#define EN_LmINVDISP 0x40 +#define EN_LmINVDW 0x20 +#define EN_LmDWSEVENT 0x08 +#define EN_LmCRTTTO 0x04 +#define EN_LmANTTTO 0x02 +#define EN_LmBITLTTO 0x01 + +#define LmHWTSTATEN_MASK (EN_LmINVDISP | EN_LmINVDW | \ + EN_LmDWSEVENT | EN_LmCRTTTO | \ + EN_LmANTTTO | EN_LmDONETO | \ + EN_LmBITLTTO) + +#define LmHWTSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC7) + +#define LmDONETO 0x80 +#define LmINVDISP 0x40 +#define LmINVDW 0x20 +#define LmDWSEVENT 0x08 +#define LmCRTTTO 0x04 +#define LmANTTTO 0x02 +#define LmBITLTTO 0x01 + +#define LmMnDATABUFADR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xC8) +#define LmDATABUFADR_MASK 0x0FFF + +#define LmMnDATABUF(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xCA) + +#define LmPRIMSTAT0EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE0) + +#define EN_LmUNKNOWNP 0x20000000 +#define EN_LmBREAK 0x10000000 +#define EN_LmDONE 0x08000000 +#define EN_LmOPENACPT 0x04000000 +#define EN_LmOPENRJCT 
0x02000000 +#define EN_LmOPENRTRY 0x01000000 +#define EN_LmCLOSERV1 0x00800000 +#define EN_LmCLOSERV0 0x00400000 +#define EN_LmCLOSENORM 0x00200000 +#define EN_LmCLOSECLAF 0x00100000 +#define EN_LmNOTIFYRV2 0x00080000 +#define EN_LmNOTIFYRV1 0x00040000 +#define EN_LmNOTIFYRV0 0x00020000 +#define EN_LmNOTIFYSPIN 0x00010000 +#define EN_LmBROADRV4 0x00008000 +#define EN_LmBROADRV3 0x00004000 +#define EN_LmBROADRV2 0x00002000 +#define EN_LmBROADRV1 0x00001000 +#define EN_LmBROADRV0 0x00000800 +#define EN_LmBROADRVCH1 0x00000400 +#define EN_LmBROADRVCH0 0x00000200 +#define EN_LmBROADCH 0x00000100 +#define EN_LmAIPRVWP 0x00000080 +#define EN_LmAIPWP 0x00000040 +#define EN_LmAIPWD 0x00000020 +#define EN_LmAIPWC 0x00000010 +#define EN_LmAIPRV2 0x00000008 +#define EN_LmAIPRV1 0x00000004 +#define EN_LmAIPRV0 0x00000002 +#define EN_LmAIPNRML 0x00000001 + +#define LmPRIMSTAT0EN_MASK (EN_LmBREAK | \ + EN_LmDONE | EN_LmOPENACPT | \ + EN_LmOPENRJCT | EN_LmOPENRTRY | \ + EN_LmCLOSERV1 | EN_LmCLOSERV0 | \ + EN_LmCLOSENORM | EN_LmCLOSECLAF | \ + EN_LmBROADRV4 | EN_LmBROADRV3 | \ + EN_LmBROADRV2 | EN_LmBROADRV1 | \ + EN_LmBROADRV0 | EN_LmBROADRVCH1 | \ + EN_LmBROADRVCH0 | EN_LmBROADCH | \ + EN_LmAIPRVWP | EN_LmAIPWP | \ + EN_LmAIPWD | EN_LmAIPWC | \ + EN_LmAIPRV2 | EN_LmAIPRV1 | \ + EN_LmAIPRV0 | EN_LmAIPNRML) + +#define LmPRIMSTAT1EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE4) + +#define EN_LmXRDY 0x00040000 +#define EN_LmSYNCSRST 0x00020000 +#define EN_LmSYNC 0x00010000 +#define EN_LmXHOLD 0x00008000 +#define EN_LmRRDY 0x00004000 +#define EN_LmHOLD 0x00002000 +#define EN_LmROK 0x00001000 +#define EN_LmRIP 0x00000800 +#define EN_LmCRBLK 0x00000400 +#define EN_LmACK 0x00000200 +#define EN_LmNAK 0x00000100 +#define EN_LmHARDRST 0x00000080 +#define EN_LmERROR 0x00000040 +#define EN_LmRERR 0x00000020 +#define EN_LmPMREQP 0x00000010 +#define EN_LmPMREQS 0x00000008 +#define EN_LmPMACK 0x00000004 +#define EN_LmPMNAK 0x00000002 +#define EN_LmDMAT 0x00000001 + +#define LmPRIMSTAT1EN_MASK (EN_LmHARDRST | \ + EN_LmSYNCSRST | \ + EN_LmPMREQP | EN_LmPMREQS | \ + EN_LmPMACK | EN_LmPMNAK) + +#define LmSMSTATE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE8) + +#define LmSMSTATEBRK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xEC) + +#define LmSMDBGCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xF0) + + +/* + * LmSEQ CIO Bus Mode 3 Register. + * Mode 3: Configuration and Setup, IOP Context SCB. + */ +#define LmM3SATATIMER(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x48) + +#define LmM3INTVEC0(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x90) + +#define LmM3INTVEC1(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x92) + +#define LmM3INTVEC2(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x94) + +#define LmM3INTVEC3(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x96) + +#define LmM3INTVEC4(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x98) + +#define LmM3INTVEC5(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9A) + +#define LmM3INTVEC6(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9C) + +#define LmM3INTVEC7(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9E) + +#define LmM3INTVEC8(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA4) + +#define LmM3INTVEC9(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA6) + +#define LmM3INTVEC10(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB0) + +#define LmM3FRMGAP(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB4) + +#define LmBITL_TIMER(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA2) + +#define LmWWN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA8) + + +/* + * LmSEQ CIO Bus Mode 5 Registers. + * Mode 5: Phy/OOB Control and Status. 
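 *
 * These registers are per phy and are reached through LmSEQ_OOB_REG(phy_id,
 * reg), defined just below as LmSEQ_PHY_REG(5, phy_id, reg).  An illustrative
 * access (register and bit chosen arbitrarily, only to show how the macros
 * compose):
 *
 *	asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, OOB_SIG_GEN),
 *			   START_OOB);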
+ */ +#define LmSEQ_OOB_REG(phy_id, reg) LmSEQ_PHY_REG(5, (phy_id), (reg)) + +#define OOB_BFLTR 0x100 + +#define BFLTR_THR_MASK 0xF0 +#define BFLTR_TC_MASK 0x0F + +#define OOB_INIT_MIN 0x102 + +#define OOB_INIT_MAX 0x104 + +#define OOB_INIT_NEG 0x106 + +#define OOB_SAS_MIN 0x108 + +#define OOB_SAS_MAX 0x10A + +#define OOB_SAS_NEG 0x10C + +#define OOB_WAKE_MIN 0x10E + +#define OOB_WAKE_MAX 0x110 + +#define OOB_WAKE_NEG 0x112 + +#define OOB_IDLE_MAX 0x114 + +#define OOB_BURST_MAX 0x116 + +#define OOB_DATA_KBITS 0x126 + +#define OOB_ALIGN_0_DATA 0x12C + +#define OOB_ALIGN_1_DATA 0x130 + +#define D10_2_DATA_k 0x00 +#define SYNC_DATA_k 0x02 +#define ALIGN_1_DATA_k 0x04 +#define ALIGN_0_DATA_k 0x08 +#define BURST_DATA_k 0x10 + +#define OOB_PHY_RESET_COUNT 0x13C + +#define OOB_SIG_GEN 0x140 + +#define START_OOB 0x80 +#define START_DWS 0x40 +#define ALIGN_CNT3 0x30 +#define ALIGN_CNT2 0x20 +#define ALIGN_CNT1 0x10 +#define ALIGN_CNT4 0x00 +#define STOP_DWS 0x08 +#define SEND_COMSAS 0x04 +#define SEND_COMINIT 0x02 +#define SEND_COMWAKE 0x01 + +#define OOB_XMIT 0x141 + +#define TX_ENABLE 0x80 +#define XMIT_OOB_BURST 0x10 +#define XMIT_D10_2 0x08 +#define XMIT_SYNC 0x04 +#define XMIT_ALIGN_1 0x02 +#define XMIT_ALIGN_0 0x01 + +#define FUNCTION_MASK 0x142 + +#define SAS_MODE_DIS 0x80 +#define SATA_MODE_DIS 0x40 +#define SPINUP_HOLD_DIS 0x20 +#define HOT_PLUG_DIS 0x10 +#define SATA_PS_DIS 0x08 +#define FUNCTION_MASK_DEFAULT (SPINUP_HOLD_DIS | SATA_PS_DIS) + +#define OOB_MODE 0x143 + +#define SAS_MODE 0x80 +#define SATA_MODE 0x40 +#define SLOW_CLK 0x20 +#define FORCE_XMIT_15 0x08 +#define PHY_SPEED_60 0x04 +#define PHY_SPEED_30 0x02 +#define PHY_SPEED_15 0x01 + +#define CURRENT_STATUS 0x144 + +#define CURRENT_OOB_DONE 0x80 +#define CURRENT_LOSS_OF_SIGNAL 0x40 +#define CURRENT_SPINUP_HOLD 0x20 +#define CURRENT_HOT_PLUG_CNCT 0x10 +#define CURRENT_GTO_TIMEOUT 0x08 +#define CURRENT_OOB_TIMEOUT 0x04 +#define CURRENT_DEVICE_PRESENT 0x02 +#define CURRENT_OOB_ERROR 0x01 + +#define CURRENT_OOB1_ERROR (CURRENT_HOT_PLUG_CNCT | \ + CURRENT_GTO_TIMEOUT) + +#define CURRENT_OOB2_ERROR (CURRENT_HOT_PLUG_CNCT | \ + CURRENT_OOB_ERROR) + +#define DEVICE_ADDED_W_CNT (CURRENT_OOB_DONE | \ + CURRENT_HOT_PLUG_CNCT | \ + CURRENT_DEVICE_PRESENT) + +#define DEVICE_ADDED_WO_CNT (CURRENT_OOB_DONE | \ + CURRENT_DEVICE_PRESENT) + +#define DEVICE_REMOVED CURRENT_LOSS_OF_SIGNAL + +#define CURRENT_PHY_MASK (CURRENT_OOB_DONE | \ + CURRENT_LOSS_OF_SIGNAL | \ + CURRENT_SPINUP_HOLD | \ + CURRENT_HOT_PLUG_CNCT | \ + CURRENT_GTO_TIMEOUT | \ + CURRENT_DEVICE_PRESENT | \ + CURRENT_OOB_ERROR ) + +#define CURRENT_ERR_MASK (CURRENT_LOSS_OF_SIGNAL | \ + CURRENT_GTO_TIMEOUT | \ + CURRENT_OOB_TIMEOUT | \ + CURRENT_OOB_ERROR ) + +#define SPEED_MASK 0x145 + +#define SATA_SPEED_30_DIS 0x10 +#define SATA_SPEED_15_DIS 0x08 +#define SAS_SPEED_60_DIS 0x04 +#define SAS_SPEED_30_DIS 0x02 +#define SAS_SPEED_15_DIS 0x01 +#define SAS_SPEED_MASK_DEFAULT 0x00 + +#define OOB_TIMER_ENABLE 0x14D + +#define HOT_PLUG_EN 0x80 +#define RCD_EN 0x40 +#define COMTIMER_EN 0x20 +#define SNTT_EN 0x10 +#define SNLT_EN 0x04 +#define SNWT_EN 0x02 +#define ALIGN_EN 0x01 + +#define OOB_STATUS 0x14E + +#define OOB_DONE 0x80 +#define LOSS_OF_SIGNAL 0x40 /* ro */ +#define SPINUP_HOLD 0x20 +#define HOT_PLUG_CNCT 0x10 /* ro */ +#define GTO_TIMEOUT 0x08 /* ro */ +#define OOB_TIMEOUT 0x04 /* ro */ +#define DEVICE_PRESENT 0x02 /* ro */ +#define OOB_ERROR 0x01 /* ro */ + +#define OOB_STATUS_ERROR_MASK (LOSS_OF_SIGNAL | GTO_TIMEOUT | \ + OOB_TIMEOUT | OOB_ERROR) + +#define OOB_STATUS_CLEAR 
0x14F + +#define OOB_DONE_CLR 0x80 +#define LOSS_OF_SIGNAL_CLR 0x40 +#define SPINUP_HOLD_CLR 0x20 +#define HOT_PLUG_CNCT_CLR 0x10 +#define GTO_TIMEOUT_CLR 0x08 +#define OOB_TIMEOUT_CLR 0x04 +#define OOB_ERROR_CLR 0x01 + +#define HOT_PLUG_DELAY 0x150 +/* In 5 ms units. 20 = 100 ms. */ +#define HOTPLUG_DELAY_TIMEOUT 20 + + +#define INT_ENABLE_2 0x15A + +#define OOB_DONE_EN 0x80 +#define LOSS_OF_SIGNAL_EN 0x40 +#define SPINUP_HOLD_EN 0x20 +#define HOT_PLUG_CNCT_EN 0x10 +#define GTO_TIMEOUT_EN 0x08 +#define OOB_TIMEOUT_EN 0x04 +#define DEVICE_PRESENT_EN 0x02 +#define OOB_ERROR_EN 0x01 + +#define PHY_CONTROL_0 0x160 + +#define PHY_LOWPWREN_TX 0x80 +#define PHY_LOWPWREN_RX 0x40 +#define SPARE_REG_160_B5 0x20 +#define OFFSET_CANCEL_RX 0x10 + +/* bits 3:2 */ +#define PHY_RXCOMCENTER_60V 0x00 +#define PHY_RXCOMCENTER_70V 0x04 +#define PHY_RXCOMCENTER_80V 0x08 +#define PHY_RXCOMCENTER_90V 0x0C +#define PHY_RXCOMCENTER_MASK 0x0C + +#define PHY_RESET 0x02 +#define SAS_DEFAULT_SEL 0x01 + +#define PHY_CONTROL_1 0x161 + +/* bits 2:0 */ +#define SATA_PHY_DETLEVEL_50mv 0x00 +#define SATA_PHY_DETLEVEL_75mv 0x01 +#define SATA_PHY_DETLEVEL_100mv 0x02 +#define SATA_PHY_DETLEVEL_125mv 0x03 +#define SATA_PHY_DETLEVEL_150mv 0x04 +#define SATA_PHY_DETLEVEL_175mv 0x05 +#define SATA_PHY_DETLEVEL_200mv 0x06 +#define SATA_PHY_DETLEVEL_225mv 0x07 +#define SATA_PHY_DETLEVEL_MASK 0x07 + +/* bits 5:3 */ +#define SAS_PHY_DETLEVEL_50mv 0x00 +#define SAS_PHY_DETLEVEL_75mv 0x08 +#define SAS_PHY_DETLEVEL_100mv 0x10 +#define SAS_PHY_DETLEVEL_125mv 0x11 +#define SAS_PHY_DETLEVEL_150mv 0x20 +#define SAS_PHY_DETLEVEL_175mv 0x21 +#define SAS_PHY_DETLEVEL_200mv 0x30 +#define SAS_PHY_DETLEVEL_225mv 0x31 +#define SAS_PHY_DETLEVEL_MASK 0x38 + +#define PHY_CONTROL_2 0x162 + +/* bits 7:5 */ +#define SATA_PHY_DRV_400mv 0x00 +#define SATA_PHY_DRV_450mv 0x20 +#define SATA_PHY_DRV_500mv 0x40 +#define SATA_PHY_DRV_550mv 0x60 +#define SATA_PHY_DRV_600mv 0x80 +#define SATA_PHY_DRV_650mv 0xA0 +#define SATA_PHY_DRV_725mv 0xC0 +#define SATA_PHY_DRV_800mv 0xE0 +#define SATA_PHY_DRV_MASK 0xE0 + +/* bits 4:3 */ +#define SATA_PREEMP_0 0x00 +#define SATA_PREEMP_1 0x08 +#define SATA_PREEMP_2 0x10 +#define SATA_PREEMP_3 0x18 +#define SATA_PREEMP_MASK 0x18 + +#define SATA_CMSH1P5 0x04 + +/* bits 1:0 */ +#define SATA_SLEW_0 0x00 +#define SATA_SLEW_1 0x01 +#define SATA_SLEW_2 0x02 +#define SATA_SLEW_3 0x03 +#define SATA_SLEW_MASK 0x03 + +#define PHY_CONTROL_3 0x163 + +/* bits 7:5 */ +#define SAS_PHY_DRV_400mv 0x00 +#define SAS_PHY_DRV_450mv 0x20 +#define SAS_PHY_DRV_500mv 0x40 +#define SAS_PHY_DRV_550mv 0x60 +#define SAS_PHY_DRV_600mv 0x80 +#define SAS_PHY_DRV_650mv 0xA0 +#define SAS_PHY_DRV_725mv 0xC0 +#define SAS_PHY_DRV_800mv 0xE0 +#define SAS_PHY_DRV_MASK 0xE0 + +/* bits 4:3 */ +#define SAS_PREEMP_0 0x00 +#define SAS_PREEMP_1 0x08 +#define SAS_PREEMP_2 0x10 +#define SAS_PREEMP_3 0x18 +#define SAS_PREEMP_MASK 0x18 + +#define SAS_CMSH1P5 0x04 + +/* bits 1:0 */ +#define SAS_SLEW_0 0x00 +#define SAS_SLEW_1 0x01 +#define SAS_SLEW_2 0x02 +#define SAS_SLEW_3 0x03 +#define SAS_SLEW_MASK 0x03 + +#define PHY_CONTROL_4 0x168 + +#define PHY_DONE_CAL_TX 0x80 +#define PHY_DONE_CAL_RX 0x40 +#define RX_TERM_LOAD_DIS 0x20 +#define TX_TERM_LOAD_DIS 0x10 +#define AUTO_TERM_CAL_DIS 0x08 +#define PHY_SIGDET_FLTR_EN 0x04 +#define OSC_FREQ 0x02 +#define PHY_START_CAL 0x01 + +/* + * HST_PCIX2 Registers, Addresss Range: (0x00-0xFC) + */ +#define PCIX_REG_BASE_ADR 0xB8040000 + +#define PCIC_VENDOR_ID 0x00 + +#define PCIC_DEVICE_ID 0x02 + +#define PCIC_COMMAND 0x04 + +#define 
INT_DIS 0x0400 +#define FBB_EN 0x0200 /* ro */ +#define SERR_EN 0x0100 +#define STEP_EN 0x0080 /* ro */ +#define PERR_EN 0x0040 +#define VGA_EN 0x0020 /* ro */ +#define MWI_EN 0x0010 +#define SPC_EN 0x0008 +#define MST_EN 0x0004 +#define MEM_EN 0x0002 +#define IO_EN 0x0001 + +#define PCIC_STATUS 0x06 + +#define PERR_DET 0x8000 +#define SERR_GEN 0x4000 +#define MABT_DET 0x2000 +#define TABT_DET 0x1000 +#define TABT_GEN 0x0800 +#define DPERR_DET 0x0100 +#define CAP_LIST 0x0010 +#define INT_STAT 0x0008 + +#define PCIC_DEVREV_ID 0x08 + +#define PCIC_CLASS_CODE 0x09 + +#define PCIC_CACHELINE_SIZE 0x0C + +#define PCIC_MBAR0 0x10 + +#define PCIC_MBAR0_OFFSET 0 + +#define PCIC_MBAR1 0x18 + +#define PCIC_MBAR1_OFFSET 2 + +#define PCIC_IOBAR 0x20 + +#define PCIC_IOBAR_OFFSET 4 + +#define PCIC_SUBVENDOR_ID 0x2C + +#define PCIC_SUBSYTEM_ID 0x2E + +#define PCIX_STATUS 0x44 +#define RCV_SCE 0x20000000 +#define UNEXP_SC 0x00080000 +#define SC_DISCARD 0x00040000 + +#define ECC_CTRL_STAT 0x48 +#define UNCOR_ECCERR 0x00000008 + +#define PCIC_PM_CSR 0x5C + +#define PWR_STATE_D0 0 +#define PWR_STATE_D1 1 /* not supported */ +#define PWR_STATE_D2 2 /* not supported */ +#define PWR_STATE_D3 3 + +#define PCIC_BASE1 0x6C /* internal use only */ + +#define BASE1_RSVD 0xFFFFFFF8 + +#define PCIC_BASEA 0x70 /* internal use only */ + +#define BASEA_RSVD 0xFFFFFFC0 +#define BASEA_START 0 + +#define PCIC_BASEB 0x74 /* internal use only */ + +#define BASEB_RSVD 0xFFFFFF80 +#define BASEB_IOMAP_MASK 0x7F +#define BASEB_START 0x80 + +#define PCIC_BASEC 0x78 /* internal use only */ + +#define BASEC_RSVD 0xFFFFFFFC +#define BASEC_MASK 0x03 +#define BASEC_START 0x58 + +#define PCIC_MBAR_KEY 0x7C /* internal use only */ + +#define MBAR_KEY_MASK 0xFFFFFFFF + +#define PCIC_HSTPCIX_CNTRL 0xA0 + +#define REWIND_DIS 0x0800 +#define SC_TMR_DIS 0x04000000 + +#define PCIC_MBAR0_MASK 0xA8 +#define PCIC_MBAR0_SIZE_MASK 0x1FFFE000 +#define PCIC_MBAR0_SIZE_SHIFT 13 +#define PCIC_MBAR0_SIZE(val) \ + (((val) & PCIC_MBAR0_SIZE_MASK) >> PCIC_MBAR0_SIZE_SHIFT) + +#define PCIC_FLASH_MBAR 0xB8 + +#define PCIC_INTRPT_STAT 0xD4 + +#define PCIC_TP_CTRL 0xFC + +/* + * EXSI Registers, Addresss Range: (0x00-0xFC) + */ +#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI + +#define EXSICNFGR (EXSI_REG_BASE_ADR + 0x00) + +#define OCMINITIALIZED 0x80000000 +#define ASIEN 0x00400000 +#define HCMODE 0x00200000 +#define PCIDEF 0x00100000 +#define COMSTOCK 0x00080000 +#define SEEPROMEND 0x00040000 +#define MSTTIMEN 0x00020000 +#define XREGEX 0x00000200 +#define NVRAMW 0x00000100 +#define NVRAMEX 0x00000080 +#define SRAMW 0x00000040 +#define SRAMEX 0x00000020 +#define FLASHW 0x00000010 +#define FLASHEX 0x00000008 +#define SEEPROMCFG 0x00000004 +#define SEEPROMTYP 0x00000002 +#define SEEPROMEX 0x00000001 + + +#define EXSICNTRLR (EXSI_REG_BASE_ADR + 0x04) + +#define MODINT_EN 0x00000001 + + +#define PMSTATR (EXSI_REG_BASE_ADR + 0x10) + +#define FLASHRST 0x00000002 +#define FLASHRDY 0x00000001 + + +#define FLCNFGR (EXSI_REG_BASE_ADR + 0x14) + +#define FLWEH_MASK 0x30000000 +#define FLWESU_MASK 0x0C000000 +#define FLWEPW_MASK 0x03F00000 +#define FLOEH_MASK 0x000C0000 +#define FLOESU_MASK 0x00030000 +#define FLOEPW_MASK 0x0000FC00 +#define FLCSH_MASK 0x00000300 +#define FLCSSU_MASK 0x000000C0 +#define FLCSPW_MASK 0x0000003F + +#define SRCNFGR (EXSI_REG_BASE_ADR + 0x18) + +#define SRWEH_MASK 0x30000000 +#define SRWESU_MASK 0x0C000000 +#define SRWEPW_MASK 0x03F00000 + +#define SROEH_MASK 0x000C0000 +#define SROESU_MASK 0x00030000 +#define SROEPW_MASK 0x0000FC00 +#define 
SRCSH_MASK 0x00000300 +#define SRCSSU_MASK 0x000000C0 +#define SRCSPW_MASK 0x0000003F + +#define NVCNFGR (EXSI_REG_BASE_ADR + 0x1C) + +#define NVWEH_MASK 0x30000000 +#define NVWESU_MASK 0x0C000000 +#define NVWEPW_MASK 0x03F00000 +#define NVOEH_MASK 0x000C0000 +#define NVOESU_MASK 0x00030000 +#define NVOEPW_MASK 0x0000FC00 +#define NVCSH_MASK 0x00000300 +#define NVCSSU_MASK 0x000000C0 +#define NVCSPW_MASK 0x0000003F + +#define XRCNFGR (EXSI_REG_BASE_ADR + 0x20) + +#define XRWEH_MASK 0x30000000 +#define XRWESU_MASK 0x0C000000 +#define XRWEPW_MASK 0x03F00000 +#define XROEH_MASK 0x000C0000 +#define XROESU_MASK 0x00030000 +#define XROEPW_MASK 0x0000FC00 +#define XRCSH_MASK 0x00000300 +#define XRCSSU_MASK 0x000000C0 +#define XRCSPW_MASK 0x0000003F + +#define XREGADDR (EXSI_REG_BASE_ADR + 0x24) + +#define XRADDRINCEN 0x80000000 +#define XREGADD_MASK 0x007FFFFF + + +#define XREGDATAR (EXSI_REG_BASE_ADR + 0x28) + +#define XREGDATA_MASK 0x0000FFFF + +#define GPIOOER (EXSI_REG_BASE_ADR + 0x40) + +#define GPIOODENR (EXSI_REG_BASE_ADR + 0x44) + +#define GPIOINVR (EXSI_REG_BASE_ADR + 0x48) + +#define GPIODATAOR (EXSI_REG_BASE_ADR + 0x4C) + +#define GPIODATAIR (EXSI_REG_BASE_ADR + 0x50) + +#define GPIOCNFGR (EXSI_REG_BASE_ADR + 0x54) + +#define GPIO_EXTSRC 0x00000001 + +#define SCNTRLR (EXSI_REG_BASE_ADR + 0xA0) + +#define SXFERDONE 0x00000100 +#define SXFERCNT_MASK 0x000000E0 +#define SCMDTYP_MASK 0x0000001C +#define SXFERSTART 0x00000002 +#define SXFEREN 0x00000001 + +#define SRATER (EXSI_REG_BASE_ADR + 0xA4) + +#define SADDRR (EXSI_REG_BASE_ADR + 0xA8) + +#define SADDR_MASK 0x0000FFFF + +#define SDATAOR (EXSI_REG_BASE_ADR + 0xAC) + +#define SDATAOR0 (EXSI_REG_BASE_ADR + 0xAC) +#define SDATAOR1 (EXSI_REG_BASE_ADR + 0xAD) +#define SDATAOR2 (EXSI_REG_BASE_ADR + 0xAE) +#define SDATAOR3 (EXSI_REG_BASE_ADR + 0xAF) + +#define SDATAIR (EXSI_REG_BASE_ADR + 0xB0) + +#define SDATAIR0 (EXSI_REG_BASE_ADR + 0xB0) +#define SDATAIR1 (EXSI_REG_BASE_ADR + 0xB1) +#define SDATAIR2 (EXSI_REG_BASE_ADR + 0xB2) +#define SDATAIR3 (EXSI_REG_BASE_ADR + 0xB3) + +#define ASISTAT0R (EXSI_REG_BASE_ADR + 0xD0) +#define ASIFMTERR 0x00000400 +#define ASISEECHKERR 0x00000200 +#define ASIERR 0x00000100 + +#define ASISTAT1R (EXSI_REG_BASE_ADR + 0xD4) +#define CHECKSUM_MASK 0x0000FFFF + +#define ASIERRADDR (EXSI_REG_BASE_ADR + 0xD8) +#define ASIERRDATAR (EXSI_REG_BASE_ADR + 0xDC) +#define ASIERRSTATR (EXSI_REG_BASE_ADR + 0xE0) +#define CPI2ASIBYTECNT_MASK 0x00070000 +#define CPI2ASIBYTEEN_MASK 0x0000F000 +#define CPI2ASITARGERR_MASK 0x00000F00 +#define CPI2ASITARGMID_MASK 0x000000F0 +#define CPI2ASIMSTERR_MASK 0x0000000F + +/* + * XSRAM, External SRAM (DWord and any BE pattern accessible) + */ +#define XSRAM_REG_BASE_ADDR 0xB8100000 +#define XSRAM_SIZE 0x100000 + +/* + * NVRAM Registers, Address Range: (0x00000 - 0x3FFFF). + */ +#define NVRAM_REG_BASE_ADR 0xBF800000 +#define NVRAM_MAX_BASE_ADR 0x003FFFFF + +/* OCM base address */ +#define OCM_BASE_ADDR 0xA0000000 +#define OCM_MAX_SIZE 0x20000 + +/* + * Sequencers (Central and Link) Scratch RAM page definitions. + */ + +/* + * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024 + * byte memory. It is dword accessible and has byte parity + * protection. The CSEQ accesses it in 32 byte windows, either as mode + * dependent or mode independent memory. Each mode has 96 bytes, + * (three 32 byte pages 0-2, not contiguous), leaving 128 bytes of + * Mode Independent memory (four 32 byte pages 3-7). 
Note that mode + * dependent scratch memory, Mode 8, page 0-3 overlaps mode + * independent scratch memory, pages 0-3. + * - 896 bytes of mode dependent scratch, 96 bytes per Modes 0-7, and + * 128 bytes in mode 8, + * - 259 bytes of mode independent scratch, common to modes 0-15. + * + * Sequencer scratch RAM is 1024 bytes. This scratch memory is + * divided into mode dependent and mode independent scratch with this + * memory further subdivided into pages of size 32 bytes. There are 5 + * pages (160 bytes) of mode independent scratch and 3 pages of + * dependent scratch memory for modes 0-7 (768 bytes). Mode 8 pages + * 0-2 dependent scratch overlap with pages 0-2 of mode independent + * scratch memory. + * + * The host accesses this scratch in a different manner from the + * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE + * and CMnSCRPAGE to access the scratch memory. A flat mapping of the + * scratch memory is avaliable for software convenience and to prevent + * corruption while the sequencer is running. This memory is mapped + * onto addresses 800h - BFFh, total of 400h bytes. + * + * These addresses are mapped as follows: + * + * 800h-83Fh Mode Dependent Scratch Mode 0 Pages 0-1 + * 840h-87Fh Mode Dependent Scratch Mode 1 Pages 0-1 + * 880h-8BFh Mode Dependent Scratch Mode 2 Pages 0-1 + * 8C0h-8FFh Mode Dependent Scratch Mode 3 Pages 0-1 + * 900h-93Fh Mode Dependent Scratch Mode 4 Pages 0-1 + * 940h-97Fh Mode Dependent Scratch Mode 5 Pages 0-1 + * 980h-9BFh Mode Dependent Scratch Mode 6 Pages 0-1 + * 9C0h-9FFh Mode Dependent Scratch Mode 7 Pages 0-1 + * A00h-A5Fh Mode Dependent Scratch Mode 8 Pages 0-2 + * Mode Independent Scratch Pages 0-2 + * A60h-A7Fh Mode Dependent Scratch Mode 8 Page 3 + * Mode Independent Scratch Page 3 + * A80h-AFFh Mode Independent Scratch Pages 4-7 + * B00h-B1Fh Mode Dependent Scratch Mode 0 Page 2 + * B20h-B3Fh Mode Dependent Scratch Mode 1 Page 2 + * B40h-B5Fh Mode Dependent Scratch Mode 2 Page 2 + * B60h-B7Fh Mode Dependent Scratch Mode 3 Page 2 + * B80h-B9Fh Mode Dependent Scratch Mode 4 Page 2 + * BA0h-BBFh Mode Dependent Scratch Mode 5 Page 2 + * BC0h-BDFh Mode Dependent Scratch Mode 6 Page 2 + * BE0h-BFFh Mode Dependent Scratch Mode 7 Page 2 + */ + +/* General macros */ +#define CSEQ_PAGE_SIZE 32 /* Scratch page size (in bytes) */ + +/* All macros start with offsets from base + 0x800 (CMAPPEDSCR). + * Mode dependent scratch page 0, mode 0. + * For modes 1-7 you have to do arithmetic. */ +#define CSEQ_LRM_SAVE_SINDEX (CMAPPEDSCR + 0x0000) +#define CSEQ_LRM_SAVE_SCBPTR (CMAPPEDSCR + 0x0002) +#define CSEQ_Q_LINK_HEAD (CMAPPEDSCR + 0x0004) +#define CSEQ_Q_LINK_TAIL (CMAPPEDSCR + 0x0006) +#define CSEQ_LRM_SAVE_SCRPAGE (CMAPPEDSCR + 0x0008) + +/* Mode dependent scratch page 0 mode 8 macros. */ +#define CSEQ_RET_ADDR (CMAPPEDSCR + 0x0200) +#define CSEQ_RET_SCBPTR (CMAPPEDSCR + 0x0202) +#define CSEQ_SAVE_SCBPTR (CMAPPEDSCR + 0x0204) +#define CSEQ_EMPTY_TRANS_CTX (CMAPPEDSCR + 0x0206) +#define CSEQ_RESP_LEN (CMAPPEDSCR + 0x0208) +#define CSEQ_TMF_SCBPTR (CMAPPEDSCR + 0x020A) +#define CSEQ_GLOBAL_PREV_SCB (CMAPPEDSCR + 0x020C) +#define CSEQ_GLOBAL_HEAD (CMAPPEDSCR + 0x020E) +#define CSEQ_CLEAR_LU_HEAD (CMAPPEDSCR + 0x0210) +#define CSEQ_TMF_OPCODE (CMAPPEDSCR + 0x0212) +#define CSEQ_SCRATCH_FLAGS (CMAPPEDSCR + 0x0213) +#define CSEQ_HSB_SITE (CMAPPEDSCR + 0x021A) +#define CSEQ_FIRST_INV_SCB_SITE (CMAPPEDSCR + 0x021C) +#define CSEQ_FIRST_INV_DDB_SITE (CMAPPEDSCR + 0x021E) + +/* Mode dependent scratch page 1 mode 8 macros. 
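+ * (For the page 0/1 macros above: the 800h-BFFh map gives each of
+ * modes 0-7 a 0x40 byte window for pages 0-1, so the mode M variant
+ * of a mode 0 definition is obtained by adding M * 0x40 to it; e.g.
+ * CSEQ_Q_LINK_HEAD for mode 2 works out to CMAPPEDSCR + 2 * 0x40 +
+ * 0x0004, i.e. 884h in the map above.)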
*/ +#define CSEQ_LUN_TO_CLEAR (CMAPPEDSCR + 0x0220) +#define CSEQ_LUN_TO_CHECK (CMAPPEDSCR + 0x0228) + +/* Mode dependent scratch page 2 mode 8 macros */ +#define CSEQ_HQ_NEW_POINTER (CMAPPEDSCR + 0x0240) +#define CSEQ_HQ_DONE_BASE (CMAPPEDSCR + 0x0248) +#define CSEQ_HQ_DONE_POINTER (CMAPPEDSCR + 0x0250) +#define CSEQ_HQ_DONE_PASS (CMAPPEDSCR + 0x0254) + +/* Mode independent scratch page 4 macros. */ +#define CSEQ_Q_EXE_HEAD (CMAPPEDSCR + 0x0280) +#define CSEQ_Q_EXE_TAIL (CMAPPEDSCR + 0x0282) +#define CSEQ_Q_DONE_HEAD (CMAPPEDSCR + 0x0284) +#define CSEQ_Q_DONE_TAIL (CMAPPEDSCR + 0x0286) +#define CSEQ_Q_SEND_HEAD (CMAPPEDSCR + 0x0288) +#define CSEQ_Q_SEND_TAIL (CMAPPEDSCR + 0x028A) +#define CSEQ_Q_DMA2CHIM_HEAD (CMAPPEDSCR + 0x028C) +#define CSEQ_Q_DMA2CHIM_TAIL (CMAPPEDSCR + 0x028E) +#define CSEQ_Q_COPY_HEAD (CMAPPEDSCR + 0x0290) +#define CSEQ_Q_COPY_TAIL (CMAPPEDSCR + 0x0292) +#define CSEQ_REG0 (CMAPPEDSCR + 0x0294) +#define CSEQ_REG1 (CMAPPEDSCR + 0x0296) +#define CSEQ_REG2 (CMAPPEDSCR + 0x0298) +#define CSEQ_LINK_CTL_Q_MAP (CMAPPEDSCR + 0x029C) +#define CSEQ_MAX_CSEQ_MODE (CMAPPEDSCR + 0x029D) +#define CSEQ_FREE_LIST_HACK_COUNT (CMAPPEDSCR + 0x029E) + +/* Mode independent scratch page 5 macros. */ +#define CSEQ_EST_NEXUS_REQ_QUEUE (CMAPPEDSCR + 0x02A0) +#define CSEQ_EST_NEXUS_REQ_COUNT (CMAPPEDSCR + 0x02A8) +#define CSEQ_Q_EST_NEXUS_HEAD (CMAPPEDSCR + 0x02B0) +#define CSEQ_Q_EST_NEXUS_TAIL (CMAPPEDSCR + 0x02B2) +#define CSEQ_NEED_EST_NEXUS_SCB (CMAPPEDSCR + 0x02B4) +#define CSEQ_EST_NEXUS_REQ_HEAD (CMAPPEDSCR + 0x02B6) +#define CSEQ_EST_NEXUS_REQ_TAIL (CMAPPEDSCR + 0x02B7) +#define CSEQ_EST_NEXUS_SCB_OFFSET (CMAPPEDSCR + 0x02B8) + +/* Mode independent scratch page 6 macros. */ +#define CSEQ_INT_ROUT_RET_ADDR0 (CMAPPEDSCR + 0x02C0) +#define CSEQ_INT_ROUT_RET_ADDR1 (CMAPPEDSCR + 0x02C2) +#define CSEQ_INT_ROUT_SCBPTR (CMAPPEDSCR + 0x02C4) +#define CSEQ_INT_ROUT_MODE (CMAPPEDSCR + 0x02C6) +#define CSEQ_ISR_SCRATCH_FLAGS (CMAPPEDSCR + 0x02C7) +#define CSEQ_ISR_SAVE_SINDEX (CMAPPEDSCR + 0x02C8) +#define CSEQ_ISR_SAVE_DINDEX (CMAPPEDSCR + 0x02CA) +#define CSEQ_Q_MONIRTT_HEAD (CMAPPEDSCR + 0x02D0) +#define CSEQ_Q_MONIRTT_TAIL (CMAPPEDSCR + 0x02D2) +#define CSEQ_FREE_SCB_MASK (CMAPPEDSCR + 0x02D5) +#define CSEQ_BUILTIN_FREE_SCB_HEAD (CMAPPEDSCR + 0x02D6) +#define CSEQ_BUILTIN_FREE_SCB_TAIL (CMAPPEDSCR + 0x02D8) +#define CSEQ_EXTENDED_FREE_SCB_HEAD (CMAPPEDSCR + 0x02DA) +#define CSEQ_EXTENDED_FREE_SCB_TAIL (CMAPPEDSCR + 0x02DC) + +/* Mode independent scratch page 7 macros. */ +#define CSEQ_EMPTY_REQ_QUEUE (CMAPPEDSCR + 0x02E0) +#define CSEQ_EMPTY_REQ_COUNT (CMAPPEDSCR + 0x02E8) +#define CSEQ_Q_EMPTY_HEAD (CMAPPEDSCR + 0x02F0) +#define CSEQ_Q_EMPTY_TAIL (CMAPPEDSCR + 0x02F2) +#define CSEQ_NEED_EMPTY_SCB (CMAPPEDSCR + 0x02F4) +#define CSEQ_EMPTY_REQ_HEAD (CMAPPEDSCR + 0x02F6) +#define CSEQ_EMPTY_REQ_TAIL (CMAPPEDSCR + 0x02F7) +#define CSEQ_EMPTY_SCB_OFFSET (CMAPPEDSCR + 0x02F8) +#define CSEQ_PRIMITIVE_DATA (CMAPPEDSCR + 0x02FA) +#define CSEQ_TIMEOUT_CONST (CMAPPEDSCR + 0x02FC) + +/*************************************************************************** +* Link m Sequencer scratch RAM is 512 bytes. +* This scratch memory is divided into mode dependent and mode +* independent scratch with this memory further subdivided into +* pages of size 32 bytes. There are 4 pages (128 bytes) of +* mode independent scratch and 4 pages of dependent scratch +* memory for modes 0-2 (384 bytes). +* +* The host accesses this scratch in a different manner from the +* link sequencer. 
The sequencer has to use LSEQ registers +* LmSCRPAGE and LmMnSCRPAGE to access the scratch memory. A flat +* mapping of the scratch memory is avaliable for software +* convenience and to prevent corruption while the sequencer is +* running. This memory is mapped onto addresses 800h - 9FFh. +* +* These addresses are mapped as follows: +* +* 800h-85Fh Mode Dependent Scratch Mode 0 Pages 0-2 +* 860h-87Fh Mode Dependent Scratch Mode 0 Page 3 +* Mode Dependent Scratch Mode 5 Page 0 +* 880h-8DFh Mode Dependent Scratch Mode 1 Pages 0-2 +* 8E0h-8FFh Mode Dependent Scratch Mode 1 Page 3 +* Mode Dependent Scratch Mode 5 Page 1 +* 900h-95Fh Mode Dependent Scratch Mode 2 Pages 0-2 +* 960h-97Fh Mode Dependent Scratch Mode 2 Page 3 +* Mode Dependent Scratch Mode 5 Page 2 +* 980h-9DFh Mode Independent Scratch Pages 0-3 +* 9E0h-9FFh Mode Independent Scratch Page 3 +* Mode Dependent Scratch Mode 5 Page 3 +* +****************************************************************************/ +/* General macros */ +#define LSEQ_MODE_SCRATCH_SIZE 0x80 /* Size of scratch RAM per mode */ +#define LSEQ_PAGE_SIZE 0x20 /* Scratch page size (in bytes) */ +#define LSEQ_MODE5_PAGE0_OFFSET 0x60 + +/* Common mode dependent scratch page 0 macros for modes 0,1,2, and 5 */ +/* Indexed using LSEQ_MODE_SCRATCH_SIZE * mode, for modes 0,1,2. */ +#define LmSEQ_RET_ADDR(LinkNum) (LmSCRATCH(LinkNum) + 0x0000) +#define LmSEQ_REG0_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0002) +#define LmSEQ_MODE_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0004) + +/* Mode flag macros (byte 0) */ +#define SAS_SAVECTX_OCCURRED 0x80 +#define SAS_OOBSVC_OCCURRED 0x40 +#define SAS_OOB_DEVICE_PRESENT 0x20 +#define SAS_CFGHDR_OCCURRED 0x10 +#define SAS_RCV_INTS_ARE_DISABLED 0x08 +#define SAS_OOB_HOT_PLUG_CNCT 0x04 +#define SAS_AWAIT_OPEN_CONNECTION 0x02 +#define SAS_CFGCMPLT_OCCURRED 0x01 + +/* Mode flag macros (byte 1) */ +#define SAS_RLSSCB_OCCURRED 0x80 +#define SAS_FORCED_HEADER_MISS 0x40 + +#define LmSEQ_RET_ADDR2(LinkNum) (LmSCRATCH(LinkNum) + 0x0006) +#define LmSEQ_RET_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0008) +#define LmSEQ_OPCODE_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000B) +#define LmSEQ_DATA_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000C) + +/* Mode dependent scratch page 0 macros for mode 0 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_FIRST_INV_DDB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x000E) +#define LmSEQ_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0010) +#define LmSEQ_RESP_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x0012) +#define LmSEQ_FIRST_INV_SCB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x0014) +#define LmSEQ_INTEN_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0016) +#define LmSEQ_LINK_RST_FRM_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x001A) +#define LmSEQ_LINK_RST_PROTOCOL(LinkNum) (LmSCRATCH(LinkNum) + 0x001B) +#define LmSEQ_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x001C) +#define LmSEQ_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x001D) +#define LmSEQ_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x001E) + +/* Mode dependent scratch page 0 macros for mode 1 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_Q_XMIT_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x008E) +#define LmSEQ_M1_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0090) +#define LmSEQ_INI_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0092) +#define LmSEQ_FAILED_OPEN_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009A) +#define LmSEQ_XMIT_REQUEST_TYPE(LinkNum) (LmSCRATCH(LinkNum) + 0x009B) +#define LmSEQ_M1_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009C) +#define LmSEQ_M1_LAST_LOADED_SGE(LinkNum) 
(LmSCRATCH(LinkNum) + 0x009D) +#define LmSEQ_M1_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x009E) + +/* Mode dependent scratch page 0 macros for mode 2 (non-common) */ +#define LmSEQ_PORT_COUNTER(LinkNum) (LmSCRATCH(LinkNum) + 0x010E) +#define LmSEQ_PM_TABLE_PTR(LinkNum) (LmSCRATCH(LinkNum) + 0x0110) +#define LmSEQ_SATA_INTERLOCK_TMR_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0112) +#define LmSEQ_IP_BITL(LinkNum) (LmSCRATCH(LinkNum) + 0x0114) +#define LmSEQ_COPY_SMP_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0116) +#define LmSEQ_P0M2_OFFS1AH(LinkNum) (LmSCRATCH(LinkNum) + 0x011A) + +/* Mode dependent scratch page 0 macros for modes 4/5 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_SAVED_OOB_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x006E) +#define LmSEQ_SAVED_OOB_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x006F) +#define LmSEQ_Q_LINK_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0070) +#define LmSEQ_LINK_RST_ERR(LinkNum) (LmSCRATCH(LinkNum) + 0x0072) +#define LmSEQ_SAVED_OOB_SIGNALS(LinkNum) (LmSCRATCH(LinkNum) + 0x0073) +#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074) +#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075) +#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076) +#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x007A) +#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C) +#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E) + +/* Mode dependent scratch page 1, mode 0 and mode 1 */ +#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020) +#define LmSEQ_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0030) +#define LmSEQ_M1_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x00A0) +#define LmSEQ_M1_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x00B0) + +/* Mode dependent scratch page 1 macros for mode 2 */ +/* Absolute offsets */ +#define LmSEQ_INVALID_DWORD_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0120) +#define LmSEQ_DISPARITY_ERROR_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0124) +#define LmSEQ_LOSS_OF_SYNC_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0128) + +/* Mode dependent scratch page 1 macros for mode 4/5 */ +#define LmSEQ_FRAME_TYPE_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E0) +#define LmSEQ_HASHED_DEST_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E1) +#define LmSEQ_HASHED_SRC_ADDR_MASK_PRINT(LinkNum) (LmSCRATCH(LinkNum) + 0x00E4) +#define LmSEQ_HASHED_SRC_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E5) +#define LmSEQ_NUM_FILL_BYTES_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00EB) +#define LmSEQ_TAG_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00F0) +#define LmSEQ_TARGET_PORT_XFER_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x00F2) +#define LmSEQ_DATA_OFFSET(LinkNum) (LmSCRATCH(LinkNum) + 0x00F4) + +/* Mode dependent scratch page 2 macros for mode 0 */ +/* Absolute offsets */ +#define LmSEQ_SMP_RCV_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0040) +#define LmSEQ_DEVICE_BITS(LinkNum) (LmSCRATCH(LinkNum) + 0x005B) +#define LmSEQ_SDB_DDB(LinkNum) (LmSCRATCH(LinkNum) + 0x005C) +#define LmSEQ_SDB_NUM_TAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x005E) +#define LmSEQ_SDB_CURR_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x005F) + +/* Mode dependent scratch page 2 macros for mode 1 */ +/* Absolute offsets */ +/* byte 0 bits 1-0 are domain select. 
*/ +#define LmSEQ_TX_ID_ADDR_FRAME(LinkNum) (LmSCRATCH(LinkNum) + 0x00C0) +#define LmSEQ_OPEN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00C8) +#define LmSEQ_SRST_AS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00CC) +#define LmSEQ_LAST_LOADED_SG_EL(LinkNum) (LmSCRATCH(LinkNum) + 0x00D4) + +/* Mode dependent scratch page 2 macros for mode 2 */ +/* Absolute offsets */ +#define LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0140) +#define LmSEQ_CLOSE_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0144) +#define LmSEQ_BREAK_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0148) +#define LmSEQ_DWS_RESET_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x014C) +#define LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(LinkNum) \ + (LmSCRATCH(LinkNum) + 0x0150) +#define LmSEQ_MCTL_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0154) + +/* Mode dependent scratch page 2 macros for mode 5 */ +#define LmSEQ_COMINIT_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0160) +#define LmSEQ_RCV_ID_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0164) +#define LmSEQ_RCV_FIS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0168) +#define LmSEQ_DEV_PRES_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x016C) + +/* Mode dependent scratch page 3 macros for modes 0 and 1 */ +/* None defined */ + +/* Mode dependent scratch page 3 macros for modes 2 and 5 */ +/* None defined */ + +/* Mode Independent Scratch page 0 macros. */ +#define LmSEQ_Q_TGTXFR_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0180) +#define LmSEQ_Q_TGTXFR_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x0182) +#define LmSEQ_LINK_NUMBER(LinkNum) (LmSCRATCH(LinkNum) + 0x0186) +#define LmSEQ_SCRATCH_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0187) +/* + * Currently only bit 0, SAS_DWSAQD, is used. + */ +#define SAS_DWSAQD 0x01 /* + * DWSSTATUS: DWSAQD + * bit last read in ISR.
+ */ +#define LmSEQ_CONNECTION_STATE(LinkNum) (LmSCRATCH(LinkNum) + 0x0188) +/* Connection states (byte 0) */ +#define SAS_WE_OPENED_CS 0x01 +#define SAS_DEVICE_OPENED_CS 0x02 +#define SAS_WE_SENT_DONE_CS 0x04 +#define SAS_DEVICE_SENT_DONE_CS 0x08 +#define SAS_WE_SENT_CLOSE_CS 0x10 +#define SAS_DEVICE_SENT_CLOSE_CS 0x20 +#define SAS_WE_SENT_BREAK_CS 0x40 +#define SAS_DEVICE_SENT_BREAK_CS 0x80 +/* Connection states (byte 1) */ +#define SAS_OPN_TIMEOUT_OR_OPN_RJCT_CS 0x01 +#define SAS_AIP_RECEIVED_CS 0x02 +#define SAS_CREDIT_TIMEOUT_OCCURRED_CS 0x04 +#define SAS_ACKNAK_TIMEOUT_OCCURRED_CS 0x08 +#define SAS_SMPRSP_TIMEOUT_OCCURRED_CS 0x10 +#define SAS_DONE_TIMEOUT_OCCURRED_CS 0x20 +/* Connection states (byte 2) */ +#define SAS_SMP_RESPONSE_RECEIVED_CS 0x01 +#define SAS_INTLK_TIMEOUT_OCCURRED_CS 0x02 +#define SAS_DEVICE_SENT_DMAT_CS 0x04 +#define SAS_DEVICE_SENT_SYNCSRST_CS 0x08 +#define SAS_CLEARING_AFFILIATION_CS 0x20 +#define SAS_RXTASK_ACTIVE_CS 0x40 +#define SAS_TXTASK_ACTIVE_CS 0x80 +/* Connection states (byte 3) */ +#define SAS_PHY_LOSS_OF_SIGNAL_CS 0x01 +#define SAS_DWS_TIMER_EXPIRED_CS 0x02 +#define SAS_LINK_RESET_NOT_COMPLETE_CS 0x04 +#define SAS_PHY_DISABLED_CS 0x08 +#define SAS_LINK_CTL_TASK_ACTIVE_CS 0x10 +#define SAS_PHY_EVENT_TASK_ACTIVE_CS 0x20 +#define SAS_DEVICE_SENT_ID_FRAME_CS 0x40 +#define SAS_DEVICE_SENT_REG_FIS_CS 0x40 +#define SAS_DEVICE_SENT_HARD_RESET_CS 0x80 +#define SAS_PHY_IS_DOWN_FLAGS (SAS_PHY_LOSS_OF_SIGNAL_CS|\ + SAS_DWS_TIMER_EXPIRED_CS |\ + SAS_LINK_RESET_NOT_COMPLETE_CS|\ + SAS_PHY_DISABLED_CS) + +#define SAS_LINK_CTL_PHY_EVENT_FLAGS (SAS_LINK_CTL_TASK_ACTIVE_CS |\ + SAS_PHY_EVENT_TASK_ACTIVE_CS |\ + SAS_DEVICE_SENT_ID_FRAME_CS |\ + SAS_DEVICE_SENT_HARD_RESET_CS) + +#define LmSEQ_CONCTL(LinkNum) (LmSCRATCH(LinkNum) + 0x018C) +#define LmSEQ_CONSTAT(LinkNum) (LmSCRATCH(LinkNum) + 0x018E) +#define LmSEQ_CONNECTION_MODES(LinkNum) (LmSCRATCH(LinkNum) + 0x018F) +#define LmSEQ_REG1_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0192) +#define LmSEQ_REG2_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0194) +#define LmSEQ_REG3_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0196) +#define LmSEQ_REG0_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0198) + +/* Mode independent scratch page 1 macros. */ +#define LmSEQ_EST_NEXUS_SCBPTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A0) +#define LmSEQ_EST_NEXUS_SCBPTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A2) +#define LmSEQ_EST_NEXUS_SCBPTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01A4) +#define LmSEQ_EST_NEXUS_SCBPTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01A6) +#define LmSEQ_EST_NEXUS_SCB_OPCODE0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A8) +#define LmSEQ_EST_NEXUS_SCB_OPCODE1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A9) +#define LmSEQ_EST_NEXUS_SCB_OPCODE2(LinkNum) (LmSCRATCH(LinkNum) + 0x01AA) +#define LmSEQ_EST_NEXUS_SCB_OPCODE3(LinkNum) (LmSCRATCH(LinkNum) + 0x01AB) +#define LmSEQ_EST_NEXUS_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01AC) +#define LmSEQ_EST_NEXUS_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AD) +#define LmSEQ_EST_NEXUS_BUF_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AE) +#define LmSEQ_TIMEOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01B8) +#define LmSEQ_ISR_SAVE_SINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BC) +#define LmSEQ_ISR_SAVE_DINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BE) + +/* Mode independent scratch page 2 macros. 
*/ +#define LmSEQ_EMPTY_SCB_PTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C0) +#define LmSEQ_EMPTY_SCB_PTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C2) +#define LmSEQ_EMPTY_SCB_PTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01C4) +#define LmSEQ_EMPTY_SCB_PTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01C6) +#define LmSEQ_EMPTY_SCB_OPCD0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C8) +#define LmSEQ_EMPTY_SCB_OPCD1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C9) +#define LmSEQ_EMPTY_SCB_OPCD2(LinkNum) (LmSCRATCH(LinkNum) + 0x01CA) +#define LmSEQ_EMPTY_SCB_OPCD3(LinkNum) (LmSCRATCH(LinkNum) + 0x01CB) +#define LmSEQ_EMPTY_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01CC) +#define LmSEQ_EMPTY_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CD) +#define LmSEQ_EMPTY_BUFS_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CE) +#define LmSEQ_ATA_SCR_REGS(LinkNum) (LmSCRATCH(LinkNum) + 0x01D4) + +/* Mode independent scratch page 3 macros. */ +#define LmSEQ_DEV_PRES_TMR_TOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01E0) +#define LmSEQ_SATA_INTERLOCK_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E4) +#define LmSEQ_STP_SHUTDOWN_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E8) +#define LmSEQ_SRST_ASSERT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01EC) +#define LmSEQ_RCV_FIS_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F0) +#define LmSEQ_ONE_MILLISEC_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F4) +#define LmSEQ_TEN_MS_COMINIT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F8) +#define LmSEQ_SMP_RCV_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01FC) + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h new file mode 100644 index 0000000..64d2317 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_sas.h @@ -0,0 +1,785 @@ +/* + * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_SAS_H_ +#define _AIC94XX_SAS_H_ + +#include + +/* ---------- DDBs ---------- */ +/* DDBs are device descriptor blocks which describe a device in the + * domain that this sequencer can maintain low-level connections for + * us. They are be 64 bytes. 
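+ *
+ * The kind of DDB held in a given entry is selected through its
+ * ddb_type field (DDB_TYPE_TARGET, DDB_TYPE_INITIATOR, DDB_TYPE_PM_PORT
+ * or DDB_TYPE_UNUSED, defined with the first structure below), and the
+ * size used by the driver is captured further down as ASD_DDB_SIZE.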
+ */ + +struct asd_ddb_ssp_smp_target_port { + u8 conn_type; /* byte 0 */ +#define DDB_TP_CONN_TYPE 0x81 /* Initiator port and addr frame type 0x01 */ + + u8 conn_rate; + __be16 init_conn_tag; + u8 dest_sas_addr[8]; /* bytes 4-11 */ + + __le16 send_queue_head; + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_TARGET */ +#define DDB_TYPE_UNUSED 0xFF +#define DDB_TYPE_TARGET 0xFE +#define DDB_TYPE_INITIATOR 0xFD +#define DDB_TYPE_PM_PORT 0xFC + + __le16 _r_a; + __be16 awt_def; + + u8 compat_features; /* byte 20 */ + u8 pathway_blocked_count; + __be16 arb_wait_time; + __be32 more_compat_features; /* byte 24 */ + + u8 conn_mask; + u8 flags; /* concurrent conn:2,2 and open:0(1) */ +#define CONCURRENT_CONN_SUPP 0x04 +#define OPEN_REQUIRED 0x01 + + u16 _r_b; + __le16 exec_queue_tail; + __le16 send_queue_tail; + __le16 sister_ddb; + + __le16 _r_c; + + u8 max_concurrent_conn; + u8 num_concurrent_conn; + u8 num_contexts; + + u8 _r_d; + + __le16 active_task_count; + + u8 _r_e[9]; + + u8 itnl_reason; /* I_T nexus loss reason */ + + __le16 _r_f; + + __le16 itnl_timeout; +#define ITNL_TIMEOUT_CONST 0x7D0 /* 2 seconds */ + + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +struct asd_ddb_stp_sata_target_port { + u8 conn_type; /* byte 0 */ + u8 conn_rate; + __be16 init_conn_tag; + u8 dest_sas_addr[8]; /* bytes 4-11 */ + + __le16 send_queue_head; + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_TARGET */ + + __le16 _r_a; + + __be16 awt_def; + u8 compat_features; /* byte 20 */ + u8 pathway_blocked_count; + __be16 arb_wait_time; + __be32 more_compat_features; /* byte 24 */ + + u8 conn_mask; + u8 flags; /* concurrent conn:2,2 and open:0(1) */ +#define SATA_MULTIPORT 0x80 +#define SUPPORTS_AFFIL 0x40 +#define STP_AFFIL_POL 0x20 + + u8 _r_b; + u8 flags2; /* STP close policy:0 */ +#define STP_CL_POL_NO_TX 0x00 +#define STP_CL_POL_BTW_CMDS 0x01 + + __le16 exec_queue_tail; + __le16 send_queue_tail; + __le16 sister_ddb; + __le16 ata_cmd_scbptr; + __le32 sata_tag_alloc_mask; + __le16 active_task_count; + __le16 _r_c; + __le32 sata_sactive; + u8 num_sata_tags; + u8 sata_status; + u8 sata_ending_status; + u8 itnl_reason; /* I_T nexus loss reason */ + __le16 ncq_data_scb_ptr; + __le16 itnl_timeout; + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +/* This struct asd_ddb_init_port, describes the device descriptor block + * of an initiator port (when the sequencer is operating in target mode). + * Bytes [0,11] and [20,27] are from the OPEN address frame. + * The sequencer allocates an initiator port DDB entry. 
+ */ +struct asd_ddb_init_port { + u8 conn_type; /* byte 0 */ + u8 conn_rate; + __be16 init_conn_tag; /* BE */ + u8 dest_sas_addr[8]; + __le16 send_queue_head; /* LE, byte 12 */ + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_INITIATOR */ + __le16 _r_a; + __be16 awt_def; /* BE */ + u8 compat_features; + u8 pathway_blocked_count; + __be16 arb_wait_time; /* BE */ + __be32 more_compat_features; /* BE */ + u8 conn_mask; + u8 flags; /* == 5 */ + u16 _r_b; + __le16 exec_queue_tail; /* execution queue tail */ + __le16 send_queue_tail; + __le16 sister_ddb; + __le16 init_resp_timeout; /* initiator response timeout */ + __le32 _r_c; + __le16 active_tasks; /* active task count */ + __le16 init_list; /* initiator list link pointer */ + __le32 _r_d; + u8 max_conn_to[3]; /* from Conn-Disc mode page, in us, LE */ + u8 itnl_reason; /* I_T nexus loss reason */ + __le16 bus_inact_to; /* from Conn-Disc mode page, in 100 us, LE */ + __le16 itnl_to; /* from the Protocol Specific Port Ctrl MP */ + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_tag, describes a look-up table to be used + * by the sequencers. SATA II, IDENTIFY DEVICE data, word 76, bit 8: + * NCQ support. This table is used by the sequencers to find the + * corresponding SCB, given a SATA II tag value. + */ +struct asd_ddb_sata_tag { + __le16 scb_pointer[32]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_pm_table, describes a port number to + * connection handle look-up table. SATA targets attached to a port + * multiplier require a 4-bit port number value. There is one DDB + * entry of this type for each SATA port multiplier (sister DDB). + * Given a SATA PM port number, this table gives us the SATA PM Port + * DDB of the SATA port multiplier port (i.e. the SATA target + * discovered on the port). + */ +struct asd_ddb_sata_pm_table { + __le16 ddb_pointer[16]; + __le16 _r_a[16]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_pm_port, describes the SATA port multiplier + * port format DDB. + */ +struct asd_ddb_sata_pm_port { + u8 _r_a[15]; + u8 ddb_type; + u8 _r_b[13]; + u8 pm_port_flags; +#define PM_PORT_MASK 0xF0 +#define PM_PORT_SET 0x02 + u8 _r_c[6]; + __le16 sister_ddb; + __le16 ata_cmd_scbptr; + __le32 sata_tag_alloc_mask; + __le16 active_task_count; + __le16 parent_ddb; + __le32 sata_sactive; + u8 num_sata_tags; + u8 sata_status; + u8 sata_ending_status; + u8 _r_d[9]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_seq_shared, describes a DDB shared by the + * central and link sequencers. port_map_by_links is indexed phy + * number [0,7]; each byte is a bit mask of all the phys that are in + * the same port as the indexed phy. + */ +struct asd_ddb_seq_shared { + __le16 q_free_ddb_head; + __le16 q_free_ddb_tail; + __le16 q_free_ddb_cnt; + __le16 q_used_ddb_head; + __le16 q_used_ddb_tail; + __le16 shared_mem_lock; + __le16 smp_conn_tag; + __le16 est_nexus_buf_cnt; + __le16 est_nexus_buf_thresh; + u32 _r_a; + u8 settable_max_contexts; + u8 _r_b[23]; + u8 conn_not_active; + u8 phy_is_up; + u8 _r_c[8]; + u8 port_map_by_links[8]; +} __attribute__ ((packed)); + +/* ---------- SG Element ---------- */ + +/* This struct sg_el, describes the hardware scatter gather buffer + * element. All entries are little endian. In an SCB, there are 2 of + * this, plus one more, called a link element of this indicating a + * sublist if needed. + * + * A link element has only the bus address set and the flags (DS) bit + * valid. The bus address points to the start of the sublist. 
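+ *
+ * As a small illustrative sketch (the variable names are made up, not
+ * part of the driver), a link element referring to a sublist that
+ * lives in host memory would be prepared roughly as:
+ *
+ *	memset(link, 0, sizeof(*link));
+ *	link->bus_addr = cpu_to_le64(sublist_dma);
+ *	link->flags    = ASD_SG_EL_DS_HM;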
+ * + * If a sublist is needed, then that sublist should also include the 2 + * sg_el embedded in the SCB, in which case next_sg_offset is 32, + * since sizeof(sg_el) = 16; EOS should be 1 and EOL 0 in this case. + */ +struct sg_el { + __le64 bus_addr; + __le32 size; + __le16 _r; + u8 next_sg_offs; + u8 flags; +#define ASD_SG_EL_DS_MASK 0x30 +#define ASD_SG_EL_DS_OCM 0x10 +#define ASD_SG_EL_DS_HM 0x00 +#define ASD_SG_EL_LIST_MASK 0xC0 +#define ASD_SG_EL_LIST_EOL 0x40 +#define ASD_SG_EL_LIST_EOS 0x80 +} __attribute__ ((packed)); + +/* ---------- SCBs ---------- */ + +/* An SCB (sequencer control block) is comprised of a common header + * and a task part, for a total of 128 bytes. All fields are in LE + * order, unless otherwise noted. + */ + +/* This struct scb_header, defines the SCB header format. + */ +struct scb_header { + __le64 next_scb; + __le16 index; /* transaction context */ + u8 opcode; +} __attribute__ ((packed)); + +/* SCB opcodes: Execution queue + */ +#define INITIATE_SSP_TASK 0x00 +#define INITIATE_LONG_SSP_TASK 0x01 +#define INITIATE_BIDIR_SSP_TASK 0x02 +#define ABORT_TASK 0x03 +#define INITIATE_SSP_TMF 0x04 +#define SSP_TARG_GET_DATA 0x05 +#define SSP_TARG_GET_DATA_GOOD 0x06 +#define SSP_TARG_SEND_RESP 0x07 +#define QUERY_SSP_TASK 0x08 +#define INITIATE_ATA_TASK 0x09 +#define INITIATE_ATAPI_TASK 0x0a +#define CONTROL_ATA_DEV 0x0b +#define INITIATE_SMP_TASK 0x0c +#define SMP_TARG_SEND_RESP 0x0f + +/* SCB opcodes: Send Queue + */ +#define SSP_TARG_SEND_DATA 0x40 +#define SSP_TARG_SEND_DATA_GOOD 0x41 + +/* SCB opcodes: Link Queue + */ +#define CONTROL_PHY 0x80 +#define SEND_PRIMITIVE 0x81 +#define INITIATE_LINK_ADM_TASK 0x82 + +/* SCB opcodes: other + */ +#define EMPTY_SCB 0xc0 +#define INITIATE_SEQ_ADM_TASK 0xc1 +#define EST_ICL_TARG_WINDOW 0xc2 +#define COPY_MEM 0xc3 +#define CLEAR_NEXUS 0xc4 +#define INITIATE_DDB_ADM_TASK 0xc6 +#define ESTABLISH_NEXUS_ESCB 0xd0 + +#define LUN_SIZE 8 + +/* See SAS spec, task IU + */ +struct ssp_task_iu { + u8 lun[LUN_SIZE]; /* BE */ + u16 _r_a; + u8 tmf; + u8 _r_b; + __be16 tag; /* BE */ + u8 _r_c[14]; +} __attribute__ ((packed)); + +/* See SAS spec, command IU + */ +struct ssp_command_iu { + u8 lun[LUN_SIZE]; + u8 _r_a; + u8 efb_prio_attr; /* enable first burst, task prio & attr */ +#define EFB_MASK 0x80 +#define TASK_PRIO_MASK 0x78 +#define TASK_ATTR_MASK 0x07 + + u8 _r_b; + u8 add_cdb_len; /* in dwords, since bit 0,1 are reserved */ + union { + u8 cdb[16]; + struct { + __le64 long_cdb_addr; /* bus address, LE */ + __le32 long_cdb_size; /* LE */ + u8 _r_c[3]; + u8 eol_ds; /* eol:6,6, ds:5,4 */ + } long_cdb; /* sequencer extension */ + }; +} __attribute__ ((packed)); + +struct xfer_rdy_iu { + __be32 requested_offset; /* BE */ + __be32 write_data_len; /* BE */ + __be32 _r_a; +} __attribute__ ((packed)); + +/* ---------- SCB tasks ---------- */ + +/* This is both ssp_task and long_ssp_task + */ +struct initiate_ssp_task { + u8 proto_conn_rate; /* proto:6,4, conn_rate:3,0 */ + __le32 total_xfer_len; + struct ssp_frame_hdr ssp_frame; + struct ssp_command_iu ssp_cmd; + __le16 sister_scb; /* 0xFFFF */ + __le16 conn_handle; /* index to DDB for the intended target */ + u8 data_dir; /* :1,0 */ +#define DATA_DIR_NONE 0x00 +#define DATA_DIR_IN 0x01 +#define DATA_DIR_OUT 0x02 +#define DATA_DIR_BYRECIPIENT 0x03 + + u8 _r_a; + u8 retry_count; + u8 _r_b[5]; + struct sg_el sg_element[3]; /* 2 real and 1 link */ +} __attribute__ ((packed)); + +/* This defines both ata_task and atapi_task. 
+ * ata: C bit of FIS should be 1, + * atapi: C bit of FIS should be 1, and command register should be 0xA0, + * to indicate a packet command. + */ +struct initiate_ata_task { + u8 proto_conn_rate; + __le32 total_xfer_len; + struct host_to_dev_fis fis; + __le32 data_offs; + u8 atapi_packet[16]; + u8 _r_a[12]; + __le16 sister_scb; + __le16 conn_handle; + u8 ata_flags; /* CSMI:6,6, DTM:4,4, QT:3,3, data dir:1,0 */ +#define CSMI_TASK 0x40 +#define DATA_XFER_MODE_DMA 0x10 +#define ATA_Q_TYPE_MASK 0x08 +#define ATA_Q_TYPE_UNTAGGED 0x00 +#define ATA_Q_TYPE_NCQ 0x08 + + u8 _r_b; + u8 retry_count; + u8 _r_c; + u8 flags; +#define STP_AFFIL_POLICY 0x20 +#define SET_AFFIL_POLICY 0x10 +#define RET_PARTIAL_SGLIST 0x02 + + u8 _r_d[3]; + struct sg_el sg_element[3]; +} __attribute__ ((packed)); + +struct initiate_smp_task { + u8 proto_conn_rate; + u8 _r_a[40]; + struct sg_el smp_req; + __le16 sister_scb; + __le16 conn_handle; + u8 _r_c[8]; + struct sg_el smp_resp; + u8 _r_d[32]; +} __attribute__ ((packed)); + +struct control_phy { + u8 phy_id; + u8 sub_func; +#define DISABLE_PHY 0x00 +#define ENABLE_PHY 0x01 +#define RELEASE_SPINUP_HOLD 0x02 +#define ENABLE_PHY_NO_SAS_OOB 0x03 +#define ENABLE_PHY_NO_SATA_OOB 0x04 +#define PHY_NO_OP 0x05 +#define EXECUTE_HARD_RESET 0x81 + + u8 func_mask; + u8 speed_mask; + u8 hot_plug_delay; + u8 port_type; + u8 flags; +#define DEV_PRES_TIMER_OVERRIDE_ENABLE 0x01 +#define DISABLE_PHY_IF_OOB_FAILS 0x02 + + __le32 timeout_override; + u8 link_reset_retries; + u8 _r_a[47]; + __le16 conn_handle; + u8 _r_b[56]; +} __attribute__ ((packed)); + +struct control_ata_dev { + u8 proto_conn_rate; + __le32 _r_a; + struct host_to_dev_fis fis; + u8 _r_b[32]; + __le16 sister_scb; + __le16 conn_handle; + u8 ata_flags; /* 0 */ + u8 _r_c[55]; +} __attribute__ ((packed)); + +struct empty_scb { + u8 num_valid; + __le32 _r_a; +#define ASD_EDBS_PER_SCB 7 +/* header+data+CRC+DMA suffix data */ +#define ASD_EDB_SIZE (24+1024+4+16) + struct sg_el eb[ASD_EDBS_PER_SCB]; +#define ELEMENT_NOT_VALID 0xC0 +} __attribute__ ((packed)); + +struct initiate_link_adm { + u8 phy_id; + u8 sub_func; +#define GET_LINK_ERROR_COUNT 0x00 +#define RESET_LINK_ERROR_COUNT 0x01 +#define ENABLE_NOTIFY_SPINUP_INTS 0x02 + + u8 _r_a[57]; + __le16 conn_handle; + u8 _r_b[56]; +} __attribute__ ((packed)); + +struct copy_memory { + u8 _r_a; + __le16 xfer_len; + __le16 _r_b; + __le64 src_busaddr; + u8 src_ds; /* See definition of sg_el */ + u8 _r_c[45]; + __le16 conn_handle; + __le64 _r_d; + __le64 dest_busaddr; + u8 dest_ds; /* See definition of sg_el */ + u8 _r_e[39]; +} __attribute__ ((packed)); + +struct abort_task { + u8 proto_conn_rate; + __le32 _r_a; + struct ssp_frame_hdr ssp_frame; + struct ssp_task_iu ssp_task; + __le16 sister_scb; + __le16 conn_handle; + u8 flags; /* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */ +#define SUSPEND_DATA_TRANS 0x04 + + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + __le16 index; /* Transaction context of task to be queried */ + __le16 itnl_to; + u8 _r_d[44]; +} __attribute__ ((packed)); + +struct clear_nexus { + u8 nexus; +#define NEXUS_ADAPTER 0x00 +#define NEXUS_PORT 0x01 +#define NEXUS_I_T 0x02 +#define NEXUS_I_T_L 0x03 +#define NEXUS_TAG 0x04 +#define NEXUS_TRANS_CX 0x05 +#define NEXUS_SATA_TAG 0x06 +#define NEXUS_T_L 0x07 +#define NEXUS_L 0x08 +#define NEXUS_T_TAG 0x09 + + __le32 _r_a; + u8 flags; +#define SUSPEND_TX 0x80 +#define RESUME_TX 0x40 +#define SEND_Q 0x04 +#define EXEC_Q 0x02 +#define NOTINQ 0x01 + + u8 _r_b[3]; + u8 conn_mask; + u8 _r_c[19]; + struct ssp_task_iu ssp_task; /* 
LUN and TAG */ + __le16 _r_d; + __le16 conn_handle; + __le64 _r_e; + __le16 index; /* Transaction context of task to be cleared */ + __le16 context; /* Clear nexus context */ + u8 _r_f[44]; +} __attribute__ ((packed)); + +struct initiate_ssp_tmf { + u8 proto_conn_rate; + __le32 _r_a; + struct ssp_frame_hdr ssp_frame; + struct ssp_task_iu ssp_task; + __le16 sister_scb; + __le16 conn_handle; + u8 flags; /* itnl override and suspend data tx */ +#define OVERRIDE_ITNL_TIMER 8 + + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + __le16 index; /* Transaction context of task to be queried */ + __le16 itnl_to; + u8 _r_d[44]; +} __attribute__ ((packed)); + +/* Transmits an arbitrary primitive on the link. + * Used for NOTIFY and BROADCAST. + */ +struct send_prim { + u8 phy_id; + u8 wait_transmit; /* :0,0 */ + u8 xmit_flags; +#define XMTPSIZE_MASK 0xF0 +#define XMTPSIZE_SINGLE 0x10 +#define XMTPSIZE_REPEATED 0x20 +#define XMTPSIZE_CONT 0x20 +#define XMTPSIZE_TRIPLE 0x30 +#define XMTPSIZE_REDUNDANT 0x60 +#define XMTPSIZE_INF 0 + +#define XMTCONTEN 0x04 +#define XMTPFRM 0x02 /* Transmit at the next frame boundary */ +#define XMTPIMM 0x01 /* Transmit immediately */ + + __le16 _r_a; + u8 prim[4]; /* K, D0, D1, D2 */ + u8 _r_b[50]; + __le16 conn_handle; + u8 _r_c[56]; +} __attribute__ ((packed)); + +/* This describes both SSP Target Get Data and SSP Target Get Data And + * Send Good Response SCBs. Used when the sequencer is operating in + * target mode... + */ +struct ssp_targ_get_data { + u8 proto_conn_rate; + __le32 total_xfer_len; + struct ssp_frame_hdr ssp_frame; + struct xfer_rdy_iu xfer_rdy; + u8 lun[LUN_SIZE]; + __le64 _r_a; + __le16 sister_scb; + __le16 conn_handle; + u8 data_dir; /* 01b */ + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + struct sg_el sg_element[3]; +} __attribute__ ((packed)); + +/* ---------- The actual SCB struct ---------- */ + +struct scb { + struct scb_header header; + union { + struct initiate_ssp_task ssp_task; + struct initiate_ata_task ata_task; + struct initiate_smp_task smp_task; + struct control_phy control_phy; + struct control_ata_dev control_ata_dev; + struct empty_scb escb; + struct initiate_link_adm link_adm; + struct copy_memory cp_mem; + struct abort_task abort_task; + struct clear_nexus clear_nexus; + struct initiate_ssp_tmf ssp_tmf; + }; +} __attribute__ ((packed)); + +/* ---------- Done List ---------- */ +/* The done list entry opcode field is defined below. + * The mnemonic encoding and meaning is as follows: + * TC - Task Complete, status was received and acknowledged + * TF - Task Failed, indicates an error prior to receiving acknowledgment + * for the command: + * - no conn, + * - NACK or R_ERR received in response to this command, + * - credit blocked or not available, or in the case of SMP request, + * - no SMP response was received. + * In these four cases it is known that the target didn't receive the + * command. + * TI - Task Interrupted, error after the command was acknowledged. It is + * known that the command was received by the target. + * TU - Task Unacked, command was transmitted but neither ACK (R_OK) nor NAK + * (R_ERR) was received due to loss of signal, broken connection, loss of + * dword sync or other reason. The application client should send the + * appropriate task query. + * TA - Task Aborted, see TF. + * _RESP - The completion includes an empty buffer containing status. + * TO - Timeout. 
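+ *
+ * Purely as an illustration of how a completion path might treat the
+ * families when it consumes a done list entry:
+ *	TC_*: status is known, complete the task normally;
+ *	TF_*: the target never received the command, so it may be failed
+ *	      or retried outright;
+ *	TU_*: the outcome is unknown, so send the appropriate task query
+ *	      first;
+ *	TI_*: the command reached the target, so error recovery is needed.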
+ */ +#define TC_NO_ERROR 0x00 +#define TC_UNDERRUN 0x01 +#define TC_OVERRUN 0x02 +#define TF_OPEN_TO 0x03 +#define TF_OPEN_REJECT 0x04 +#define TI_BREAK 0x05 +#define TI_PROTO_ERR 0x06 +#define TC_SSP_RESP 0x07 +#define TI_PHY_DOWN 0x08 +#define TF_PHY_DOWN 0x09 +#define TC_LINK_ADM_RESP 0x0a +#define TC_CSMI 0x0b +#define TC_ATA_RESP 0x0c +#define TU_PHY_DOWN 0x0d +#define TU_BREAK 0x0e +#define TI_SATA_TO 0x0f +#define TI_NAK 0x10 +#define TC_CONTROL_PHY 0x11 +#define TF_BREAK 0x12 +#define TC_RESUME 0x13 +#define TI_ACK_NAK_TO 0x14 +#define TF_SMPRSP_TO 0x15 +#define TF_SMP_XMIT_RCV_ERR 0x16 +#define TC_PARTIAL_SG_LIST 0x17 +#define TU_ACK_NAK_TO 0x18 +#define TU_SATA_TO 0x19 +#define TF_NAK_RECV 0x1a +#define TA_I_T_NEXUS_LOSS 0x1b +#define TC_ATA_R_ERR_RECV 0x1c +#define TF_TMF_NO_CTX 0x1d +#define TA_ON_REQ 0x1e +#define TF_TMF_NO_TAG 0x1f +#define TF_TMF_TAG_FREE 0x20 +#define TF_TMF_TASK_DONE 0x21 +#define TF_TMF_NO_CONN_HANDLE 0x22 +#define TC_TASK_CLEARED 0x23 +#define TI_SYNCS_RECV 0x24 +#define TU_SYNCS_RECV 0x25 +#define TF_IRTT_TO 0x26 +#define TF_NO_SMP_CONN 0x27 +#define TF_IU_SHORT 0x28 +#define TF_DATA_OFFS_ERR 0x29 +#define TF_INV_CONN_HANDLE 0x2a +#define TF_REQUESTED_N_PENDING 0x2b + +/* 0xc1 - 0xc7: empty buffer received, + 0xd1 - 0xd7: establish nexus empty buffer received +*/ +/* This is the ESCB mask */ +#define ESCB_RECVD 0xC0 + + +/* This struct done_list_struct defines the done list entry. + * All fields are LE. + */ +struct done_list_struct { + __le16 index; /* aka transaction context */ + u8 opcode; + u8 status_block[4]; + u8 toggle; /* bit 0 */ +#define DL_TOGGLE_MASK 0x01 +} __attribute__ ((packed)); + +/* ---------- PHYS ---------- */ + +struct asd_phy { + struct asd_sas_phy sas_phy; + struct asd_phy_desc *phy_desc; /* hw profile */ + + struct sas_identify_frame *identify_frame; + struct asd_dma_tok *id_frm_tok; + + u8 frame_rcvd[ASD_EDB_SIZE]; +}; + + +#define ASD_SCB_SIZE sizeof(struct scb) +#define ASD_DDB_SIZE sizeof(struct asd_ddb_ssp_smp_target_port) + +/* Define this to 0 if you do not want NOTIFY (ENABLE SPINIP) sent. + * Default: 0x10 (it's a mask) + */ +#define ASD_NOTIFY_ENABLE_SPINUP 0x10 + +/* If enabled, set this to the interval between transmission + * of NOTIFY (ENABLE SPINUP). In units of 200 us. + */ +#define ASD_NOTIFY_TIMEOUT 2500 + +/* Initial delay after OOB, before we transmit NOTIFY (ENABLE SPINUP). + * If 0, transmit immediately. In milliseconds. + */ +#define ASD_NOTIFY_DOWN_COUNT 0 + +/* Device present timer timeout constant, 10 ms. */ +#define ASD_DEV_PRESENT_TIMEOUT 0x2710 + +#define ASD_SATA_INTERLOCK_TIMEOUT 0 + +/* How long to wait before shutting down an STP connection, unless + * an STP target sent frame(s). 50 usec. + * IGNORED by the sequencer (i.e. value 0 always). + */ +#define ASD_STP_SHUTDOWN_TIMEOUT 0x0 + +/* ATA soft reset timer timeout. 5 usec. */ +#define ASD_SRST_ASSERT_TIMEOUT 0x05 + +/* 31 sec */ +#define ASD_RCV_FIS_TIMEOUT 0x01D905C0 + +#define ASD_ONE_MILLISEC_TIMEOUT 0x03e8 + +/* COMINIT timer */ +#define ASD_TEN_MILLISEC_TIMEOUT 0x2710 +#define ASD_COMINIT_TIMEOUT ASD_TEN_MILLISEC_TIMEOUT + +/* 1 sec */ +#define ASD_SMP_RCV_TIMEOUT 0x000F4240 + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c new file mode 100644 index 0000000..fc1b743 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -0,0 +1,732 @@ +/* + * Aic94xx SAS/SATA driver SCB management. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+ * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" + +#include "aic94xx_dump.h" + +/* ---------- EMPTY SCB ---------- */ + +#define DL_PHY_MASK 7 +#define BYTES_DMAED 0 +#define PRIMITIVE_RECVD 0x08 +#define PHY_EVENT 0x10 +#define LINK_RESET_ERROR 0x18 +#define TIMER_EVENT 0x20 +#define REQ_TASK_ABORT 0xF0 +#define REQ_DEVICE_RESET 0xF1 +#define SIGNAL_NCQ_ERROR 0xF2 +#define CLEAR_NCQ_ERROR 0xF3 + +#define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE \ + | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ + | CURRENT_OOB_ERROR) + +static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) +{ + switch (oob_mode & 7) { + case PHY_SPEED_60: + /* FIXME: sas transport class doesn't have this */ + phy->sas_phy.linkrate = PHY_LINKRATE_6; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; + break; + case PHY_SPEED_30: + phy->sas_phy.linkrate = PHY_LINKRATE_3; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; + break; + case PHY_SPEED_15: + phy->sas_phy.linkrate = PHY_LINKRATE_1_5; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; + break; + } + if (oob_mode & SAS_MODE) + phy->sas_phy.oob_mode = SAS_OOB_MODE; + else if (oob_mode & SATA_MODE) + phy->sas_phy.oob_mode = SATA_OOB_MODE; +} + +static inline void asd_phy_event_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + int phy_id = dl->status_block[0] & DL_PHY_MASK; + struct asd_phy *phy = &asd_ha->phys[phy_id]; + + u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS; + u8 oob_mode = dl->status_block[2]; + + switch (oob_status) { + case CURRENT_LOSS_OF_SIGNAL: + /* directly attached device was removed */ + ASD_DPRINTK("phy%d: device unplugged\n", phy_id); + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(&phy->sas_phy); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + break; + case CURRENT_OOB_DONE: + /* hot plugged device */ + asd_turn_led(asd_ha, phy_id, 1); + get_lrate_mode(phy, oob_mode); + ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n", + phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + break; + case CURRENT_SPINUP_HOLD: + /* hot plug SATA, no COMWAKE sent */ + asd_turn_led(asd_ha, phy_id, 1); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case CURRENT_GTO_TIMEOUT: + case CURRENT_OOB_ERROR: + ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id, + dl->status_block[1]); + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(&phy->sas_phy); + 
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + } +} + +/* If phys are enabled sparsely, this will do the right thing. */ +static inline unsigned ord_phy(struct asd_ha_struct *asd_ha, + struct asd_phy *phy) +{ + u8 enabled_mask = asd_ha->hw_prof.enabled_phys; + int i, k = 0; + + for_each_phy(enabled_mask, enabled_mask, i) { + if (&asd_ha->phys[i] == phy) + return k; + k++; + } + return 0; +} + +/** + * asd_get_attached_sas_addr -- extract/generate attached SAS address + * phy: pointer to asd_phy + * sas_addr: pointer to buffer where the SAS address is to be written + * + * This function extracts the SAS address from an IDENTIFY frame + * received. If OOB is SATA, then a SAS address is generated from the + * HA tables. + * + * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame + * buffer. + */ +static inline void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr) +{ + if (phy->sas_phy.frame_rcvd[0] == 0x34 + && phy->sas_phy.oob_mode == SATA_OOB_MODE) { + struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; + /* FIS device-to-host */ + u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr); + + addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy); + *(__be64 *)sas_addr = cpu_to_be64(addr); + } else { + struct sas_identify_frame *idframe = + (void *) phy->sas_phy.frame_rcvd; + memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE); + } +} + +static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int edb_id, int phy_id) +{ + unsigned long flags; + int edb_el = edb_id + ascb->edb_index; + struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el]; + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + struct sas_ha_struct *sas_ha = phy->sas_phy.ha; + u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2]; + + size = min(size, (u16) sizeof(phy->frame_rcvd)); + + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size); + phy->sas_phy.frame_rcvd_size = size; + asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + asd_dump_frame_rcvd(phy, dl); + sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED); +} + +static inline void asd_link_reset_err_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int phy_id) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + u8 lr_error = dl->status_block[1]; + u8 retries_left = dl->status_block[2]; + + switch (lr_error) { + case 0: + ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id); + break; + case 1: + ASD_DPRINTK("phy%d: Loss of signal\n", phy_id); + break; + case 2: + ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id); + break; + case 3: + ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id); + break; + default: + ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n", + phy_id, lr_error); + break; + } + + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(sas_phy); + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + + if (retries_left == 0) { + int num = 1; + struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num, + GFP_ATOMIC); + if (!cp) { + asd_printk("%s: out of memory\n", __FUNCTION__); + goto out; + } + ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n", + phy_id); + asd_build_control_phy(cp, phy_id, ENABLE_PHY); + if (asd_post_ascb_list(ascb->ha, cp, 1) != 0) + 
asd_ascb_free(cp); + } +out: + ; +} + +static inline void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int phy_id) +{ + unsigned long flags; + struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + u8 reg = dl->status_block[1]; + u32 cont = dl->status_block[2] << ((reg & 3)*8); + + reg &= ~3; + switch (reg) { + case LmPRMSTAT0BYTE0: + switch (cont) { + case LmBROADCH: + case LmBROADRVCH0: + case LmBROADRVCH1: + case LmBROADSES: + ASD_DPRINTK("phy%d: BROADCAST change received:%d\n", + phy_id, cont); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = ffs(cont); + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD); + break; + + case LmUNKNOWNP: + ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id); + break; + + default: + ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n", + phy_id, reg, cont); + break; + } + break; + case LmPRMSTAT1BYTE0: + switch (cont) { + case LmHARDRST: + ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n", + phy_id); + /* The sequencer disables all phys on that port. + * We have to re-enable the phys ourselves. */ + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + + default: + ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n", + phy_id, reg, cont); + break; + } + break; + default: + ASD_DPRINTK("unknown primitive register:0x%x\n", + dl->status_block[1]); + break; + } +} + +/** + * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB + * @ascb: pointer to Empty SCB + * @edb_id: index [0,6] to the empty data buffer which is to be invalidated + * + * After an EDB has been invalidated, if all EDBs in this ESCB have been + * invalidated, the ESCB is posted back to the sequencer. + * Context is tasklet/IRQ. + */ +void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id) +{ + struct asd_seq_data *seq = &ascb->ha->seq; + struct empty_scb *escb = &ascb->scb->escb; + struct sg_el *eb = &escb->eb[edb_id]; + struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id]; + + memset(edb->vaddr, 0, ASD_EDB_SIZE); + eb->flags |= ELEMENT_NOT_VALID; + escb->num_valid--; + + if (escb->num_valid == 0) { + int i; + /* ASD_DPRINTK("reposting escb: vaddr: 0x%p, " + "dma_handle: 0x%08llx, next: 0x%08llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (u64)ascb->dma_scb.dma_handle, + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + */ + escb->num_valid = ASD_EDBS_PER_SCB; + for (i = 0; i < ASD_EDBS_PER_SCB; i++) + escb->eb[i].flags = 0; + if (!list_empty(&ascb->list)) + list_del_init(&ascb->list); + i = asd_post_escb_list(ascb->ha, ascb, 1); + if (i) + asd_printk("couldn't post escb, err:%d\n", i); + } +} + +static void escb_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */ + u8 sb_opcode = dl->status_block[0]; + int phy_id = sb_opcode & DL_PHY_MASK; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + + if (edb > 6 || edb < 0) { + ASD_DPRINTK("edb is 0x%x! 
dl->opcode is 0x%x\n", + edb, dl->opcode); + ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n", + sb_opcode, phy_id); + ASD_DPRINTK("escb: vaddr: 0x%p, " + "dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + } + + sb_opcode &= ~DL_PHY_MASK; + + switch (sb_opcode) { + case BYTES_DMAED: + ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id); + asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id); + break; + case PRIMITIVE_RECVD: + ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__, + phy_id); + asd_primitive_rcvd_tasklet(ascb, dl, phy_id); + break; + case PHY_EVENT: + ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id); + asd_phy_event_tasklet(ascb, dl); + break; + case LINK_RESET_ERROR: + ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__, + phy_id); + asd_link_reset_err_tasklet(ascb, dl, phy_id); + break; + case TIMER_EVENT: + ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n", + __FUNCTION__, phy_id); + asd_turn_led(asd_ha, phy_id, 0); + /* the device is gone */ + sas_phy_disconnected(sas_phy); + sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT); + break; + case REQ_TASK_ABORT: + ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__, + phy_id); + break; + case REQ_DEVICE_RESET: + ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__, + phy_id); + break; + case SIGNAL_NCQ_ERROR: + ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__, + phy_id); + break; + case CLEAR_NCQ_ERROR: + ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__, + phy_id); + break; + default: + ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__, + phy_id, sb_opcode); + ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n", + edb, dl->opcode); + ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n", + sb_opcode, phy_id); + ASD_DPRINTK("escb: vaddr: 0x%p, " + "dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + + break; + } + + asd_invalidate_edb(ascb, edb); +} + +int asd_init_post_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_escbs; i++) + seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete; + + ASD_DPRINTK("posting %d escbs\n", i); + return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs); +} + +/* ---------- CONTROL PHY ---------- */ + +#define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE \ + | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ + | CURRENT_OOB_ERROR) + +/** + * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb + * @ascb: pointer to an ascb + * @dl: pointer to the done list entry + * + * This function completes a CONTROL PHY scb and frees the ascb. + * A note on LEDs: + * - an LED blinks if there is IO though it, + * - if a device is connected to the LED, it is lit, + * - if no device is connected to the LED, is is dimmed (off). 
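+ *
+ * The done list status block is decoded below as: byte 0 the completion
+ * status, byte 1 the OOB status bits and byte 2 the OOB mode.  For
+ * ENABLE_PHY the handler updates hw_prof.enabled_phys and the LEDs
+ * according to what, if anything, was detected on the phy.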
+ */ +static void control_phy_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct scb *scb = ascb->scb; + struct control_phy *control_phy = &scb->control_phy; + u8 phy_id = control_phy->phy_id; + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + + u8 status = dl->status_block[0]; + u8 oob_status = dl->status_block[1]; + u8 oob_mode = dl->status_block[2]; + /* u8 oob_signals= dl->status_block[3]; */ + + if (status != 0) { + ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n", + __FUNCTION__, phy_id, status); + goto out; + } + + switch (control_phy->sub_func) { + case DISABLE_PHY: + asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id); + asd_turn_led(asd_ha, phy_id, 0); + asd_control_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id); + break; + + case ENABLE_PHY: + asd_control_led(asd_ha, phy_id, 1); + if (oob_status & CURRENT_OOB_DONE) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + get_lrate_mode(phy, oob_mode); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n", + __FUNCTION__, phy_id,phy->sas_phy.linkrate, + phy->sas_phy.iproto); + } else if (oob_status & CURRENT_SPINUP_HOLD) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__, + phy_id); + } else if (oob_status & CURRENT_ERR_MASK) { + asd_turn_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n", + __FUNCTION__, phy_id, oob_status); + } else if (oob_status & (CURRENT_HOT_PLUG_CNCT + | CURRENT_DEVICE_PRESENT)) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d: hot plug or device present\n", + __FUNCTION__, phy_id); + } else { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: phy%d: no device present: " + "oob_status:0x%x\n", + __FUNCTION__, phy_id, oob_status); + } + break; + case RELEASE_SPINUP_HOLD: + case PHY_NO_OP: + case EXECUTE_HARD_RESET: + ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__, + phy_id, control_phy->sub_func); + /* XXX finish */ + break; + default: + ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__, + phy_id, control_phy->sub_func); + break; + } +out: + asd_ascb_free(ascb); +} + +static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) +{ + /* disable all speeds, then enable defaults */ + *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS + | SATA_SPEED_30_DIS | SATA_SPEED_15_DIS; + + switch (pd->max_sas_lrate) { + case PHY_LINKRATE_6: + *speed_mask &= ~SAS_SPEED_60_DIS; + default: + case PHY_LINKRATE_3: + *speed_mask &= ~SAS_SPEED_30_DIS; + case PHY_LINKRATE_1_5: + *speed_mask &= ~SAS_SPEED_15_DIS; + } + + switch (pd->min_sas_lrate) { + case PHY_LINKRATE_6: + *speed_mask |= SAS_SPEED_30_DIS; + case PHY_LINKRATE_3: + *speed_mask |= SAS_SPEED_15_DIS; + default: + case PHY_LINKRATE_1_5: + /* nothing to do */ + ; + } + + switch (pd->max_sata_lrate) { + case PHY_LINKRATE_3: + *speed_mask &= ~SATA_SPEED_30_DIS; + default: + case PHY_LINKRATE_1_5: + *speed_mask &= ~SATA_SPEED_15_DIS; + } + + switch (pd->min_sata_lrate) { + case PHY_LINKRATE_3: + *speed_mask |= SATA_SPEED_15_DIS; + default: + case PHY_LINKRATE_1_5: + /* nothing to do */ + ; + } +} + +/** + * asd_build_control_phy -- build a CONTROL PHY SCB + * @ascb: pointer to an ascb + * @phy_id: phy id to control, integer + * @subfunc: subfunction, what to 
actually to do the phy + * + * This function builds a CONTROL PHY scb. No allocation of any kind + * is performed. @ascb is allocated with the list function. + * The caller can override the ascb->tasklet_complete to point + * to its own callback function. It must call asd_ascb_free() + * at its tasklet complete function. + * See the default implementation. + */ +void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc) +{ + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + struct scb *scb = ascb->scb; + struct control_phy *control_phy = &scb->control_phy; + + scb->header.opcode = CONTROL_PHY; + control_phy->phy_id = (u8) phy_id; + control_phy->sub_func = subfunc; + + switch (subfunc) { + case EXECUTE_HARD_RESET: /* 0x81 */ + case ENABLE_PHY: /* 0x01 */ + /* decide hot plug delay */ + control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT; + + /* decide speed mask */ + set_speed_mask(&control_phy->speed_mask, phy->phy_desc); + + /* initiator port settings are in the hi nibble */ + if (phy->sas_phy.role == PHY_ROLE_INITIATOR) + control_phy->port_type = SAS_PROTO_ALL << 4; + else if (phy->sas_phy.role == PHY_ROLE_TARGET) + control_phy->port_type = SAS_PROTO_ALL; + else + control_phy->port_type = + (SAS_PROTO_ALL << 4) | SAS_PROTO_ALL; + + /* link reset retries, this should be nominal */ + control_phy->link_reset_retries = 10; + + case RELEASE_SPINUP_HOLD: /* 0x02 */ + /* decide the func_mask */ + control_phy->func_mask = FUNCTION_MASK_DEFAULT; + if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD) + control_phy->func_mask &= ~SPINUP_HOLD_DIS; + else + control_phy->func_mask |= SPINUP_HOLD_DIS; + } + + control_phy->conn_handle = cpu_to_le16(0xFFFF); + + ascb->tasklet_complete = control_phy_tasklet_complete; +} + +/* ---------- INITIATE LINK ADM TASK ---------- */ + +static void link_adm_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + u8 opcode = dl->opcode; + struct initiate_link_adm *link_adm = &ascb->scb->link_adm; + u8 phy_id = link_adm->phy_id; + + if (opcode != TC_NO_ERROR) { + asd_printk("phy%d: link adm task 0x%x completed with error " + "0x%x\n", phy_id, link_adm->sub_func, opcode); + } + ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n", + phy_id, link_adm->sub_func, opcode); + + asd_ascb_free(ascb); +} + +void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, + u8 subfunc) +{ + struct scb *scb = ascb->scb; + struct initiate_link_adm *link_adm = &scb->link_adm; + + scb->header.opcode = INITIATE_LINK_ADM_TASK; + + link_adm->phy_id = phy_id; + link_adm->sub_func = subfunc; + link_adm->conn_handle = cpu_to_le16(0xFFFF); + + ascb->tasklet_complete = link_adm_tasklet_complete; +} + +/* ---------- SCB timer ---------- */ + +/** + * asd_ascb_timedout -- called when a pending SCB's timer has expired + * @data: unsigned long, a pointer to the ascb in question + * + * This is the default timeout function which does the most necessary. + * Upper layers can implement their own timeout function, say to free + * resources they have with this SCB, and then call this one at the + * end of their timeout function. To do this, one should initialize + * the ascb->timer.{function, data, expires} prior to calling the post + * funcion. The timer is started by the post function. 
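+ *
+ * A minimal sketch of such an override (my_scb_timedout() and the
+ * 5 second expiry are illustrative only, not part of the driver):
+ *
+ *	static void my_scb_timedout(unsigned long data)
+ *	{
+ *		struct asd_ascb *ascb = (void *) data;
+ *		... release any caller-owned resources for this SCB ...
+ *		asd_ascb_timedout(data);    (call the default last)
+ *	}
+ *
+ *	ascb->timer.data = (unsigned long) ascb;
+ *	ascb->timer.function = my_scb_timedout;
+ *	ascb->timer.expires = jiffies + 5*HZ;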
+ */ +void asd_ascb_timedout(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + struct asd_seq_data *seq = &ascb->ha->seq; + unsigned long flags; + + ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode); + + spin_lock_irqsave(&seq->pend_q_lock, flags); + seq->pending--; + list_del_init(&ascb->list); + spin_unlock_irqrestore(&seq->pend_q_lock, flags); + + asd_ascb_free(ascb); +} + +/* ---------- CONTROL PHY ---------- */ + +/* Given the spec value, return a driver value. */ +static const int phy_func_table[] = { + [PHY_FUNC_NOP] = PHY_NO_OP, + [PHY_FUNC_LINK_RESET] = ENABLE_PHY, + [PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET, + [PHY_FUNC_DISABLE] = DISABLE_PHY, + [PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD, +}; + +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func) +{ + struct asd_ha_struct *asd_ha = phy->ha->lldd_ha; + struct asd_ascb *ascb; + int res = 1; + + if (func == PHY_FUNC_CLEAR_ERROR_LOG) + return -ENOSYS; + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + + asd_build_control_phy(ascb, phy->id, phy_func_table[func]); + res = asd_post_ascb_list(asd_ha, ascb , 1); + if (res) + asd_ascb_free(ascb); + + return res; +} diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c new file mode 100644 index 0000000..eec1e0d --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_sds.c @@ -0,0 +1,1136 @@ +/* + * Aic94xx SAS/SATA driver access to shared data structures and memory + * maps. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include + +#include "aic94xx.h" +#include "aic94xx_reg.h" + +/* ---------- OCM stuff ---------- */ + +struct asd_ocm_dir_ent { + u8 type; + u8 offs[3]; + u8 _r1; + u8 size[3]; +} __attribute__ ((packed)); + +struct asd_ocm_dir { + char sig[2]; + u8 _r1[2]; + u8 major; /* 0 */ + u8 minor; /* 0 */ + u8 _r2; + u8 num_de; + struct asd_ocm_dir_ent entry[15]; +} __attribute__ ((packed)); + +#define OCM_DE_OCM_DIR 0x00 +#define OCM_DE_WIN_DRVR 0x01 +#define OCM_DE_BIOS_CHIM 0x02 +#define OCM_DE_RAID_ENGN 0x03 +#define OCM_DE_BIOS_INTL 0x04 +#define OCM_DE_BIOS_CHIM_OSM 0x05 +#define OCM_DE_BIOS_CHIM_DYNAMIC 0x06 +#define OCM_DE_ADDC2C_RES0 0x07 +#define OCM_DE_ADDC2C_RES1 0x08 +#define OCM_DE_ADDC2C_RES2 0x09 +#define OCM_DE_ADDC2C_RES3 0x0A + +#define OCM_INIT_DIR_ENTRIES 5 +/*************************************************************************** +* OCM dircetory default +***************************************************************************/ +static struct asd_ocm_dir OCMDirInit = +{ + .sig = {0x4D, 0x4F}, /* signature */ + .num_de = OCM_INIT_DIR_ENTRIES, /* no. 
of directory entries */ +}; + +/*************************************************************************** +* OCM dircetory Entries default +***************************************************************************/ +static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] = +{ + { + .type = (OCM_DE_ADDC2C_RES0), /* Entry type */ + .offs = {128}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES1), /* Entry type */ + .offs = {128, 4}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES2), /* Entry type */ + .offs = {128, 8}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES3), /* Entry type */ + .offs = {128, 12}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_WIN_DRVR), /* Entry type */ + .offs = {128, 16}, /* Offset */ + .size = {128, 235, 1}, /* size */ + }, +}; + +struct asd_bios_chim_struct { + char sig[4]; + u8 major; /* 1 */ + u8 minor; /* 0 */ + u8 bios_major; + u8 bios_minor; + __le32 bios_build; + u8 flags; + u8 pci_slot; + __le16 ue_num; + __le16 ue_size; + u8 _r[14]; + /* The unit element array is right here. + */ +} __attribute__ ((packed)); + +/** + * asd_read_ocm_seg - read an on chip memory (OCM) segment + * @asd_ha: pointer to the host adapter structure + * @buffer: where to write the read data + * @offs: offset into OCM where to read from + * @size: how many bytes to read + * + * Return the number of bytes not read. Return 0 on success. + */ +static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, + u32 offs, int size) +{ + u8 *p = buffer; + if (unlikely(asd_ha->iospace)) + asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); + else { + for ( ; size > 0; size--, offs++, p++) + *p = asd_read_ocm_byte(asd_ha, offs); + } + return size; +} + +static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha, + struct asd_ocm_dir *dir, u32 offs) +{ + int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir)); + if (err) { + ASD_DPRINTK("couldn't read ocm segment\n"); + return err; + } + + if (dir->sig[0] != 'M' || dir->sig[1] != 'O') { + ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n", + dir->sig[0], dir->sig[1]); + return -ENOENT; + } + if (dir->major != 0) { + asd_printk("unsupported major version of ocm dir:0x%x\n", + dir->major); + return -ENOENT; + } + dir->num_de &= 0xf; + return 0; +} + +/** + * asd_write_ocm_seg - write an on chip memory (OCM) segment + * @asd_ha: pointer to the host adapter structure + * @buffer: where to read the write data + * @offs: offset into OCM to write to + * @size: how many bytes to write + * + * Return the number of bytes not written. Return 0 on success. 
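+ *
+ * The write path mirrors asd_read_ocm_seg(): a single register string
+ * write is used when the adapter is mapped through I/O space, otherwise
+ * the buffer is copied to OCM one byte at a time.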
+ */ +static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, + u32 offs, int size) +{ + u8 *p = buffer; + if (unlikely(asd_ha->iospace)) + asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); + else { + for ( ; size > 0; size--, offs++, p++) + asd_write_ocm_byte(asd_ha, offs, *p); + } + return; +} + +#define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16)) + +static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type, + u32 *offs, u32 *size) +{ + int i; + struct asd_ocm_dir_ent *ent; + + for (i = 0; i < dir->num_de; i++) { + if (dir->entry[i].type == type) + break; + } + if (i >= dir->num_de) + return -ENOENT; + ent = &dir->entry[i]; + *offs = (u32) THREE_TO_NUM(ent->offs); + *size = (u32) THREE_TO_NUM(ent->size); + return 0; +} + +#define OCM_BIOS_CHIM_DE 2 +#define BC_BIOS_PRESENT 1 + +static int asd_get_bios_chim(struct asd_ha_struct *asd_ha, + struct asd_ocm_dir *dir) +{ + int err; + struct asd_bios_chim_struct *bc_struct; + u32 offs, size; + + err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size); + if (err) { + ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n"); + goto out; + } + err = -ENOMEM; + bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL); + if (!bc_struct) { + asd_printk("no memory for bios_chim struct\n"); + goto out; + } + err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs, + sizeof(*bc_struct)); + if (err) { + ASD_DPRINTK("couldn't read ocm segment\n"); + goto out2; + } + if (strncmp(bc_struct->sig, "SOIB", 4) + && strncmp(bc_struct->sig, "IPSA", 4)) { + ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n", + bc_struct->sig[0], bc_struct->sig[1], + bc_struct->sig[2], bc_struct->sig[3]); + err = -ENOENT; + goto out2; + } + if (bc_struct->major != 1) { + asd_printk("BIOS_CHIM unsupported major version:0x%x\n", + bc_struct->major); + err = -ENOENT; + goto out2; + } + if (bc_struct->flags & BC_BIOS_PRESENT) { + asd_ha->hw_prof.bios.present = 1; + asd_ha->hw_prof.bios.maj = bc_struct->bios_major; + asd_ha->hw_prof.bios.min = bc_struct->bios_minor; + asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build); + ASD_DPRINTK("BIOS present (%d,%d), %d\n", + asd_ha->hw_prof.bios.maj, + asd_ha->hw_prof.bios.min, + asd_ha->hw_prof.bios.bld); + } + asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num); + asd_ha->hw_prof.ue.size= le16_to_cpu(bc_struct->ue_size); + ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num, + asd_ha->hw_prof.ue.size); + size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size; + if (size > 0) { + err = -ENOMEM; + asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL); + if (!asd_ha->hw_prof.ue.area) + goto out2; + err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area, + offs + sizeof(*bc_struct), size); + if (err) { + kfree(asd_ha->hw_prof.ue.area); + asd_ha->hw_prof.ue.area = NULL; + asd_ha->hw_prof.ue.num = 0; + asd_ha->hw_prof.ue.size = 0; + ASD_DPRINTK("couldn't read ue entries(%d)\n", err); + } + } +out2: + kfree(bc_struct); +out: + return err; +} + +static void +asd_hwi_initialize_ocm_dir (struct asd_ha_struct *asd_ha) +{ + int i; + + /* Zero OCM */ + for (i = 0; i < OCM_MAX_SIZE; i += 4) + asd_write_ocm_dword(asd_ha, i, 0); + + /* Write Dir */ + asd_write_ocm_seg(asd_ha, &OCMDirInit, 0, + sizeof(struct asd_ocm_dir)); + + /* Write Dir Entries */ + for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++) + asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i], + sizeof(struct asd_ocm_dir) + + (i * sizeof(struct asd_ocm_dir_ent)) + , sizeof(struct asd_ocm_dir_ent)); + +} + +static int 
+asd_hwi_check_ocm_access (struct asd_ha_struct *asd_ha) +{ + struct pci_dev *pcidev = asd_ha->pcidev; + u32 reg; + int err = 0; + u32 v; + + /* check if OCM has been initialized by BIOS */ + reg = asd_read_reg_dword(asd_ha, EXSICNFGR); + + if (!(reg & OCMINITIALIZED)) { + err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v); + if (err) { + asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n", + pci_name(pcidev)); + goto out; + } + + printk(KERN_INFO "OCM is not initialized by BIOS," + "reinitialize it and ignore it, current IntrptStatus" + "is 0x%x\n", v); + + if (v) + err = pci_write_config_dword(pcidev, + PCIC_INTRPT_STAT, v); + if (err) { + asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n", + pci_name(pcidev)); + goto out; + } + + asd_hwi_initialize_ocm_dir(asd_ha); + + } +out: + return err; +} + +/** + * asd_read_ocm - read on chip memory (OCM) + * @asd_ha: pointer to the host adapter structure + */ +int asd_read_ocm(struct asd_ha_struct *asd_ha) +{ + int err; + struct asd_ocm_dir *dir; + + if (asd_hwi_check_ocm_access(asd_ha)) + return -1; + + dir = kmalloc(sizeof(*dir), GFP_KERNEL); + if (!dir) { + asd_printk("no memory for ocm dir\n"); + return -ENOMEM; + } + + err = asd_read_ocm_dir(asd_ha, dir, 0); + if (err) + goto out; + + err = asd_get_bios_chim(asd_ha, dir); +out: + kfree(dir); + return err; +} + +/* ---------- FLASH stuff ---------- */ + +#define FLASH_RESET 0xF0 +#define FLASH_MANUF_AMD 1 + +#define FLASH_SIZE 0x200000 +#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** " +#define FLASH_NEXT_ENTRY_OFFS 0x2000 +#define FLASH_MAX_DIR_ENTRIES 32 + +#define FLASH_DE_TYPE_MASK 0x3FFFFFFF +#define FLASH_DE_MS 0x120 +#define FLASH_DE_CTRL_A_USER 0xE0 + +struct asd_flash_de { + __le32 type; + __le32 offs; + __le32 pad_size; + __le32 image_size; + __le32 chksum; + u8 _r[12]; + u8 version[32]; +} __attribute__ ((packed)); + +struct asd_flash_dir { + u8 cookie[32]; + __le32 rev; /* 2 */ + __le32 chksum; + __le32 chksum_antidote; + __le32 bld; + u8 bld_id[32]; /* build id data */ + u8 ver_data[32]; /* date and time of build */ + __le32 ae_mask; + __le32 v_mask; + __le32 oc_mask; + u8 _r[20]; + struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES]; +} __attribute__ ((packed)); + +struct asd_manuf_sec { + char sig[2]; /* 'S', 'M' */ + u16 offs_next; + u8 maj; /* 0 */ + u8 min; /* 0 */ + u16 chksum; + u16 size; + u8 _r[6]; + u8 sas_addr[SAS_ADDR_SIZE]; + u8 pcba_sn[ASD_PCBA_SN_SIZE]; + /* Here start the other segments */ + u8 linked_list[0]; +} __attribute__ ((packed)); + +struct asd_manuf_phy_desc { + u8 state; /* low 4 bits */ +#define MS_PHY_STATE_ENABLEABLE 0 +#define MS_PHY_STATE_REPORTED 1 +#define MS_PHY_STATE_HIDDEN 2 + u8 phy_id; + u16 _r; + u8 phy_control_0; /* mode 5 reg 0x160 */ + u8 phy_control_1; /* mode 5 reg 0x161 */ + u8 phy_control_2; /* mode 5 reg 0x162 */ + u8 phy_control_3; /* mode 5 reg 0x163 */ +} __attribute__ ((packed)); + +struct asd_manuf_phy_param { + char sig[2]; /* 'P', 'M' */ + u16 next; + u8 maj; /* 0 */ + u8 min; /* 2 */ + u8 num_phy_desc; /* 8 */ + u8 phy_desc_size; /* 8 */ + u8 _r[3]; + u8 usage_model_id; + u32 _r2; + struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_sb_type[] = { + "unknown", + "SGPIO", + [2 ... 0x7F] = "unknown", + [0x80] = "ADPT_I2C", + [0x81 ... 
0xFF] = "VENDOR_UNIQUExx" +}; +#endif + +struct asd_ms_sb_desc { + u8 type; + u8 node_desc_index; + u8 conn_desc_index; + u8 _recvd[0]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_conn_type[] = { + [0 ... 7] = "unknown", + "SFF8470", + "SFF8482", + "SFF8484", + [0x80] = "PCIX_DAUGHTER0", + [0x81] = "SAS_DAUGHTER0", + [0x82 ... 0xFF] = "VENDOR_UNIQUExx" +}; + +static const char *asd_conn_location[] = { + "unknown", + "internal", + "external", + "board_to_board", +}; +#endif + +struct asd_ms_conn_desc { + u8 type; + u8 location; + u8 num_sideband_desc; + u8 size_sideband_desc; + u32 _resvd; + u8 name[16]; + struct asd_ms_sb_desc sb_desc[0]; +} __attribute__ ((packed)); + +struct asd_nd_phy_desc { + u8 vp_attch_type; + u8 attch_specific[0]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_node_type[] = { + "IOP", + "IO_CONTROLLER", + "EXPANDER", + "PORT_MULTIPLIER", + "PORT_MULTIPLEXER", + "MULTI_DROP_I2C_BUS", +}; +#endif + +struct asd_ms_node_desc { + u8 type; + u8 num_phy_desc; + u8 size_phy_desc; + u8 _resvd; + u8 name[16]; + struct asd_nd_phy_desc phy_desc[0]; +} __attribute__ ((packed)); + +struct asd_ms_conn_map { + char sig[2]; /* 'M', 'C' */ + __le16 next; + u8 maj; /* 0 */ + u8 min; /* 0 */ + __le16 cm_size; /* size of this struct */ + u8 num_conn; + u8 conn_size; + u8 num_nodes; + u8 usage_model_id; + u32 _resvd; + struct asd_ms_conn_desc conn_desc[0]; + struct asd_ms_node_desc node_desc[0]; +} __attribute__ ((packed)); + +struct asd_ctrla_phy_entry { + u8 sas_addr[SAS_ADDR_SIZE]; + u8 sas_link_rates; /* max in hi bits, min in low bits */ + u8 flags; + u8 sata_link_rates; + u8 _r[5]; +} __attribute__ ((packed)); + +struct asd_ctrla_phy_settings { + u8 id0; /* P'h'y */ + u8 _r; + u16 next; + u8 num_phys; /* number of PHYs in the PCI function */ + u8 _r2[3]; + struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS]; +} __attribute__ ((packed)); + +struct asd_ll_el { + u8 id0; + u8 id1; + __le16 next; + u8 something_here[0]; +} __attribute__ ((packed)); + +static int asd_poll_flash(struct asd_ha_struct *asd_ha) +{ + int c; + u8 d; + + for (c = 5000; c > 0; c--) { + d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); + d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); + if (!d) + return 0; + udelay(5); + } + return -ENOENT; +} + +static int asd_reset_flash(struct asd_ha_struct *asd_ha) +{ + int err; + + err = asd_poll_flash(asd_ha); + if (err) + return err; + asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET); + err = asd_poll_flash(asd_ha); + + return err; +} + +static inline int asd_read_flash_seg(struct asd_ha_struct *asd_ha, + void *buffer, u32 offs, int size) +{ + asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs, + size); + return 0; +} + +/** + * asd_find_flash_dir - finds and reads the flash directory + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to flash directory structure + * + * If found, the flash directory segment will be copied to + * @flash_dir. Return 1 if found, 0 if not. 
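+ *
+ * The search steps through the 2 MB flash in FLASH_NEXT_ENTRY_OFFS
+ * (8 KB) increments, comparing the FLASH_DIR_COOKIE signature at each
+ * offset; on a match the directory offset is recorded in
+ * hw_prof.flash.dir_offs and the full directory is read in.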
+ */ +static int asd_find_flash_dir(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + u32 v; + for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) { + asd_read_flash_seg(asd_ha, flash_dir, v, + sizeof(FLASH_DIR_COOKIE)-1); + if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE, + sizeof(FLASH_DIR_COOKIE)-1) == 0) { + asd_ha->hw_prof.flash.dir_offs = v; + asd_read_flash_seg(asd_ha, flash_dir, v, + sizeof(*flash_dir)); + return 1; + } + } + return 0; +} + +static int asd_flash_getid(struct asd_ha_struct *asd_ha) +{ + int err = 0; + u32 reg, inc; + + reg = asd_read_reg_dword(asd_ha, EXSICNFGR); + + if (!(reg & FLASHEX)) { + ASD_DPRINTK("flash doesn't exist\n"); + return -ENOENT; + } + if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR, + &asd_ha->hw_prof.flash.bar)) { + asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n", + pci_name(asd_ha->pcidev)); + return -ENOENT; + } + asd_ha->hw_prof.flash.present = 1; + asd_ha->hw_prof.flash.wide = reg & FLASHW ? 1 : 0; + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash(%d)\n", err); + return err; + } + /* Get flash info. This would most likely be AMD Am29LV family flash. + * First try the sequence for word mode. It is the same as for + * 008B (byte mode only), 160B (word mode) and 800D (word mode). + */ + reg = asd_ha->hw_prof.flash.bar; + inc = asd_ha->hw_prof.flash.wide ? 2 : 1; + asd_write_reg_byte(asd_ha, reg + 0x555, 0xAA); + asd_write_reg_byte(asd_ha, reg + 0x2AA, 0x55); + asd_write_reg_byte(asd_ha, reg + 0x555, 0x90); + asd_ha->hw_prof.flash.manuf = asd_read_reg_byte(asd_ha, reg); + asd_ha->hw_prof.flash.dev_id= asd_read_reg_byte(asd_ha,reg+inc); + asd_ha->hw_prof.flash.sec_prot = asd_read_reg_byte(asd_ha,reg+inc+inc); + /* Get out of autoselect mode. */ + err = asd_reset_flash(asd_ha); + + if (asd_ha->hw_prof.flash.manuf == FLASH_MANUF_AMD) { + ASD_DPRINTK("0Found FLASH(%d) manuf:%d, dev_id:0x%x, " + "sec_prot:%d\n", + asd_ha->hw_prof.flash.wide ? 16 : 8, + asd_ha->hw_prof.flash.manuf, + asd_ha->hw_prof.flash.dev_id, + asd_ha->hw_prof.flash.sec_prot); + return 0; + } + + /* Ok, try the sequence for byte mode of 160B and 800D. + * We may actually never need this. + */ + asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA); + asd_write_reg_byte(asd_ha, reg + 0x555, 0x55); + asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90); + asd_ha->hw_prof.flash.manuf = asd_read_reg_byte(asd_ha, reg); + asd_ha->hw_prof.flash.dev_id = asd_read_reg_byte(asd_ha, reg + 2); + asd_ha->hw_prof.flash.sec_prot = asd_read_reg_byte(asd_ha, reg + 4); + err = asd_reset_flash(asd_ha); + + if (asd_ha->hw_prof.flash.manuf == FLASH_MANUF_AMD) { + ASD_DPRINTK("1Found FLASH(%d) manuf:%d, dev_id:0x%x, " + "sec_prot:%d\n", + asd_ha->hw_prof.flash.wide ? 
16 : 8, + asd_ha->hw_prof.flash.manuf, + asd_ha->hw_prof.flash.dev_id, + asd_ha->hw_prof.flash.sec_prot); + return 0; + } + + return -ENOENT; +} + +static u16 asd_calc_flash_chksum(u16 *p, int size) +{ + u16 chksum = 0; + + while (size-- > 0) + chksum += *p++; + + return chksum; +} + + +static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type, + u32 *offs, u32 *size) +{ + int i; + struct asd_flash_de *de; + + for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) { + u32 type = le32_to_cpu(flash_dir->dir_entry[i].type); + + type &= FLASH_DE_TYPE_MASK; + if (type == entry_type) + break; + } + if (i >= FLASH_MAX_DIR_ENTRIES) + return -ENOENT; + de = &flash_dir->dir_entry[i]; + *offs = le32_to_cpu(de->offs); + *size = le32_to_cpu(de->pad_size); + return 0; +} + +static int asd_validate_ms(struct asd_manuf_sec *ms) +{ + if (ms->sig[0] != 'S' || ms->sig[1] != 'M') { + ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n", + ms->sig[0], ms->sig[1]); + return -ENOENT; + } + if (ms->maj != 0) { + asd_printk("unsupported manuf. sector. major version:%x\n", + ms->maj); + return -ENOENT; + } + ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next); + ms->chksum = le16_to_cpu((__force __le16) ms->chksum); + ms->size = le16_to_cpu((__force __le16) ms->size); + + if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) { + asd_printk("failed manuf sector checksum\n"); + } + + return 0; +} + +static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *ms) +{ + memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE); + return 0; +} + +static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *ms) +{ + memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE); + asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0'; + return 0; +} + +/** + * asd_find_ll_by_id - find a linked list entry by its id + * @start: void pointer to the first element in the linked list + * @id0: the first byte of the id (offs 0) + * @id1: the second byte of the id (offs 1) + * + * @start has to be the _base_ element start, since the + * linked list entries's offset is from this pointer. + * Some linked list entries use only the first id, in which case + * you can pass 0xFF for the second. + */ +static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1) +{ + struct asd_ll_el *el = start; + + do { + switch (id1) { + default: + if (el->id1 == id1) + case 0xFF: + if (el->id0 == id0) + return el; + } + el = start + le16_to_cpu(el->next); + } while (el != start); + + return NULL; +} + +/** + * asd_ms_get_phy_params - get phy parameters from the manufacturing sector + * @asd_ha: pointer to the host adapter structure + * @manuf_sec: pointer to the manufacturing sector + * + * The manufacturing sector contans also the linked list of sub-segments, + * since when it was read, its size was taken from the flash directory, + * not from the structure size. + * + * HIDDEN phys do not count in the total count. REPORTED phys cannot + * be enabled but are reported and counted towards the total. + * ENEBLEABLE phys are enabled by default and count towards the total. + * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys + * merely specifies the number of phys the host adapter decided to + * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN, + * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENEBLEABLE. 
+ * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2 + * are actually enabled (enabled by default, max number of phys + * enableable in this case). + */ +static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *manuf_sec) +{ + int i; + int en_phys = 0; + int rep_phys = 0; + struct asd_manuf_phy_param *phy_param; + struct asd_manuf_phy_param dflt_phy_param; + + phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M'); + if (!phy_param) { + ASD_DPRINTK("ms: no phy parameters found\n"); + ASD_DPRINTK("ms: Creating default phy parameters\n"); + dflt_phy_param.sig[0] = 'P'; + dflt_phy_param.sig[1] = 'M'; + dflt_phy_param.maj = 0; + dflt_phy_param.min = 2; + dflt_phy_param.num_phy_desc = 8; + dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc); + for (i =0; i < ASD_MAX_PHYS; i++) { + dflt_phy_param.phy_desc[i].state = 0; + dflt_phy_param.phy_desc[i].phy_id = i; + dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6; + dflt_phy_param.phy_desc[i].phy_control_1 = 0x10; + dflt_phy_param.phy_desc[i].phy_control_2 = 0x43; + dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb; + } + + phy_param = &dflt_phy_param; + + } + + if (phy_param->maj != 0) { + asd_printk("unsupported manuf. phy param major version:0x%x\n", + phy_param->maj); + return -ENOENT; + } + + ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc); + asd_ha->hw_prof.enabled_phys = 0; + for (i = 0; i < phy_param->num_phy_desc; i++) { + struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i]; + switch (pd->state & 0xF) { + case MS_PHY_STATE_HIDDEN: + ASD_DPRINTK("ms: phy%d: HIDDEN\n", i); + continue; + case MS_PHY_STATE_REPORTED: + ASD_DPRINTK("ms: phy%d: REPORTED\n", i); + asd_ha->hw_prof.enabled_phys &= ~(1 << i); + rep_phys++; + continue; + case MS_PHY_STATE_ENABLEABLE: + ASD_DPRINTK("ms: phy%d: ENEBLEABLE\n", i); + asd_ha->hw_prof.enabled_phys |= (1 << i); + en_phys++; + break; + } + asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0; + asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1; + asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2; + asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3; + } + asd_ha->hw_prof.max_phys = rep_phys + en_phys; + asd_ha->hw_prof.num_phys = en_phys; + ASD_DPRINTK("ms: max_phys:0x%x, num_phys:0x%x\n", + asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys); + ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys); + return 0; +} + +static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *manuf_sec) +{ + struct asd_ms_conn_map *cm; + + cm = asd_find_ll_by_id(manuf_sec, 'M', 'C'); + if (!cm) { + ASD_DPRINTK("ms: no connector map found\n"); + return 0; + } + + if (cm->maj != 0) { + ASD_DPRINTK("ms: unsupported: connector map major version 0x%x" + "\n", cm->maj); + return -ENOENT; + } + + /* XXX */ + + return 0; +} + + +/** + * asd_process_ms - find and extract information from the manufacturing sector + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to the flash directory + */ +static int asd_process_ms(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + int err; + struct asd_manuf_sec *manuf_sec; + u32 offs, size; + + err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size); + if (err) { + ASD_DPRINTK("Couldn't find the manuf. 
sector\n"); + goto out; + } + + if (size == 0) + goto out; + + err = -ENOMEM; + manuf_sec = kmalloc(size, GFP_KERNEL); + if (!manuf_sec) { + ASD_DPRINTK("no mem for manuf sector\n"); + goto out; + } + + err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size); + if (err) { + ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n", + offs, size); + goto out2; + } + + err = asd_validate_ms(manuf_sec); + if (err) { + ASD_DPRINTK("couldn't validate manuf sector\n"); + goto out2; + } + + err = asd_ms_get_sas_addr(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("couldn't read the SAS_ADDR\n"); + goto out2; + } + ASD_DPRINTK("manuf sect SAS_ADDR %llx\n", + SAS_ADDR(asd_ha->hw_prof.sas_addr)); + + err = asd_ms_get_pcba_sn(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("couldn't read the PCBA SN\n"); + goto out2; + } + ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn); + + err = asd_ms_get_phy_params(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("ms: couldn't get phy parameters\n"); + goto out2; + } + + err = asd_ms_get_connector_map(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("ms: couldn't get connector map\n"); + goto out2; + } + +out2: + kfree(manuf_sec); +out: + return err; +} + +static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha, + struct asd_ctrla_phy_settings *ps) +{ + int i; + for (i = 0; i < ps->num_phys; i++) { + struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i]; + + if (!PHY_ENABLED(asd_ha, i)) + continue; + if (*(u64 *)pe->sas_addr == 0) { + asd_ha->hw_prof.enabled_phys &= ~(1 << i); + continue; + } + /* This is the SAS address which should be sent in IDENTIFY. */ + memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr, + SAS_ADDR_SIZE); + asd_ha->hw_prof.phy_desc[i].max_sas_lrate = + (pe->sas_link_rates & 0xF0) >> 4; + asd_ha->hw_prof.phy_desc[i].min_sas_lrate = + (pe->sas_link_rates & 0x0F); + asd_ha->hw_prof.phy_desc[i].max_sata_lrate = + (pe->sata_link_rates & 0xF0) >> 4; + asd_ha->hw_prof.phy_desc[i].min_sata_lrate = + (pe->sata_link_rates & 0x0F); + asd_ha->hw_prof.phy_desc[i].flags = pe->flags; + ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x," + " sata rate:0x%x-0x%x, flags:0x%x\n", + i, + SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr), + asd_ha->hw_prof.phy_desc[i].max_sas_lrate, + asd_ha->hw_prof.phy_desc[i].min_sas_lrate, + asd_ha->hw_prof.phy_desc[i].max_sata_lrate, + asd_ha->hw_prof.phy_desc[i].min_sata_lrate, + asd_ha->hw_prof.phy_desc[i].flags); + } + + return 0; +} + +/** + * asd_process_ctrl_a_user - process CTRL-A user settings + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to the flash directory + */ +static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + int err, i; + u32 offs, size; + struct asd_ll_el *el; + struct asd_ctrla_phy_settings *ps; + struct asd_ctrla_phy_settings dflt_ps; + + err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size); + if (err) { + ASD_DPRINTK("couldn't find CTRL-A user settings section\n"); + ASD_DPRINTK("Creating default CTRL-A user settings section\n"); + + dflt_ps.id0 = 'h'; + dflt_ps.num_phys = 8; + for (i =0; i < ASD_MAX_PHYS; i++) { + memcpy(dflt_ps.phy_ent[i].sas_addr, + asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); + dflt_ps.phy_ent[i].sas_link_rates = 0x98; + dflt_ps.phy_ent[i].flags = 0x0; + dflt_ps.phy_ent[i].sata_link_rates = 0x0; + } + + size = sizeof(struct asd_ctrla_phy_settings); + ps = &dflt_ps; + } + + if (size == 0) + goto out; + + err = -ENOMEM; + el = kmalloc(size, 
GFP_KERNEL); + if (!el) { + ASD_DPRINTK("no mem for ctrla user settings section\n"); + goto out; + } + + err = asd_read_flash_seg(asd_ha, (void *)el, offs, size); + if (err) { + ASD_DPRINTK("couldn't read ctrla phy settings section\n"); + goto out2; + } + + err = -ENOENT; + ps = asd_find_ll_by_id(el, 'h', 0xFF); + if (!ps) { + ASD_DPRINTK("couldn't find ctrla phy settings struct\n"); + goto out2; + } + + err = asd_process_ctrla_phy_settings(asd_ha, ps); + if (err) { + ASD_DPRINTK("couldn't process ctrla phy settings\n"); + goto out2; + } +out2: + kfree(el); +out: + return err; +} + +/** + * asd_read_flash - read flash memory + * @asd_ha: pointer to the host adapter structure + */ +int asd_read_flash(struct asd_ha_struct *asd_ha) +{ + int err; + struct asd_flash_dir *flash_dir; + + err = asd_flash_getid(asd_ha); + if (err) + return err; + + flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL); + if (!flash_dir) + return -ENOMEM; + + err = -ENOENT; + if (!asd_find_flash_dir(asd_ha, flash_dir)) { + ASD_DPRINTK("couldn't find flash directory\n"); + goto out; + } + + if (le32_to_cpu(flash_dir->rev) != 2) { + asd_printk("unsupported flash dir version:0x%x\n", + le32_to_cpu(flash_dir->rev)); + goto out; + } + + err = asd_process_ms(asd_ha, flash_dir); + if (err) { + ASD_DPRINTK("couldn't process manuf sector settings\n"); + goto out; + } + + err = asd_process_ctrl_a_user(asd_ha, flash_dir); + if (err) { + ASD_DPRINTK("couldn't process CTRL-A user settings\n"); + goto out; + } + +out: + kfree(flash_dir); + return err; +} diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c new file mode 100644 index 0000000..9050c6f3f --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_seq.c @@ -0,0 +1,1401 @@ +/* + * Aic94xx SAS/SATA driver sequencer interface. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * Parts of this code adapted from David Chaw's adp94xx_seq.c. + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" + +#include "aic94xx_seq.h" +#include "aic94xx_dump.h" + +/* It takes no more than 0.05 us for an instruction + * to complete. So waiting for 1 us should be more than + * plenty. 
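+ *
+ * PAUSE_DELAY is the per-poll delay in microseconds and PAUSE_TRIES the
+ * number of polls, so a pause/unpause request is given roughly 1 ms to
+ * take effect before it is reported as failed.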
+ */ +#define PAUSE_DELAY 1 +#define PAUSE_TRIES 1000 + +static const struct firmware *sequencer_fw; +static const char *sequencer_version; +static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task, + cseq_idle_loop, lseq_idle_loop; +static u8 *cseq_code, *lseq_code; +static u32 cseq_code_size, lseq_code_size; + +static u16 first_scb_site_no = 0xFFFF; +static u16 last_scb_site_no; + +/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */ + +/** + * asd_pause_cseq - pause the central sequencer + * @asd_ha: pointer to host adapter structure + * + * Return 0 on success, negative on failure. + */ +int asd_pause_cseq(struct asd_ha_struct *asd_ha) +{ + int count = PAUSE_TRIES; + u32 arp2ctl; + + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (arp2ctl & PAUSED) + return 0; + + asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (arp2ctl & PAUSED) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't pause CSEQ\n"); + return -1; +} + +/** + * asd_unpause_cseq - unpause the central sequencer. + * @asd_ha: pointer to host adapter structure. + * + * Return 0 on success, negative on error. + */ +int asd_unpause_cseq(struct asd_ha_struct *asd_ha) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (!(arp2ctl & PAUSED)) + return 0; + + asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (!(arp2ctl & PAUSED)) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't unpause the CSEQ\n"); + return -1; +} + +/** + * asd_seq_pause_lseq - pause a link sequencer + * @asd_ha: pointer to a host adapter structure + * @lseq: link sequencer of interest + * + * Return 0 on success, negative on error. + */ +static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (arp2ctl & PAUSED) + return 0; + + asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (arp2ctl & PAUSED) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq); + return -1; +} + +/** + * asd_pause_lseq - pause the link sequencer(s) + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of link sequencers of interest + * + * Return 0 on success, negative on failure. + */ +int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + int err = 0; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_pause_lseq(asd_ha, lseq); + if (err) + return err; + } + + return err; +} + +/** + * asd_seq_unpause_lseq - unpause a link sequencer + * @asd_ha: pointer to host adapter structure + * @lseq: link sequencer of interest + * + * Return 0 on success, negative on error. 
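+ *
+ * (Note: on timeout this helper logs the failure but currently still
+ * falls through to return 0.)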
+ */ +static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (!(arp2ctl & PAUSED)) + return 0; + + asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (!(arp2ctl & PAUSED)) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq); + return 0; +} + + +/** + * asd_unpause_lseq - unpause the link sequencer(s) + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of link sequencers of interest + * + * Return 0 on success, negative on failure. + */ +int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + int err = 0; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_unpause_lseq(asd_ha, lseq); + if (err) + return err; + } + + return err; +} + +/* ---------- Downloading CSEQ/LSEQ microcode ---------- */ + +static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size) +{ + u32 addr = CSEQ_RAM_REG_BASE_ADR; + const u32 *prog = (u32 *) _prog; + u32 i; + + for (i = 0; i < size; i += 4, prog++, addr += 4) { + u32 val = asd_read_reg_dword(asd_ha, addr); + + if (le32_to_cpu(*prog) != val) { + asd_printk("%s: cseq verify failed at %u " + "read:0x%x, wanted:0x%x\n", + pci_name(asd_ha->pcidev), + i, val, le32_to_cpu(*prog)); + return -1; + } + } + ASD_DPRINTK("verified %d bytes, passed\n", size); + return 0; +} + +/** + * asd_verify_lseq - verify the microcode of a link sequencer + * @asd_ha: pointer to host adapter structure + * @_prog: pointer to the microcode + * @size: size of the microcode in bytes + * @lseq: link sequencer of interest + * + * The link sequencer code is accessed in 4 KB pages, which are selected + * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register. + * The 10 KB LSEQm instruction code is mapped, page at a time, at + * LmSEQRAM address. + */ +static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size, int lseq) +{ +#define LSEQ_CODEPAGE_SIZE 4096 + int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE; + u32 page; + const u32 *prog = (u32 *) _prog; + + for (page = 0; page < pages; page++) { + u32 i; + + asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq), + page << LmRAMPAGE_LSHIFT); + for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE; + i += 4, prog++, size-=4) { + + u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i); + + if (le32_to_cpu(*prog) != val) { + asd_printk("%s: LSEQ%d verify failed " + "page:%d, offs:%d\n", + pci_name(asd_ha->pcidev), + lseq, page, i); + return -1; + } + } + } + ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq, + (int)((u8 *)prog-_prog)); + return 0; +} + +/** + * asd_verify_seq -- verify CSEQ/LSEQ microcode + * @asd_ha: pointer to host adapter structure + * @prog: pointer to microcode + * @size: size of the microcode + * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest + * + * Return 0 if microcode is correct, negative on mismatch. 
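+ *
+ * Each set bit n in @lseq_mask selects LSEQ n; every selected link
+ * sequencer is verified against the same image.  For example,
+ * asd_verify_seq(asd_ha, lseq_code, lseq_code_size, 0x03) would check
+ * LSEQ0 and LSEQ1 (the mask value here is purely illustrative).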
+ */ +static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog, + u32 size, u8 lseq_mask) +{ + if (lseq_mask == 0) + return asd_verify_cseq(asd_ha, prog, size); + else { + int lseq, err; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_verify_lseq(asd_ha, prog, size, lseq); + if (err) + return err; + } + } + + return 0; +} +#define ASD_DMA_MODE_DOWNLOAD +#ifdef ASD_DMA_MODE_DOWNLOAD +/* This is the size of the CSEQ Mapped instruction page */ +#define MAX_DMA_OVLY_COUNT ((1U << 14)-1) +static int asd_download_seq(struct asd_ha_struct *asd_ha, + const u8 * const prog, u32 size, u8 lseq_mask) +{ + u32 comstaten; + u32 reg; + int page; + const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT; + struct asd_dma_tok *token; + int err = 0; + + if (size % 4) { + asd_printk("sequencer program not multiple of 4\n"); + return -1; + } + + asd_pause_cseq(asd_ha); + asd_pause_lseq(asd_ha, 0xFF); + + /* save, disable and clear interrupts */ + comstaten = asd_read_reg_dword(asd_ha, COMSTATEN); + asd_write_reg_dword(asd_ha, COMSTATEN, 0); + asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK); + + asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); + asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK); + + token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL); + if (!token) { + asd_printk("out of memory for dma SEQ download\n"); + err = -ENOMEM; + goto out; + } + ASD_DPRINTK("dma-ing %d bytes\n", size); + + for (page = 0; page < pages; page++) { + int i; + u32 left = min(size-page*MAX_DMA_OVLY_COUNT, + (u32)MAX_DMA_OVLY_COUNT); + + memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left); + asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle); + asd_write_reg_dword(asd_ha, OVLYDMACNT, left); + reg = !page ? RESETOVLYDMA : 0; + reg |= (STARTOVLYDMA | OVLYHALTERR); + reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); + /* Start DMA. */ + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + for (i = PAUSE_TRIES*100; i > 0; i--) { + u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL); + if (!(dmadone & OVLYDMAACT)) + break; + udelay(PAUSE_DELAY); + } + } + + reg = asd_read_reg_dword(asd_ha, COMSTAT); + if (!(reg & OVLYDMADONE) || (reg & OVLYERR) + || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){ + asd_printk("%s: error DMA-ing sequencer code\n", + pci_name(asd_ha->pcidev)); + err = -ENODEV; + } + + asd_free_coherent(asd_ha, token); + out: + asd_write_reg_dword(asd_ha, COMSTATEN, comstaten); + + return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask); +} +#else /* ASD_DMA_MODE_DOWNLOAD */ +static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size, u8 lseq_mask) +{ + int i; + u32 reg = 0; + const u32 *prog = (u32 *) _prog; + + if (size % 4) { + asd_printk("sequencer program not multiple of 4\n"); + return -1; + } + + asd_pause_cseq(asd_ha); + asd_pause_lseq(asd_ha, 0xFF); + + reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); + reg |= PIOCMODE; + + asd_write_reg_dword(asd_ha, OVLYDMACNT, size); + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n", + lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? 
"s" : ""); + + for (i = 0; i < size; i += 4, prog++) + asd_write_reg_dword(asd_ha, SPIODATA, *prog); + + reg = (reg & ~PIOCMODE) | OVLYHALTERR; + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + return asd_verify_seq(asd_ha, _prog, size, lseq_mask); +} +#endif /* ASD_DMA_MODE_DOWNLOAD */ + +/** + * asd_seq_download_seqs - download the sequencer microcode + * @asd_ha: pointer to host adapter structure + * + * Download the central and link sequencer microcode. + */ +static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + + if (!asd_ha->hw_prof.enabled_phys) { + asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev)); + return -ENODEV; + } + + /* Download the CSEQ */ + ASD_DPRINTK("downloading CSEQ...\n"); + err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0); + if (err) { + asd_printk("CSEQ download failed:%d\n", err); + return err; + } + + /* Download the Link Sequencers code. All of the Link Sequencers + * microcode can be downloaded at the same time. + */ + ASD_DPRINTK("downloading LSEQs...\n"); + err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, + asd_ha->hw_prof.enabled_phys); + if (err) { + /* Try it one at a time */ + u8 lseq; + u8 lseq_mask = asd_ha->hw_prof.enabled_phys; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_download_seq(asd_ha, lseq_code, + lseq_code_size, 1<> 8; + asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx); + } + asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD, + first_scb_site_no); + asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL, + last_scb_site_no); + asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF); + + /* CSEQ Mode independent, page 7 setup. */ + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0); + asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0); + asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0); + asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0); +} + +/** + * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha) +{ + int i; + int moffs; + + moffs = CSEQ_PAGE_SIZE * 2; + + /* CSEQ Mode dependent, modes 0-7, page 0 setup. */ + for (i = 0; i < 8; i++) { + asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF); + asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0); + } + + /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */ + + /* CSEQ Mode dependent, mode 8, page 0 setup. 
*/ + asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0); + asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0); + asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0); + asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0); + asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0); + asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0); + asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0); + asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0); + asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE, + (u16)last_scb_site_no+1); + asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE, + (u16)asd_ha->hw_prof.max_ddbs); + + /* CSEQ Mode dependent, mode 8, page 1 setup. */ + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0); + + /* CSEQ Mode dependent, mode 8, page 2 setup. */ + /* Tell the sequencer the bus address of the first SCB. */ + asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER, + asd_ha->seq.next_scb.dma_handle); + ASD_DPRINTK("First SCB dma_handle: 0x%llx\n", + (unsigned long long)asd_ha->seq.next_scb.dma_handle); + + /* Tell the sequencer the first Done List entry address. */ + asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE, + asd_ha->seq.actual_dl->dma_handle); + + /* Initialize the Q_DONE_POINTER with the least significant + * 4 bytes of the first Done List address. */ + asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER, + ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle)); + + asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE); + + /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */ +} + +/** + * asd_init_cseq_scratch -- setup and init CSEQ + * @asd_ha: pointer to host adapter structure + * + * Setup and initialize Central sequencers. Initialiaze the mode + * independent and dependent scratch page to the default settings. + */ +static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha) +{ + asd_init_cseq_mip(asd_ha); + asd_init_cseq_mdp(asd_ha); +} + +/** + * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3 + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq) +{ + int i; + + /* LSEQ Mode independent page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq); + asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq), + ASD_NOTIFY_ENABLE_SPINUP); + asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000); + asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0); + + /* LSEQ Mode independent page 1 setup. 
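+	 *
+	 * Page 1 is the ESTABLISH NEXUS bookkeeping: four SCB pointers
+	 * with their opcodes, head/tail indices, a buffers-available
+	 * count, and the saved ISR indices, all cleared or invalidated
+	 * below.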
*/ + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0); + + /* LSEQ Mode Independent page 2 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0); + for (i = 0; i < 12; i += 4) + asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0); + + /* LSEQ Mode Independent page 3 setup. */ + + /* Device present timer timeout */ + asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq), + ASD_DEV_PRESENT_TIMEOUT); + + /* SATA interlock timer disabled */ + asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq), + ASD_SATA_INTERLOCK_TIMEOUT); + + /* STP shutdown timer timeout constant, IGNORED by the sequencer, + * always 0. */ + asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq), + ASD_STP_SHUTDOWN_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq), + ASD_SRST_ASSERT_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq), + ASD_RCV_FIS_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq), + ASD_ONE_MILLISEC_TIMEOUT); + + /* COM_INIT timer */ + asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq), + ASD_TEN_MILLISEC_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq), + ASD_SMP_RCV_TIMEOUT); +} + +/** + * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages. + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq) +{ + int i; + u32 moffs; + u16 ret_addr[] = { + 0xFFFF, /* mode 0 */ + 0xFFFF, /* mode 1 */ + mode2_task, /* mode 2 */ + 0, + 0xFFFF, /* mode 4/5 */ + 0xFFFF, /* mode 4/5 */ + }; + + /* + * Mode 0,1,2 and 4/5 have common field on page 0 for the first + * 14 bytes. 
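+	 *
+	 * The loop below fills that common area (RET_ADDR, REG0_MODE,
+	 * MODE_FLAGS, RET_ADDR1/2 and the opcode/data handed to the CSEQ)
+	 * for modes 0-2; mode 5 is handled separately afterwards because
+	 * its page 0 shares a scratch page with mode 0 page 3.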
+ */ + for (i = 0; i < 3; i++) { + moffs = i * LSEQ_MODE_SCRATCH_SIZE; + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs, + ret_addr[i]); + asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0); + asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0); + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0); + asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0); + } + /* + * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3. + */ + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET, + ret_addr[5]); + asd_write_reg_word(asd_ha, + LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); + asd_write_reg_word(asd_ha, + LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); + asd_write_reg_byte(asd_ha, + LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); + asd_write_reg_word(asd_ha, + LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); + + /* LSEQ Mode dependent 0, page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq), + (u16)asd_ha->hw_prof.max_ddbs); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq), + (u16)last_scb_site_no+1); + asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq), + (u16) LmM0INTEN_MASK & 0xFFFF0000 >> 16); + asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2, + (u16) LmM0INTEN_MASK & 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0); + + /* LSEQ mode dependent, mode 1, page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0); + + /* LSEQ Mode dependent mode 2, page 0 setup */ + asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0); + + /* LSEQ Mode dependent, mode 4/5, page 0 setup. 
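+	 *
+	 * Modes 4/5 are the OOB/link-reset context: saved OOB
+	 * status/mode/signals, link reset retry counters, and the NOTIFY
+	 * (ENABLE SPINUP) timer programmed a few lines further down.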
*/ + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0); + /* + * Set the desired interval between transmissions of the NOTIFY + * (ENABLE SPINUP) primitive. Must be initilized to val - 1. + */ + asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq), + ASD_NOTIFY_TIMEOUT - 1); + /* No delay for the first NOTIFY to be sent to the attached target. */ + asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq), + ASD_NOTIFY_DOWN_COUNT); + + /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */ + for (i = 0; i < 2; i++) { + int j; + /* Start from Page 1 of Mode 0 and 1. */ + moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE; + /* All the fields of page 1 can be intialized to 0. */ + for (j = 0; j < LSEQ_PAGE_SIZE; j += 4) + asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0); + } + + /* LSEQ Mode dependent, mode 2, page 1 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0); + + /* LSEQ Mode dependent, mode 4/5, page 1. */ + for (i = 0; i < LSEQ_PAGE_SIZE; i+=4) + asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0); + asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF); + asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF); + + /* LSEQ Mode dependent, mode 0, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0); + + /* LSEQ Mode Dependent 1, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0); + asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0); + + /* LSEQ Mode Dependent 2, page 2 setup. */ + /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer, + * i.e. always 0. 
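+	 * Same convention as the LmSEQ_STP_SHUTDOWN_TIMEOUT constant
+	 * written in asd_init_lseq_mip() above.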
*/ + asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0); + asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0); + asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0); + + /* LSEQ Mode Dependent 4/5, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0); +} + +/** + * asd_init_lseq_scratch -- setup and init link sequencers + * @asd_ha: pointer to host adapter struct + */ +static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha) +{ + u8 lseq; + u8 lseq_mask; + + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + asd_init_lseq_mip(asd_ha, lseq); + asd_init_lseq_mdp(asd_ha, lseq); + } +} + +/** + * asd_init_scb_sites -- initialize sequencer SCB sites (memory). + * @asd_ha: pointer to host adapter structure + * + * This should be done before initializing common CSEQ and LSEQ + * scratch since those areas depend on some computed values here, + * last_scb_site_no, etc. + */ +static void asd_init_scb_sites(struct asd_ha_struct *asd_ha) +{ + u16 site_no; + u16 max_scbs = 0; + + for (site_no = asd_ha->hw_prof.max_scbs-1; + site_no != (u16) -1; + site_no--) { + u16 i; + + /* Initialize all fields in the SCB site to 0. */ + for (i = 0; i < ASD_SCB_SIZE; i += 4) + asd_scbsite_write_dword(asd_ha, site_no, i, 0); + + /* Workaround needed by SEQ to fix a SATA issue is to exclude + * certain SCB sites from the free list. */ + if (!SCB_SITE_VALID(site_no)) + continue; + + if (last_scb_site_no == 0) + last_scb_site_no = site_no; + + /* For every SCB site, we need to initialize the + * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS, + * and SG Element Flag. */ + + /* Q_NEXT field of the last SCB is invalidated. */ + asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); + + /* Initialize SCB Site Opcode field to invalid. */ + asd_scbsite_write_byte(asd_ha, site_no, + offsetof(struct scb_header, opcode), + 0xFF); + + /* Initialize SCB Site Flags field to mean a response + * frame has been received. This means inadvertent + * frames received to be dropped. */ + asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01); + + first_scb_site_no = site_no; + max_scbs++; + } + asd_ha->hw_prof.max_scbs = max_scbs; + ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs); + ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no); + ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no); +} + +/** + * asd_init_cseq_cio - initialize CSEQ CIO registers + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha) +{ + int i; + + asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0); + asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS); + asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0); + asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0); + asd_ha->seq.scbpro = 0; + asd_write_reg_dword(asd_ha, SCBPRO, 0); + asd_write_reg_dword(asd_ha, CSEQCON, 0); + + /* Intialize CSEQ Mode 11 Interrupt Vectors. + * The addresses are 16 bit wide and in dword units. + * The values of their macros are in byte units. + * Thus we have to divide by 4. 
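+	 *
+	 * For example, a handler at byte offset 0x200 corresponds to a
+	 * vector value of 0x200 / 4 = 0x80 (illustrative arithmetic only;
+	 * the cseq_vecs[] entries written below are used exactly as read
+	 * from the firmware tables).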
*/ + asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]); + asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]); + asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]); + + /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ + asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC); + + /* Initialize CSEQ Scratch Page to 0x04. */ + asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04); + + /* Initialize CSEQ Mode[0-8] Dependent registers. */ + /* Initialize Scratch Page to 0. */ + for (i = 0; i < 9; i++) + asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0); + + /* Reset the ARP2 Program Count. */ + asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); + + for (i = 0; i < 8; i++) { + /* Intialize Mode n Link m Interrupt Enable. */ + asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF); + /* Initialize Mode n Request Mailbox. */ + asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0); + } +} + +/** + * asd_init_lseq_cio -- initialize LmSEQ CIO registers + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq) +{ + u8 *sas_addr; + int i; + + /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ + asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC); + + asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0); + + /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */ + for (i = 0; i < 3; i++) + asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0); + + /* Initialize Mode 5 SCRATCHPAGE to 0. */ + asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0); + + asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0); + /* Initialize Mode 0,1,2 and 5 Interrupt Enable and + * Interrupt registers. */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF); + /* Mode 1 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF); + /* Mode 2 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF); + /* Mode 5 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF); + + /* Enable HW Timer status. */ + asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK); + + /* Enable Primitive Status 0 and 1. */ + asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK); + asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK); + + /* Enable Frame Error. */ + asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK); + asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50); + + /* Initialize Mode 0 Transfer Level to 512. */ + asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512); + /* Initialize Mode 1 Transfer Level to 256. */ + asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256); + + /* Initialize Program Count. */ + asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); + + /* Enable Blind SG Move. */ + asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48); + asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq), + ASD_SATA_INTERLOCK_TIMEOUT); + + (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq)); + + /* Clear Primitive Status 0 and 1. */ + asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF); + asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF); + + /* Clear HW Timer status. */ + asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF); + + /* Clear DMA Errors for Mode 0 and 1. 
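+	 * (Presumably write-one-to-clear, matching the 0xFFFFFFFF writes
+	 * used to clear the other status registers in this function.)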
*/ + asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF); + asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF); + + /* Clear SG DMA Errors for Mode 0 and 1. */ + asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF); + asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF); + + /* Clear Mode 0 Buffer Parity Error. */ + asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR); + + /* Clear Mode 0 Frame Error register. */ + asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF); + + /* Reset LSEQ external interrupt arbiter. */ + asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL); + + /* Set the Phy SAS for the LmSEQ WWN. */ + sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr; + for (i = 0; i < SAS_ADDR_SIZE; i++) + asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]); + + /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */ + asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0); + + /* Set the Bus Inactivity Time Limit Timer. */ + asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9); + + /* Enable SATA Port Multiplier. */ + asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80); + + /* Initialize Interrupt Vector[0-10] address in Mode 3. + * See the comment on CSEQ_INT_* */ + asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]); + asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]); + asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]); + asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]); + asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]); + asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]); + asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]); + asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]); + asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]); + asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]); + asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]); + /* + * Program the Link LED control, applicable only for + * Chip Rev. B or later. + */ + asd_write_reg_dword(asd_ha, LmCONTROL(lseq), + (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms)); + + /* Set the Align Rate for SAS and STP mode. */ + asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT); + asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT); +} + + +/** + * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox + * @asd_ha: pointer to host adapter struct + */ +static void asd_post_init_cseq(struct asd_ha_struct *asd_ha) +{ + int i; + + for (i = 0; i < 8; i++) + asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF); + for (i = 0; i < 8; i++) + asd_read_reg_dword(asd_ha, CMnRSPMBX(i)); + /* Reset the external interrupt arbiter. */ + asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL); +} + +/** + * asd_init_ddb_0 -- initialize DDB 0 + * @asd_ha: pointer to host adapter structure + * + * Initialize DDB site 0 which is used internally by the sequencer. 
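+ *
+ * DDB 0 carries the asd_ddb_seq_shared structure: the free/used DDB
+ * queues, the ESTABLISH NEXUS buffer counters, and the phy_is_up and
+ * port_map_by_links bytes that asd_update_port_links() keeps current
+ * at runtime.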
+ */ +static void asd_init_ddb_0(struct asd_ha_struct *asd_ha) +{ + int i; + + /* Zero out the DDB explicitly */ + for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4) + asd_ddbsite_write_dword(asd_ha, 0, i, 0); + + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail), + asd_ha->hw_prof.max_ddbs-1); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh), + asd_ha->hw_prof.num_phys * 2); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00); + /* DDB 0 is reserved */ + set_bit(0, asd_ha->hw_prof.ddb_bitmap); +} + +/** + * asd_seq_setup_seqs -- setup and initialize central and link sequencers + * @asd_ha: pointer to host adapter structure + */ +static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha) +{ + int lseq; + u8 lseq_mask; + + /* Initialize SCB sites. Done first to compute some values which + * the rest of the init code depends on. */ + asd_init_scb_sites(asd_ha); + + /* Initialize CSEQ Scratch RAM registers. */ + asd_init_cseq_scratch(asd_ha); + + /* Initialize LmSEQ Scratch RAM registers. */ + asd_init_lseq_scratch(asd_ha); + + /* Initialize CSEQ CIO registers. */ + asd_init_cseq_cio(asd_ha); + + asd_init_ddb_0(asd_ha); + + /* Initialize LmSEQ CIO registers. */ + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) + asd_init_lseq_cio(asd_ha, lseq); + asd_post_init_cseq(asd_ha); +} + + +/** + * asd_seq_start_cseq -- start the central sequencer, CSEQ + * @asd_ha: pointer to host adapter structure + */ +static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha) +{ + /* Reset the ARP2 instruction to location zero. */ + asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); + + /* Unpause the CSEQ */ + return asd_unpause_cseq(asd_ha); +} + +/** + * asd_seq_start_lseq -- start a link sequencer + * @asd_ha: pointer to host adapter structure + * @lseq: the link sequencer of interest + */ +static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + /* Reset the ARP2 instruction to location zero. 
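+	 *
+	 * As for the CSEQ above, "location zero" is in practice the
+	 * idle-loop entry point reported by the firmware header
+	 * (lseq_idle_loop), written to LmPRGMCNT before unpausing.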
*/ + asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); + + /* Unpause the LmSEQ */ + return asd_seq_unpause_lseq(asd_ha, lseq); +} + +static int asd_request_firmware(struct asd_ha_struct *asd_ha) +{ + int err, i; + struct sequencer_file_header header, *hdr_ptr; + u32 csum = 0; + u16 *ptr_cseq_vecs, *ptr_lseq_vecs; + + if (sequencer_fw) + /* already loaded */ + return 0; + + err = request_firmware(&sequencer_fw, + SAS_RAZOR_SEQUENCER_FW_FILE, + &asd_ha->pcidev->dev); + if (err) + return err; + + hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data; + + header.csum = le32_to_cpu(hdr_ptr->csum); + header.major = le32_to_cpu(hdr_ptr->major); + header.minor = le32_to_cpu(hdr_ptr->minor); + sequencer_version = hdr_ptr->version; + header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset); + header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size); + header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset); + header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size); + header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset); + header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size); + header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset); + header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size); + header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task); + header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop); + header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop); + + for (i = sizeof(header.csum); i < sequencer_fw->size; i++) + csum += sequencer_fw->data[i]; + + if (csum != header.csum) { + asd_printk("Firmware file checksum mismatch\n"); + return -EINVAL; + } + + if (header.cseq_table_size != CSEQ_NUM_VECS || + header.lseq_table_size != LSEQ_NUM_VECS) { + asd_printk("Firmware file table size mismatch\n"); + return -EINVAL; + } + + ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset]; + ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset]; + mode2_task = header.mode2_task; + cseq_idle_loop = header.cseq_idle_loop; + lseq_idle_loop = header.lseq_idle_loop; + + for (i = 0; i < CSEQ_NUM_VECS; i++) + cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]); + + for (i = 0; i < LSEQ_NUM_VECS; i++) + lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]); + + cseq_code = &sequencer_fw->data[header.cseq_code_offset]; + cseq_code_size = header.cseq_code_size; + lseq_code = &sequencer_fw->data[header.lseq_code_offset]; + lseq_code_size = header.lseq_code_size; + + return 0; +} + +int asd_init_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + + err = asd_request_firmware(asd_ha); + + if (err) { + asd_printk("Failed to load sequencer firmware file %s, error %d\n", + SAS_RAZOR_SEQUENCER_FW_FILE, err); + return err; + } + + asd_printk("using sequencer %s\n", sequencer_version); + err = asd_seq_download_seqs(asd_ha); + if (err) { + asd_printk("couldn't download sequencers for %s\n", + pci_name(asd_ha->pcidev)); + return err; + } + + asd_seq_setup_seqs(asd_ha); + + return 0; +} + +int asd_start_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + u8 lseq_mask; + int lseq; + + err = asd_seq_start_cseq(asd_ha); + if (err) { + asd_printk("couldn't start CSEQ for %s\n", + pci_name(asd_ha->pcidev)); + return err; + } + + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_start_lseq(asd_ha, lseq); + if (err) { + asd_printk("coudln't start LSEQ %d for %s\n", lseq, + pci_name(asd_ha->pcidev)); + return err; + } + } + + return 0; +} + +/** + * asd_update_port_links 
-- update port_map_by_links and phy_is_up + * @sas_phy: pointer to the phy which has been added to a port + * + * 1) When a link reset has completed and we got BYTES DMAED with a + * valid frame we call this function for that phy, to indicate that + * the phy is up, i.e. we update the phy_is_up in DDB 0. The + * sequencer checks phy_is_up when pending SCBs are to be sent, and + * when an open address frame has been received. + * + * 2) When we know of ports, we call this function to update the map + * of phys participaing in that port, i.e. we update the + * port_map_by_links in DDB 0. When a HARD_RESET primitive has been + * received, the sequencer disables all phys in that port. + * port_map_by_links is also used as the conn_mask byte in the + * initiator/target port DDB. + */ +void asd_update_port_links(struct asd_sas_phy *sas_phy) +{ + struct asd_ha_struct *asd_ha = sas_phy->ha->lldd_ha; + const u8 phy_mask = (u8) sas_phy->port->phy_mask; + u8 phy_is_up; + u8 mask; + int i, err; + + for_each_phy(phy_mask, mask, i) + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, + port_map_by_links)+i,phy_mask); + + for (i = 0; i < 12; i++) { + phy_is_up = asd_ddbsite_read_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up)); + err = asd_ddbsite_update_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up), + phy_is_up, + phy_is_up | phy_mask); + if (!err) + break; + else if (err == -EFAULT) { + asd_printk("phy_is_up: parity error in DDB 0\n"); + break; + } + } + + if (err) + asd_printk("couldn't update DDB 0:error:%d\n", err); +} diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h new file mode 100644 index 0000000..42281c3 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_seq.h @@ -0,0 +1,70 @@ +/* + * Aic94xx SAS/SATA driver sequencer interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_SEQ_H_ +#define _AIC94XX_SEQ_H_ + +#define CSEQ_NUM_VECS 3 +#define LSEQ_NUM_VECS 11 + +#define SAS_RAZOR_SEQUENCER_FW_FILE "aic94xx-seq.fw" + +/* Note: All quantites in the sequencer file are little endian */ +struct sequencer_file_header { + /* Checksum of the entire contents of the sequencer excluding + * these four bytes */ + u32 csum; + /* numeric major version */ + u32 major; + /* numeric minor version */ + u32 minor; + /* version string printed by driver */ + char version[16]; + u32 cseq_table_offset; + u32 cseq_table_size; + u32 lseq_table_offset; + u32 lseq_table_size; + u32 cseq_code_offset; + u32 cseq_code_size; + u32 lseq_code_offset; + u32 lseq_code_size; + u16 mode2_task; + u16 cseq_idle_loop; + u16 lseq_idle_loop; +} __attribute__((packed)); + +#ifdef __KERNEL__ +int asd_pause_cseq(struct asd_ha_struct *asd_ha); +int asd_unpause_cseq(struct asd_ha_struct *asd_ha); +int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); +int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); +int asd_init_seqs(struct asd_ha_struct *asd_ha); +int asd_start_seqs(struct asd_ha_struct *asd_ha); + +void asd_update_port_links(struct asd_sas_phy *phy); +#endif + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c new file mode 100644 index 0000000..285e70d --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -0,0 +1,642 @@ +/* + * Aic94xx SAS/SATA Tasks + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include "aic94xx.h" +#include "aic94xx_sas.h" +#include "aic94xx_hwi.h" + +static void asd_unbuild_ata_ascb(struct asd_ascb *a); +static void asd_unbuild_smp_ascb(struct asd_ascb *a); +static void asd_unbuild_ssp_ascb(struct asd_ascb *a); + +static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num) +{ + unsigned long flags; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + asd_ha->seq.can_queue += num; + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); +} + +/* PCI_DMA_... to our direction translation. 
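+ *
+ * Indexed by task->data_dir as provided by libsas; used below when
+ * building ATA and SSP SCBs, e.g. flags |= data_dir_flags[task->data_dir].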
+ */ +static const u8 data_dir_flags[] = { + [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ + [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */ + [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */ + [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ +}; + +static inline int asd_map_scatterlist(struct sas_task *task, + struct sg_el *sg_arr, + unsigned long gfp_flags) +{ + struct asd_ascb *ascb = task->lldd_task; + struct asd_ha_struct *asd_ha = ascb->ha; + struct scatterlist *sc; + int num_sg, res; + + if (task->data_dir == PCI_DMA_NONE) + return 0; + + if (task->num_scatter == 0) { + void *p = task->scatter; + dma_addr_t dma = pci_map_single(asd_ha->pcidev, p, + task->total_xfer_len, + task->data_dir); + sg_arr[0].bus_addr = cpu_to_le64((u64)dma); + sg_arr[0].size = cpu_to_le32(task->total_xfer_len); + sg_arr[0].flags |= ASD_SG_EL_LIST_EOL; + return 0; + } + + num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter, + task->data_dir); + if (num_sg == 0) + return -ENOMEM; + + if (num_sg > 3) { + int i; + + ascb->sg_arr = asd_alloc_coherent(asd_ha, + num_sg*sizeof(struct sg_el), + gfp_flags); + if (!ascb->sg_arr) { + res = -ENOMEM; + goto err_unmap; + } + for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { + struct sg_el *sg = + &((struct sg_el *)ascb->sg_arr->vaddr)[i]; + sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); + sg->size = cpu_to_le32((u32)sg_dma_len(sc)); + if (i == num_sg-1) + sg->flags |= ASD_SG_EL_LIST_EOL; + } + + for (sc = task->scatter, i = 0; i < 2; i++, sc++) { + sg_arr[i].bus_addr = + cpu_to_le64((u64)sg_dma_address(sc)); + sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); + } + sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr); + sg_arr[1].flags |= ASD_SG_EL_LIST_EOS; + + memset(&sg_arr[2], 0, sizeof(*sg_arr)); + sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle); + } else { + int i; + for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { + sg_arr[i].bus_addr = + cpu_to_le64((u64)sg_dma_address(sc)); + sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); + } + sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL; + } + + return 0; +err_unmap: + pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, + task->data_dir); + return res; +} + +static inline void asd_unmap_scatterlist(struct asd_ascb *ascb) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_task *task = ascb->uldd_task; + + if (task->data_dir == PCI_DMA_NONE) + return; + + if (task->num_scatter == 0) { + dma_addr_t dma = (dma_addr_t) + le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr); + pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len, + task->data_dir); + return; + } + + asd_free_coherent(asd_ha, ascb->sg_arr); + pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, + task->data_dir); +} + +/* ---------- Task complete tasklet ---------- */ + +static void asd_get_response_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_task *task = ascb->uldd_task; + struct task_status_struct *ts = &task->task_status; + unsigned long flags; + struct tc_resp_sb_struct { + __le16 index_escb; + u8 len_lsb; + u8 flags; + } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; + +/* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */ + int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; + struct asd_ascb *escb; + struct asd_dma_tok *edb; + void *r; + + spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); + escb = asd_tc_index_find(&asd_ha->seq, + 
(int)le16_to_cpu(resp_sb->index_escb)); + spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); + + if (!escb) { + ASD_DPRINTK("Uh-oh! No escb for this dl?!\n"); + return; + } + + ts->buf_valid_size = 0; + edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; + r = edb->vaddr; + if (task->task_proto == SAS_PROTO_SSP) { + struct ssp_response_iu *iu = + r + 16 + sizeof(struct ssp_frame_hdr); + + ts->residual = le32_to_cpu(*(__le32 *)r); + ts->resp = SAS_TASK_COMPLETE; + if (iu->datapres == 0) + ts->stat = iu->status; + else if (iu->datapres == 1) + ts->stat = iu->resp_data[3]; + else if (iu->datapres == 2) { + ts->stat = SAM_CHECK_COND; + ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE, + be32_to_cpu(iu->sense_data_len)); + memcpy(ts->buf, iu->sense_data, ts->buf_valid_size); + if (iu->status != SAM_CHECK_COND) { + ASD_DPRINTK("device %llx sent sense data, but " + "stat(0x%x) is not CHECK_CONDITION" + "\n", + SAS_ADDR(task->dev->sas_addr), + ts->stat); + } + } + } else { + struct ata_task_resp *resp = (void *) &ts->buf[0]; + + ts->residual = le32_to_cpu(*(__le32 *)r); + + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = le16_to_cpu(*(__le16 *)(r+6)); + memcpy(&resp->ending_fis[0], r+16, 24); + ts->buf_valid_size = sizeof(*resp); + } + } + + asd_invalidate_edb(escb, edb_id); +} + +static void asd_task_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct sas_task *task = ascb->uldd_task; + struct task_status_struct *ts = &task->task_status; + unsigned long flags; + u8 opcode = dl->opcode; + + asd_can_dequeue(ascb->ha, 1); + +Again: + switch (opcode) { + case TC_NO_ERROR: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_GOOD; + break; + case TC_UNDERRUN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = le32_to_cpu(*(__le32 *)dl->status_block); + break; + case TC_OVERRUN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + case TC_SSP_RESP: + case TC_ATA_RESP: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + asd_get_response_tasklet(ascb, dl); + break; + case TF_OPEN_REJECT: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + if (dl->status_block[1] & 2) + ts->open_rej_reason = 1 + dl->status_block[2]; + else if (dl->status_block[1] & 1) + ts->open_rej_reason = (dl->status_block[2] >> 4)+10; + else + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case TF_OPEN_TO: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_TO; + break; + case TF_PHY_DOWN: + case TU_PHY_DOWN: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case TI_PHY_DOWN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case TI_BREAK: + case TI_PROTO_ERR: + case TI_NAK: + case TI_ACK_NAK_TO: + case TF_SMP_XMIT_RCV_ERR: + case TC_ATA_R_ERR_RECV: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case TF_BREAK: + case TU_BREAK: + case TU_ACK_NAK_TO: + case TF_SMPRSP_TO: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case TF_NAK_RECV: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case TA_I_T_NEXUS_LOSS: + opcode = dl->status_block[0]; + goto Again; + break; + case TF_INV_CONN_HANDLE: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEVICE_UNKNOWN; + break; + case TF_REQUESTED_N_PENDING: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PENDING; + break; + case TC_TASK_CLEARED: + case TA_ON_REQ: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = 
SAS_ABORTED_TASK; + break; + + case TF_NO_SMP_CONN: + case TF_TMF_NO_CTX: + case TF_TMF_NO_TAG: + case TF_TMF_TAG_FREE: + case TF_TMF_TASK_DONE: + case TF_TMF_NO_CONN_HANDLE: + case TF_IRTT_TO: + case TF_IU_SHORT: + case TF_DATA_OFFS_ERR: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + + case TC_LINK_ADM_RESP: + case TC_CONTROL_PHY: + case TC_RESUME: + case TC_PARTIAL_SG_LIST: + default: + ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode); + break; + } + + switch (task->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + asd_unbuild_ata_ascb(ascb); + break; + case SAS_PROTO_SMP: + asd_unbuild_smp_ascb(ascb); + break; + case SAS_PROTO_SSP: + asd_unbuild_ssp_ascb(ascb); + default: + break; + } + + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + task, opcode, ts->resp, ts->stat); + complete(&ascb->completion); + } else { + spin_unlock_irqrestore(&task->task_state_lock, flags); + task->lldd_task = NULL; + asd_ascb_free(ascb); + mb(); + task->task_done(task); + } +} + +/* ---------- ATA ---------- */ + +static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, + unsigned long gfp_flags) +{ + struct domain_device *dev = task->dev; + struct scb *scb; + u8 flags; + int res = 0; + + scb = ascb->scb; + + if (unlikely(task->ata_task.device_control_reg_update)) + scb->header.opcode = CONTROL_ATA_DEV; + else if (dev->sata_dev.command_set == ATA_COMMAND_SET) + scb->header.opcode = INITIATE_ATA_TASK; + else + scb->header.opcode = INITIATE_ATAPI_TASK; + + scb->ata_task.proto_conn_rate = (1 << 5); /* STP */ + if (dev->port->oob_mode == SAS_OOB_MODE) + scb->ata_task.proto_conn_rate |= dev->linkrate; + + scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); + scb->ata_task.fis = task->ata_task.fis; + scb->ata_task.fis.fis_type = 0x27; + if (likely(!task->ata_task.device_control_reg_update)) + scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) + memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, + 16); + scb->ata_task.sister_scb = cpu_to_le16(0xFFFF); + scb->ata_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)dev->lldd_dev); + + if (likely(!task->ata_task.device_control_reg_update)) { + flags = 0; + if (task->ata_task.dma_xfer) + flags |= DATA_XFER_MODE_DMA; + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) + flags |= ATA_Q_TYPE_NCQ; + flags |= data_dir_flags[task->data_dir]; + scb->ata_task.ata_flags = flags; + + scb->ata_task.retry_count = task->ata_task.retry_count; + + flags = 0; + if (task->ata_task.set_affil_pol) + flags |= SET_AFFIL_POLICY; + if (task->ata_task.stp_affil_pol) + flags |= STP_AFFIL_POLICY; + scb->ata_task.flags = flags; + } + ascb->tasklet_complete = asd_task_tasklet_complete; + + if (likely(!task->ata_task.device_control_reg_update)) + res = asd_map_scatterlist(task, scb->ata_task.sg_element, + gfp_flags); + + return res; +} + +static void asd_unbuild_ata_ascb(struct asd_ascb *a) +{ + asd_unmap_scatterlist(a); +} + +/* ---------- SMP ---------- */ + +static int asd_build_smp_ascb(struct asd_ascb *ascb, 
struct sas_task *task, + unsigned long gfp_flags) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct domain_device *dev = task->dev; + struct scb *scb; + + pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, + PCI_DMA_FROMDEVICE); + pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); + + scb = ascb->scb; + + scb->header.opcode = INITIATE_SMP_TASK; + + scb->smp_task.proto_conn_rate = dev->linkrate; + + scb->smp_task.smp_req.bus_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + scb->smp_task.smp_req.size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + + scb->smp_task.smp_resp.bus_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); + scb->smp_task.smp_resp.size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + + scb->smp_task.sister_scb = cpu_to_le16(0xFFFF); + scb->smp_task.conn_handle = cpu_to_le16((u16) + (unsigned long)dev->lldd_dev); + + ascb->tasklet_complete = asd_task_tasklet_complete; + + return 0; +} + +static void asd_unbuild_smp_ascb(struct asd_ascb *a) +{ + struct sas_task *task = a->uldd_task; + + BUG_ON(!task); + pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, + PCI_DMA_FROMDEVICE); + pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +} + +/* ---------- SSP ---------- */ + +static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, + unsigned long gfp_flags) +{ + struct domain_device *dev = task->dev; + struct scb *scb; + int res = 0; + + scb = ascb->scb; + + scb->header.opcode = INITIATE_SSP_TASK; + + scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */ + scb->ssp_task.proto_conn_rate |= dev->linkrate; + scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); + scb->ssp_task.ssp_frame.frame_type = SSP_DATA; + memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + memcpy(scb->ssp_task.ssp_frame.hashed_src_addr, + dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); + + memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8); + if (task->ssp_task.enable_first_burst) + scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK; + scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3); + scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16); + + scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF); + scb->ssp_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)dev->lldd_dev); + scb->ssp_task.data_dir = data_dir_flags[task->data_dir]; + scb->ssp_task.retry_count = scb->ssp_task.retry_count; + + ascb->tasklet_complete = asd_task_tasklet_complete; + + res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags); + + return res; +} + +static void asd_unbuild_ssp_ascb(struct asd_ascb *a) +{ + asd_unmap_scatterlist(a); +} + +/* ---------- Execute Task ---------- */ + +static inline int asd_can_queue(struct asd_ha_struct *asd_ha, int num) +{ + int res = 0; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + if ((asd_ha->seq.can_queue - num) < 0) + res = -SAS_QUEUE_FULL; + else + asd_ha->seq.can_queue -= num; + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + return res; +} + +int asd_execute_task(struct sas_task *task, const int num, + unsigned long gfp_flags) +{ + int res = 0; + LIST_HEAD(alist); + struct sas_task *t = task; + struct asd_ascb *ascb = NULL, *a; + struct asd_ha_struct *asd_ha = 
task->dev->port->ha->lldd_ha; + + res = asd_can_queue(asd_ha, num); + if (res) + return res; + + res = num; + ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); + if (res) { + res = -ENOMEM; + goto out_err; + } + + __list_add(&alist, ascb->list.prev, &ascb->list); + list_for_each_entry(a, &alist, list) { + a->uldd_task = t; + t->lldd_task = a; + t = list_entry(t->list.next, struct sas_task, list); + } + list_for_each_entry(a, &alist, list) { + t = a->uldd_task; + a->uldd_timer = 1; + if (t->task_proto & SAS_PROTO_STP) + t->task_proto = SAS_PROTO_STP; + switch (t->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + res = asd_build_ata_ascb(a, t, gfp_flags); + break; + case SAS_PROTO_SMP: + res = asd_build_smp_ascb(a, t, gfp_flags); + break; + case SAS_PROTO_SSP: + res = asd_build_ssp_ascb(a, t, gfp_flags); + break; + default: + asd_printk("unknown sas_task proto: 0x%x\n", + t->task_proto); + res = -ENOMEM; + break; + } + if (res) + goto out_err_unmap; + } + list_del_init(&alist); + + res = asd_post_ascb_list(asd_ha, ascb, num); + if (unlikely(res)) { + a = NULL; + __list_add(&alist, ascb->list.prev, &ascb->list); + goto out_err_unmap; + } + + return 0; +out_err_unmap: + { + struct asd_ascb *b = a; + list_for_each_entry(a, &alist, list) { + if (a == b) + break; + t = a->uldd_task; + switch (t->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + asd_unbuild_ata_ascb(a); + break; + case SAS_PROTO_SMP: + asd_unbuild_smp_ascb(a); + break; + case SAS_PROTO_SSP: + asd_unbuild_ssp_ascb(a); + default: + break; + } + t->lldd_task = NULL; + } + } + list_del_init(&alist); +out_err: + if (ascb) + asd_ascb_free_list(ascb); + asd_can_dequeue(asd_ha, num); + return res; +} diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c new file mode 100644 index 0000000..6123438 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -0,0 +1,636 @@ +/* + * Aic94xx Task Management Functions + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include "aic94xx.h" +#include "aic94xx_sas.h" +#include "aic94xx_hwi.h" + +/* ---------- Internal enqueue ---------- */ + +static int asd_enqueue_internal(struct asd_ascb *ascb, + void (*tasklet_complete)(struct asd_ascb *, + struct done_list_struct *), + void (*timed_out)(unsigned long)) +{ + int res; + + ascb->tasklet_complete = tasklet_complete; + ascb->uldd_timer = 1; + + ascb->timer.data = (unsigned long) ascb; + ascb->timer.function = timed_out; + ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; + + add_timer(&ascb->timer); + + res = asd_post_ascb_list(ascb->ha, ascb, 1); + if (unlikely(res)) + del_timer(&ascb->timer); + return res; +} + +static inline void asd_timedout_common(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + struct asd_seq_data *seq = &ascb->ha->seq; + unsigned long flags; + + spin_lock_irqsave(&seq->pend_q_lock, flags); + seq->pending--; + list_del_init(&ascb->list); + spin_unlock_irqrestore(&seq->pend_q_lock, flags); +} + +/* ---------- CLEAR NEXUS ---------- */ + +static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + ASD_DPRINTK("%s: here\n", __FUNCTION__); + if (!del_timer(&ascb->timer)) { + ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); + return; + } + ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); + ascb->uldd_task = (void *) (unsigned long) dl->opcode; + complete(&ascb->completion); +} + +static void asd_clear_nexus_timedout(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + + ASD_DPRINTK("%s: here\n", __FUNCTION__); + asd_timedout_common(data); + ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; + complete(&ascb->completion); +} + +#define CLEAR_NEXUS_PRE \ + ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ + res = 1; \ + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ + if (!ascb) \ + return -ENOMEM; \ + \ + scb = ascb->scb; \ + scb->header.opcode = CLEAR_NEXUS + +#define CLEAR_NEXUS_POST \ + ASD_DPRINTK("%s: POST\n", __FUNCTION__); \ + res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \ + asd_clear_nexus_timedout); \ + if (res) \ + goto out_err; \ + ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ + wait_for_completion(&ascb->completion); \ + res = (int) (unsigned long) ascb->uldd_task; \ + if (res == TC_NO_ERROR) \ + res = TMF_RESP_FUNC_COMPLETE; \ +out_err: \ + asd_ascb_free(ascb); \ + return res + +int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) +{ + struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_ADAPTER; + CLEAR_NEXUS_POST; +} + +int asd_clear_nexus_port(struct asd_sas_port *port) +{ + struct asd_ha_struct *asd_ha = port->ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_PORT; + scb->clear_nexus.conn_mask = port->phy_mask; + CLEAR_NEXUS_POST; +} + +#if 0 +static int asd_clear_nexus_I_T(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_I_T; + scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; + if (dev->tproto) + scb->clear_nexus.flags |= 
SUSPEND_TX; + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + CLEAR_NEXUS_POST; +} +#endif + +static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_I_T_L; + scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; + if (dev->tproto) + scb->clear_nexus.flags |= SUSPEND_TX; + memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + CLEAR_NEXUS_POST; +} + +static int asd_clear_nexus_tag(struct sas_task *task) +{ + struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + struct asd_ascb *tascb = task->lldd_task; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_TAG; + memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8); + scb->clear_nexus.ssp_task.tag = tascb->tag; + if (task->dev->tproto) + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + task->dev->lldd_dev); + CLEAR_NEXUS_POST; +} + +static int asd_clear_nexus_index(struct sas_task *task) +{ + struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + struct asd_ascb *tascb = task->lldd_task; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_TRANS_CX; + if (task->dev->tproto) + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + task->dev->lldd_dev); + scb->clear_nexus.index = cpu_to_le16(tascb->tc_index); + CLEAR_NEXUS_POST; +} + +/* ---------- TMFs ---------- */ + +static void asd_tmf_timedout(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + + ASD_DPRINTK("tmf timed out\n"); + asd_timedout_common(data); + ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; + complete(&ascb->completion); +} + +static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + unsigned long flags; + struct tc_resp_sb_struct { + __le16 index_escb; + u8 len_lsb; + u8 flags; + } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; + + int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; + struct asd_ascb *escb; + struct asd_dma_tok *edb; + struct ssp_frame_hdr *fh; + struct ssp_response_iu *ru; + int res = TMF_RESP_FUNC_FAILED; + + ASD_DPRINTK("tmf resp tasklet\n"); + + spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); + escb = asd_tc_index_find(&asd_ha->seq, + (int)le16_to_cpu(resp_sb->index_escb)); + spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); + + if (!escb) { + ASD_DPRINTK("Uh-oh! 
No escb for this dl?!\n"); + return res; + } + + edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; + ascb->tag = *(__be16 *)(edb->vaddr+4); + fh = edb->vaddr + 16; + ru = edb->vaddr + 16 + sizeof(*fh); + res = ru->status; + if (ru->datapres == 1) /* Response data present */ + res = ru->resp_data[3]; +#if 0 + ascb->tag = fh->tag; +#endif + ascb->tag_valid = 1; + + asd_invalidate_edb(escb, edb_id); + return res; +} + +static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + if (!del_timer(&ascb->timer)) + return; + + ASD_DPRINTK("tmf tasklet complete\n"); + + if (dl->opcode == TC_SSP_RESP) + ascb->uldd_task = (void *) (unsigned long) + asd_get_tmf_resp_tasklet(ascb, dl); + else + ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode; + + complete(&ascb->completion); +} + +static inline int asd_clear_nexus(struct sas_task *task) +{ + int res = TMF_RESP_FUNC_FAILED; + struct asd_ascb *tascb = task->lldd_task; + unsigned long flags; + + ASD_DPRINTK("task not done, clearing nexus\n"); + if (tascb->tag_valid) + res = asd_clear_nexus_tag(task); + else + res = asd_clear_nexus_index(task); + wait_for_completion_timeout(&tascb->completion, + AIC94XX_SCB_TIMEOUT); + ASD_DPRINTK("came back from clear nexus\n"); + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) + res = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + return res; +} + +/** + * asd_abort_task -- ABORT TASK TMF + * @task: the task to be aborted + * + * Before calling ABORT TASK the task state flags should be ORed with + * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under + * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called. + * + * Implements the ABORT TASK TMF, I_T_L_Q nexus. + * Returns: SAS TMF responses (see sas_task.h), + * -ENOMEM, + * -SAS_QUEUE_FULL. + * + * When ABORT TASK returns, the caller of ABORT TASK checks first the + * task->task_state_flags, and then the return value of ABORT TASK. + * + * If the task has task state bit SAS_TASK_STATE_DONE set, then the + * task was completed successfully prior to it being aborted. The + * caller of ABORT TASK has responsibility to call task->task_done() + * xor free the task, depending on their framework. The return code + * is TMF_RESP_FUNC_FAILED in this case. + * + * Else the SAS_TASK_STATE_DONE bit is not set, + * If the return code is TMF_RESP_FUNC_COMPLETE, then + * the task was aborted successfully. The caller of + * ABORT TASK has responsibility to call task->task_done() + * to finish the task, xor free the task depending on their + * framework. + * else + * the ABORT TASK returned some kind of error. The task + * was _not_ cancelled. Nothing can be assumed. + * The caller of ABORT TASK may wish to retry. 
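+ *
+ * A caller-side sketch, for illustration only (error handling elided;
+ * see the description above for the exact contract):
+ *
+ *	spin_lock_irqsave(&task->task_state_lock, flags);
+ *	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ *		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ *	spin_unlock_irqrestore(&task->task_state_lock, flags);
+ *	res = asd_abort_task(task);
+ *	if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
+ *	    res == TMF_RESP_FUNC_COMPLETE)
+ *		task->task_done(task);	...or free it, per the framework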
+ */ +int asd_abort_task(struct sas_task *task) +{ + struct asd_ascb *tascb = task->lldd_task; + struct asd_ha_struct *asd_ha = tascb->ha; + int res = 1; + unsigned long flags; + struct asd_ascb *ascb = NULL; + struct scb *scb; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + res = TMF_RESP_FUNC_COMPLETE; + ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + scb = ascb->scb; + + scb->header.opcode = ABORT_TASK; + + switch (task->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + scb->abort_task.proto_conn_rate = (1 << 5); /* STP */ + break; + case SAS_PROTO_SSP: + scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */ + scb->abort_task.proto_conn_rate |= task->dev->linkrate; + break; + case SAS_PROTO_SMP: + break; + default: + break; + } + + if (task->task_proto == SAS_PROTO_SSP) { + scb->abort_task.ssp_frame.frame_type = SSP_TASK; + memcpy(scb->abort_task.ssp_frame.hashed_dest_addr, + task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + memcpy(scb->abort_task.ssp_frame.hashed_src_addr, + task->dev->port->ha->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); + + memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8); + scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK; + scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF); + } + + scb->abort_task.sister_scb = cpu_to_le16(0xFFFF); + scb->abort_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)task->dev->lldd_dev); + scb->abort_task.retry_count = 1; + scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index); + scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST); + + res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, + asd_tmf_timedout); + if (res) + goto out; + wait_for_completion(&ascb->completion); + ASD_DPRINTK("tmf came back\n"); + + res = (int) (unsigned long) ascb->uldd_task; + tascb->tag = ascb->tag; + tascb->tag_valid = ascb->tag_valid; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + res = TMF_RESP_FUNC_COMPLETE; + ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + switch (res) { + /* The task to be aborted has been sent to the device. + * We got a Response IU for the ABORT TASK TMF. */ + case TC_NO_ERROR + 0xFF00: + case TMF_RESP_FUNC_COMPLETE: + case TMF_RESP_FUNC_FAILED: + res = asd_clear_nexus(task); + break; + case TMF_RESP_INVALID_FRAME: + case TMF_RESP_OVERLAPPED_TAG: + case TMF_RESP_FUNC_ESUPP: + case TMF_RESP_NO_LUN: + goto out_done; break; + } + /* In the following we assume that the managing layer + * will _never_ make a mistake, when issuing ABORT TASK. + */ + switch (res) { + default: + res = asd_clear_nexus(task); + /* fallthrough */ + case TC_NO_ERROR + 0xFF00: + case TMF_RESP_FUNC_COMPLETE: + break; + /* The task hasn't been sent to the device xor we never got + * a (sane) Response IU for the ABORT TASK TMF. 
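+ *
+ * The 0xFF00 offset in the cases below comes from
+ * asd_tmf_tasklet_complete(), which reports completions that carry no
+ * SSP RESPONSE frame as 0xFF00 + the firmware's done-list opcode
+ * (TC_*/TF_*).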
+ */ + case TF_NAK_RECV + 0xFF00: + res = TMF_RESP_INVALID_FRAME; + break; + case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */ + res = TMF_RESP_FUNC_FAILED; + wait_for_completion_timeout(&tascb->completion, + AIC94XX_SCB_TIMEOUT); + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) + res = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + goto out_done; + case TF_TMF_NO_TAG + 0xFF00: + case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ + case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ + res = TMF_RESP_FUNC_COMPLETE; + goto out_done; + case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ + res = TMF_RESP_FUNC_ESUPP; + goto out; + } +out_done: + if (res == TMF_RESP_FUNC_COMPLETE) { + task->lldd_task = NULL; + mb(); + asd_ascb_free(tascb); + } +out: + asd_ascb_free(ascb); + ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); + return res; +} + +/** + * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus + * @dev: pointer to struct domain_device of interest + * @lun: pointer to u8[8] which is the LUN + * @tmf: the TMF to be performed (see sas_task.h or the SAS spec) + * @index: the transaction context of the task to be queried if QT TMF + * + * This function is used to send ABORT TASK SET, CLEAR ACA, + * CLEAR TASK SET, LU RESET and QUERY TASK TMFs. + * + * No SCBs should be queued to the I_T_L nexus when this SCB is + * pending. + * + * Returns: TMF response code (see sas_task.h or the SAS spec) + */ +static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, + int tmf, int index) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct asd_ascb *ascb; + int res = 1; + struct scb *scb; + + if (!(dev->tproto & SAS_PROTO_SSP)) + return TMF_RESP_FUNC_ESUPP; + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + scb = ascb->scb; + + if (tmf == TMF_QUERY_TASK) + scb->header.opcode = QUERY_SSP_TASK; + else + scb->header.opcode = INITIATE_SSP_TMF; + + scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */ + scb->ssp_tmf.proto_conn_rate |= dev->linkrate; + /* SSP frame header */ + scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK; + memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr, + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr, + dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF); + /* SSP Task IU */ + memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8); + scb->ssp_tmf.ssp_task.tmf = tmf; + + scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF); + scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + scb->ssp_tmf.retry_count = 1; + scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST); + if (tmf == TMF_QUERY_TASK) + scb->ssp_tmf.index = cpu_to_le16(index); + + res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, + asd_tmf_timedout); + if (res) + goto out_err; + wait_for_completion(&ascb->completion); + res = (int) (unsigned long) ascb->uldd_task; + + switch (res) { + case TC_NO_ERROR + 0xFF00: + res = TMF_RESP_FUNC_COMPLETE; + break; + case TF_NAK_RECV + 0xFF00: + res = TMF_RESP_INVALID_FRAME; + break; + case TF_TMF_TASK_DONE + 0xFF00: + res = TMF_RESP_FUNC_FAILED; + break; + case TF_TMF_NO_TAG + 0xFF00: + case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ + case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ + res = TMF_RESP_FUNC_COMPLETE; + break; + case TF_TMF_NO_CTX 
+ 0xFF00: /* not in seq, or proto != SSP */ + res = TMF_RESP_FUNC_ESUPP; + break; + default: + ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n", + __FUNCTION__, res); + res = TMF_RESP_FUNC_FAILED; + break; + } +out_err: + asd_ascb_free(ascb); + return res; +} + +int asd_abort_task_set(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_clear_aca(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_clear_task_set(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_lu_reset(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +/** + * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus + * task: pointer to sas_task struct of interest + * + * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set, + * or TMF_RESP_FUNC_SUCC if the task is in the task set. + * + * Normally the management layer sets the task to aborted state, + * and then calls query task and then abort task. + */ +int asd_query_task(struct sas_task *task) +{ + struct asd_ascb *ascb = task->lldd_task; + int index; + + if (ascb) { + index = ascb->tc_index; + return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN, + TMF_QUERY_TASK, index); + } + return TMF_RESP_FUNC_COMPLETE; +} diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig new file mode 100644 index 0000000..aafdc92f --- /dev/null +++ b/drivers/scsi/libsas/Kconfig @@ -0,0 +1,39 @@ +# +# Kernel configuration file for the SAS Class +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov +# +# This file is licensed under GPLv2. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +# USA +# + +config SCSI_SAS_LIBSAS + tristate "SAS Domain Transport Attributes" + depends on SCSI + select SCSI_SAS_ATTRS + help + This provides transport specific helpers for SAS drivers which + use the domain device construct (like the aic94xxx). + +config SCSI_SAS_LIBSAS_DEBUG + bool "Compile the SAS Domain Transport Attributes in debug mode" + default y + depends on SCSI_SAS_LIBSAS + help + Compiles the SAS Layer in debug mode. In debug mode, the + SAS Layer prints diagnostic and debug messages. 
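For context, the TMF and clear-nexus entry points defined in aic94xx_tmf.c above only take effect once the LLDD hands them to libsas, which is what "use the domain device construct" in the Kconfig help amounts to. The sketch below shows a minimal registration; the field names are those declared in include/scsi/libsas.h in this series, while the table name and its exact contents are illustrative (the driver's real registration table lives in its init code, not in these hunks).

/* Illustrative only: how an LLDD might wire the handlers above into
 * libsas.  Field names follow include/scsi/libsas.h of this series;
 * the table name and contents here are a sketch, not the real table.
 */
static struct sas_domain_function_template aic94xx_transport_functions = {
	.lldd_execute_task	= asd_execute_task,

	.lldd_abort_task	= asd_abort_task,
	.lldd_abort_task_set	= asd_abort_task_set,
	.lldd_clear_aca		= asd_clear_aca,
	.lldd_clear_task_set	= asd_clear_task_set,
	.lldd_lu_reset		= asd_lu_reset,
	.lldd_query_task	= asd_query_task,

	.lldd_clear_nexus_port	= asd_clear_nexus_port,
	.lldd_clear_nexus_ha	= asd_clear_nexus_ha,
};

libsas then reaches these handlers through dev->port->ha->core.shost->transportt, as seen in sas_execute_task() and smp_execute_task() further down in this patch.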
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile new file mode 100644 index 0000000..44d972a --- /dev/null +++ b/drivers/scsi/libsas/Makefile @@ -0,0 +1,36 @@ +# +# Kernel Makefile for the libsas helpers +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov +# +# This file is licensed under GPLv2. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +# USA + +ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y) + EXTRA_CFLAGS += -DSAS_DEBUG +endif + +obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o +libsas-y += sas_init.o \ + sas_phy.o \ + sas_port.o \ + sas_event.o \ + sas_dump.o \ + sas_discover.o \ + sas_expander.o \ + sas_scsi_host.o diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c new file mode 100644 index 0000000..d977bd4 --- /dev/null +++ b/drivers/scsi/libsas/sas_discover.c @@ -0,0 +1,749 @@ +/* + * Serial Attached SCSI (SAS) Discover process + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include "sas_internal.h" + +#include +#include +#include "../scsi_sas_internal.h" + +/* ---------- Basic task processing for discovery purposes ---------- */ + +void sas_init_dev(struct domain_device *dev) +{ + INIT_LIST_HEAD(&dev->siblings); + INIT_LIST_HEAD(&dev->dev_list_node); + switch (dev->dev_type) { + case SAS_END_DEV: + break; + case EDGE_DEV: + case FANOUT_DEV: + INIT_LIST_HEAD(&dev->ex_dev.children); + break; + case SATA_DEV: + case SATA_PM: + case SATA_PM_PORT: + INIT_LIST_HEAD(&dev->sata_dev.children); + break; + default: + break; + } +} + +static void sas_task_timedout(unsigned long _task) +{ + struct sas_task *task = (void *) _task; + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + complete(&task->completion); +} + +static void sas_disc_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +#define SAS_DEV_TIMEOUT 10 + +/** + * sas_execute_task -- Basic task processing for discovery + * @task: the task to be executed + * @buffer: pointer to buffer to do I/O + * @size: size of @buffer + * @pci_dma_dir: PCI_DMA_... + */ +static int sas_execute_task(struct sas_task *task, void *buffer, int size, + int pci_dma_dir) +{ + int res = 0; + struct scatterlist *scatter = NULL; + struct task_status_struct *ts = &task->task_status; + int num_scatter = 0; + int retries = 0; + struct sas_internal *i = + to_sas_internal(task->dev->port->ha->core.shost->transportt); + + if (pci_dma_dir != PCI_DMA_NONE) { + scatter = kzalloc(sizeof(*scatter), GFP_KERNEL); + if (!scatter) + goto out; + + sg_init_one(scatter, buffer, size); + num_scatter = 1; + } + + task->task_proto = task->dev->tproto; + task->scatter = scatter; + task->num_scatter = num_scatter; + task->total_xfer_len = size; + task->data_dir = pci_dma_dir; + task->task_done = sas_disc_task_done; + + for (retries = 0; retries < 5; retries++) { + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_completion(&task->completion); + + task->timer.data = (unsigned long) task; + task->timer.function = sas_task_timedout; + task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ; + add_timer(&task->timer); + + res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); + if (res) { + del_timer(&task->timer); + SAS_DPRINTK("executing SAS discovery task failed:%d\n", + res); + goto ex_err; + } + wait_for_completion(&task->completion); + res = -ETASK; + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + int res2; + SAS_DPRINTK("task aborted, flags:0x%x\n", + task->task_state_flags); + res2 = i->dft->lldd_abort_task(task); + SAS_DPRINTK("came back from abort task\n"); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + if (res2 == TMF_RESP_FUNC_COMPLETE) + continue; /* Retry the task */ + else + goto ex_err; + } + } + if (task->task_status.stat == SAM_BUSY || + task->task_status.stat == SAM_TASK_SET_FULL || + task->task_status.stat == SAS_QUEUE_FULL) { + SAS_DPRINTK("task: q busy, sleeping...\n"); + schedule_timeout_interruptible(HZ); + } else if (task->task_status.stat == SAM_CHECK_COND) { + struct scsi_sense_hdr shdr; + + if 
(!scsi_normalize_sense(ts->buf, ts->buf_valid_size, + &shdr)) { + SAS_DPRINTK("couldn't normalize sense\n"); + continue; + } + if ((shdr.sense_key == 6 && shdr.asc == 0x29) || + (shdr.sense_key == 2 && shdr.asc == 4 && + shdr.ascq == 1)) { + SAS_DPRINTK("device %016llx LUN: %016llx " + "powering up or not ready yet, " + "sleeping...\n", + SAS_ADDR(task->dev->sas_addr), + SAS_ADDR(task->ssp_task.LUN)); + + schedule_timeout_interruptible(5*HZ); + } else if (shdr.sense_key == 1) { + res = 0; + break; + } else if (shdr.sense_key == 5) { + break; + } else { + SAS_DPRINTK("dev %016llx LUN: %016llx " + "sense key:0x%x ASC:0x%x ASCQ:0x%x" + "\n", + SAS_ADDR(task->dev->sas_addr), + SAS_ADDR(task->ssp_task.LUN), + shdr.sense_key, + shdr.asc, shdr.ascq); + } + } else if (task->task_status.resp != SAS_TASK_COMPLETE || + task->task_status.stat != SAM_GOOD) { + SAS_DPRINTK("task finished with resp:0x%x, " + "stat:0x%x\n", + task->task_status.resp, + task->task_status.stat); + goto ex_err; + } else { + res = 0; + break; + } + } +ex_err: + if (pci_dma_dir != PCI_DMA_NONE) + kfree(scatter); +out: + return res; +} + +/* ---------- Domain device discovery ---------- */ + +/** + * sas_get_port_device -- Discover devices which caused port creation + * @port: pointer to struct sas_port of interest + * + * Devices directly attached to a HA port, have no parent. This is + * how we know they are (domain) "root" devices. All other devices + * do, and should have their "parent" pointer set appropriately as + * soon as a child device is discovered. + */ +static int sas_get_port_device(struct asd_sas_port *port) +{ + unsigned long flags; + struct asd_sas_phy *phy; + struct sas_rphy *rphy; + struct domain_device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + spin_lock_irqsave(&port->phy_list_lock, flags); + if (list_empty(&port->phy_list)) { + spin_unlock_irqrestore(&port->phy_list_lock, flags); + kfree(dev); + return -ENODEV; + } + phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el); + spin_lock(&phy->frame_rcvd_lock); + memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd), + (size_t)phy->frame_rcvd_size)); + spin_unlock(&phy->frame_rcvd_lock); + spin_unlock_irqrestore(&port->phy_list_lock, flags); + + if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) { + struct dev_to_host_fis *fis = + (struct dev_to_host_fis *) dev->frame_rcvd; + if (fis->interrupt_reason == 1 && fis->lbal == 1 && + fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 + && (fis->device & ~0x10) == 0) + dev->dev_type = SATA_PM; + else + dev->dev_type = SATA_DEV; + dev->tproto = SATA_PROTO; + } else { + struct sas_identify_frame *id = + (struct sas_identify_frame *) dev->frame_rcvd; + dev->dev_type = id->dev_type; + dev->iproto = id->initiator_bits; + dev->tproto = id->target_bits; + } + + sas_init_dev(dev); + + switch (dev->dev_type) { + case SAS_END_DEV: + rphy = sas_end_device_alloc(port->port); + break; + case EDGE_DEV: + rphy = sas_expander_alloc(port->port, + SAS_EDGE_EXPANDER_DEVICE); + break; + case FANOUT_DEV: + rphy = sas_expander_alloc(port->port, + SAS_FANOUT_EXPANDER_DEVICE); + break; + case SATA_DEV: + default: + printk("ERROR: Unidentified device type %d\n", dev->dev_type); + rphy = NULL; + break; + } + + if (!rphy) { + kfree(dev); + return -ENODEV; + } + rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; + memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); + sas_fill_in_rphy(dev, rphy); + 
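/* cache the hashed SAS address; LLDDs use it in SSP frame headers */
+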
sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); + port->port_dev = dev; + dev->port = port; + dev->linkrate = port->linkrate; + dev->min_linkrate = port->linkrate; + dev->max_linkrate = port->linkrate; + dev->pathways = port->num_phys; + memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE); + memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE); + memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE); + port->disc.max_level = 0; + + dev->rphy = rphy; + spin_lock(&port->dev_list_lock); + list_add_tail(&dev->dev_list_node, &port->dev_list); + spin_unlock(&port->dev_list_lock); + + return 0; +} + +/* ---------- Discover and Revalidate ---------- */ + +/* ---------- SATA ---------- */ + +static void sas_get_ata_command_set(struct domain_device *dev) +{ + struct dev_to_host_fis *fis = + (struct dev_to_host_fis *) dev->frame_rcvd; + + if ((fis->sector_count == 1 && /* ATA */ + fis->lbal == 1 && + fis->lbam == 0 && + fis->lbah == 0 && + fis->device == 0) + || + (fis->sector_count == 0 && /* CE-ATA (mATA) */ + fis->lbal == 0 && + fis->lbam == 0xCE && + fis->lbah == 0xAA && + (fis->device & ~0x10) == 0)) + + dev->sata_dev.command_set = ATA_COMMAND_SET; + + else if ((fis->interrupt_reason == 1 && /* ATAPI */ + fis->lbal == 1 && + fis->byte_count_low == 0x14 && + fis->byte_count_high == 0xEB && + (fis->device & ~0x10) == 0)) + + dev->sata_dev.command_set = ATAPI_COMMAND_SET; + + else if ((fis->sector_count == 1 && /* SEMB */ + fis->lbal == 1 && + fis->lbam == 0x3C && + fis->lbah == 0xC3 && + fis->device == 0) + || + (fis->interrupt_reason == 1 && /* SATA PM */ + fis->lbal == 1 && + fis->byte_count_low == 0x69 && + fis->byte_count_high == 0x96 && + (fis->device & ~0x10) == 0)) + + /* Treat it as a superset? */ + dev->sata_dev.command_set = ATAPI_COMMAND_SET; +} + +/** + * sas_issue_ata_cmd -- Basic SATA command processing for discovery + * @dev: the device to send the command to + * @command: the command register + * @features: the features register + * @buffer: pointer to buffer to do I/O + * @size: size of @buffer + * @pci_dma_dir: PCI_DMA_... + */ +static int sas_issue_ata_cmd(struct domain_device *dev, u8 command, + u8 features, void *buffer, int size, + int pci_dma_dir) +{ + int res = 0; + struct sas_task *task; + struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *) + &dev->frame_rcvd[0]; + + res = -ENOMEM; + task = sas_alloc_task(GFP_KERNEL); + if (!task) + goto out; + + task->dev = dev; + + task->ata_task.fis.command = command; + task->ata_task.fis.features = features; + task->ata_task.fis.device = d2h_fis->device; + task->ata_task.retry_count = 1; + + res = sas_execute_task(task, buffer, size, pci_dma_dir); + + sas_free_task(task); +out: + return res; +} + +static void sas_sata_propagate_sas_addr(struct domain_device *dev) +{ + unsigned long flags; + struct asd_sas_port *port = dev->port; + struct asd_sas_phy *phy; + + BUG_ON(dev->parent); + + memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); + spin_lock_irqsave(&port->phy_list_lock, flags); + list_for_each_entry(phy, &port->phy_list, port_phy_el) + memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); + spin_unlock_irqrestore(&port->phy_list_lock, flags); +} + +#define ATA_IDENTIFY_DEV 0xEC +#define ATA_IDENTIFY_PACKET_DEV 0xA1 +#define ATA_SET_FEATURES 0xEF +#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07 + +/** + * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV) + * @dev: STP/SATA device of interest (ATA/ATAPI) + * + * The LLDD has already been notified of this device, so that we can + * send FISes to it. 
Here we try to get IDENTIFY DEVICE or IDENTIFY + * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its + * performance for this device. + */ +static int sas_discover_sata_dev(struct domain_device *dev) +{ + int res; + __le16 *identify_x; + u8 command; + + identify_x = kzalloc(512, GFP_KERNEL); + if (!identify_x) + return -ENOMEM; + + if (dev->sata_dev.command_set == ATA_COMMAND_SET) { + dev->sata_dev.identify_device = identify_x; + command = ATA_IDENTIFY_DEV; + } else { + dev->sata_dev.identify_packet_device = identify_x; + command = ATA_IDENTIFY_PACKET_DEV; + } + + res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512, + PCI_DMA_FROMDEVICE); + if (res) + goto out_err; + + /* lives on the media? */ + if (le16_to_cpu(identify_x[0]) & 4) { + /* incomplete response */ + SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to " + "dev %llx\n", SAS_ADDR(dev->sas_addr)); + if (!le16_to_cpu(identify_x[83] & (1<<6))) + goto cont1; + res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES, + ATA_FEATURE_PUP_STBY_SPIN_UP, + NULL, 0, PCI_DMA_NONE); + if (res) + goto cont1; + + schedule_timeout_interruptible(5*HZ); /* More time? */ + res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512, + PCI_DMA_FROMDEVICE); + if (res) + goto out_err; + } +cont1: + /* Get WWN */ + if (dev->port->oob_mode != SATA_OOB_MODE) { + memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr, + SAS_ADDR_SIZE); + } else if (dev->sata_dev.command_set == ATA_COMMAND_SET && + (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000) + == 0x5000) { + int i; + + for (i = 0; i < 4; i++) { + dev->sas_addr[2*i] = + (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8; + dev->sas_addr[2*i+1] = + le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF; + } + } + sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); + if (!dev->parent) + sas_sata_propagate_sas_addr(dev); + + /* XXX Hint: register this SATA device with SATL. + When this returns, dev->sata_dev->lu is alive and + present. + sas_satl_register_dev(dev); + */ + return 0; +out_err: + dev->sata_dev.identify_packet_device = NULL; + dev->sata_dev.identify_device = NULL; + kfree(identify_x); + return res; +} + +static int sas_discover_sata_pm(struct domain_device *dev) +{ + return -ENODEV; +} + +int sas_notify_lldd_dev_found(struct domain_device *dev) +{ + int res = 0; + struct sas_ha_struct *sas_ha = dev->port->ha; + struct Scsi_Host *shost = sas_ha->core.shost; + struct sas_internal *i = to_sas_internal(shost->transportt); + + if (i->dft->lldd_dev_found) { + res = i->dft->lldd_dev_found(dev); + if (res) { + printk("sas: driver on pcidev %s cannot handle " + "device %llx, error:%d\n", + pci_name(sas_ha->pcidev), + SAS_ADDR(dev->sas_addr), res); + } + } + return res; +} + + +void sas_notify_lldd_dev_gone(struct domain_device *dev) +{ + struct sas_ha_struct *sas_ha = dev->port->ha; + struct Scsi_Host *shost = sas_ha->core.shost; + struct sas_internal *i = to_sas_internal(shost->transportt); + + if (i->dft->lldd_dev_gone) + i->dft->lldd_dev_gone(dev); +} + +/* ---------- Common/dispatchers ---------- */ + +/** + * sas_discover_sata -- discover an STP/SATA domain device + * @dev: pointer to struct domain_device of interest + * + * First we notify the LLDD of this device, so we can send frames to + * it. Then depending on the type of device we call the appropriate + * discover functions. Once device discover is done, we notify the + * LLDD so that it can fine-tune its parameters for the device, by + * removing it and then adding it. 
That is, the second time around, + * the driver would have certain fields, that it is looking at, set. + * Finally we initialize the kobj so that the device can be added to + * the system at registration time. Devices directly attached to a HA + * port, have no parents. All other devices do, and should have their + * "parent" pointer set appropriately before calling this function. + */ +int sas_discover_sata(struct domain_device *dev) +{ + int res; + + sas_get_ata_command_set(dev); + + res = sas_notify_lldd_dev_found(dev); + if (res) + return res; + + switch (dev->dev_type) { + case SATA_DEV: + res = sas_discover_sata_dev(dev); + break; + case SATA_PM: + res = sas_discover_sata_pm(dev); + break; + default: + break; + } + + sas_notify_lldd_dev_gone(dev); + if (!res) { + sas_notify_lldd_dev_found(dev); + } + return res; +} + +/** + * sas_discover_end_dev -- discover an end device (SSP, etc) + * @end: pointer to domain device of interest + * + * See comment in sas_discover_sata(). + */ +int sas_discover_end_dev(struct domain_device *dev) +{ + int res; + + res = sas_notify_lldd_dev_found(dev); + if (res) + return res; + + res = sas_rphy_add(dev->rphy); + if (res) + goto out_err; + + /* do this to get the end device port attributes which will have + * been scanned in sas_rphy_add */ + sas_notify_lldd_dev_gone(dev); + sas_notify_lldd_dev_found(dev); + + return 0; + +out_err: + sas_notify_lldd_dev_gone(dev); + return res; +} + +/* ---------- Device registration and unregistration ---------- */ + +static inline void sas_unregister_common_dev(struct domain_device *dev) +{ + sas_notify_lldd_dev_gone(dev); + if (!dev->parent) + dev->port->port_dev = NULL; + else + list_del_init(&dev->siblings); + list_del_init(&dev->dev_list_node); +} + +void sas_unregister_dev(struct domain_device *dev) +{ + if (dev->rphy) { + sas_remove_children(&dev->rphy->dev); + sas_rphy_delete(dev->rphy); + dev->rphy = NULL; + } + if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { + /* remove the phys and ports, everything else should be gone */ + kfree(dev->ex_dev.ex_phy); + dev->ex_dev.ex_phy = NULL; + } + sas_unregister_common_dev(dev); +} + +void sas_unregister_domain_devices(struct asd_sas_port *port) +{ + struct domain_device *dev, *n; + + list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node) + sas_unregister_dev(dev); + + port->port->rphy = NULL; + +} + +/* ---------- Discovery and Revalidation ---------- */ + +/** + * sas_discover_domain -- discover the domain + * @port: port to the domain of interest + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * Discover process only interrogates devices in order to discover the + * domain. 
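+ *
+ * Runs from the Scsi_Host's work queue: sas_discover_event() queues
+ * the DISCE_DISCOVER_DOMAIN work item that sas_init_disc() (below)
+ * binds to this function.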
+ */ +static void sas_discover_domain(void *data) +{ + int error = 0; + struct asd_sas_port *port = data; + + sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, + &port->disc.pending); + + if (port->port_dev) + return ; + else { + error = sas_get_port_device(port); + if (error) + return; + } + + SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, + current->pid); + + switch (port->port_dev->dev_type) { + case SAS_END_DEV: + error = sas_discover_end_dev(port->port_dev); + break; + case EDGE_DEV: + case FANOUT_DEV: + error = sas_discover_root_expander(port->port_dev); + break; + case SATA_DEV: + case SATA_PM: + error = sas_discover_sata(port->port_dev); + break; + default: + SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type); + break; + } + + if (error) { + kfree(port->port_dev); /* not kobject_register-ed yet */ + port->port_dev = NULL; + } + + SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id, + current->pid, error); +} + +static void sas_revalidate_domain(void *data) +{ + int res = 0; + struct asd_sas_port *port = data; + + sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, + &port->disc.pending); + + SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, + current->pid); + if (port->port_dev) + res = sas_ex_revalidate_domain(port->port_dev); + + SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", + port->id, current->pid, res); +} + +/* ---------- Events ---------- */ + +int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) +{ + struct sas_discovery *disc; + + if (!port) + return 0; + disc = &port->disc; + + BUG_ON(ev >= DISC_NUM_EVENTS); + + sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, + &disc->disc_work[ev], port->ha->core.shost); + + return 0; +} + +/** + * sas_init_disc -- initialize the discovery struct in the port + * @port: pointer to struct port + * + * Called when the ports are being initialized. + */ +void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) +{ + int i; + + static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { + [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, + [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, + }; + + spin_lock_init(&disc->disc_event_lock); + disc->pending = 0; + for (i = 0; i < DISC_NUM_EVENTS; i++) + INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); +} diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c new file mode 100644 index 0000000..f1246d2 --- /dev/null +++ b/drivers/scsi/libsas/sas_dump.c @@ -0,0 +1,76 @@ +/* + * Serial Attached SCSI (SAS) Dump/Debugging routines + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_dump.h" + +#ifdef SAS_DEBUG + +static const char *sas_hae_str[] = { + [0] = "HAE_RESET", +}; + +static const char *sas_porte_str[] = { + [0] = "PORTE_BYTES_DMAED", + [1] = "PORTE_BROADCAST_RCVD", + [2] = "PORTE_LINK_RESET_ERR", + [3] = "PORTE_TIMER_EVENT", + [4] = "PORTE_HARD_RESET", +}; + +static const char *sas_phye_str[] = { + [0] = "PHYE_LOSS_OF_SIGNAL", + [1] = "PHYE_OOB_DONE", + [2] = "PHYE_OOB_ERROR", + [3] = "PHYE_SPINUP_HOLD", +}; + +void sas_dprint_porte(int phyid, enum port_event pe) +{ + SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]); +} +void sas_dprint_phye(int phyid, enum phy_event pe) +{ + SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]); +} + +void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he) +{ + SAS_DPRINTK("ha %s: %s event\n", pci_name(sas_ha->pcidev), + sas_hae_str[he]); +} + +void sas_dump_port(struct asd_sas_port *port) +{ + SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class); + SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id, + SAS_ADDR(port->sas_addr)); + SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id, + SAS_ADDR(port->attached_sas_addr)); + SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto); + SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto); + SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode); + SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys); +} + +#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h new file mode 100644 index 0000000..47b45d4 --- /dev/null +++ b/drivers/scsi/libsas/sas_dump.h @@ -0,0 +1,42 @@ +/* + * Serial Attached SCSI (SAS) Dump/Debugging routines header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_internal.h" + +#ifdef SAS_DEBUG + +void sas_dprint_porte(int phyid, enum port_event pe); +void sas_dprint_phye(int phyid, enum phy_event pe); +void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he); +void sas_dump_port(struct asd_sas_port *port); + +#else /* SAS_DEBUG */ + +static inline void sas_dprint_porte(int phyid, enum port_event pe) { } +static inline void sas_dprint_phye(int phyid, enum phy_event pe) { } +static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha, + enum ha_event he) { } +static inline void sas_dump_port(struct asd_sas_port *port) { } + +#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c new file mode 100644 index 0000000..19110ed --- /dev/null +++ b/drivers/scsi/libsas/sas_event.c @@ -0,0 +1,75 @@ +/* + * Serial Attached SCSI (SAS) Event processing + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include "sas_internal.h" +#include "sas_dump.h" + +static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) +{ + BUG_ON(event >= HA_NUM_EVENTS); + + sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, + &sas_ha->ha_events[event], sas_ha->core.shost); +} + +static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) +{ + struct sas_ha_struct *ha = phy->ha; + + BUG_ON(event >= PORT_NUM_EVENTS); + + sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, + &phy->port_events[event], ha->core.shost); +} + +static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) +{ + struct sas_ha_struct *ha = phy->ha; + + BUG_ON(event >= PHY_NUM_EVENTS); + + sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, + &phy->phy_events[event], ha->core.shost); +} + +int sas_init_events(struct sas_ha_struct *sas_ha) +{ + static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { + [HAE_RESET] = sas_hae_reset, + }; + + int i; + + spin_lock_init(&sas_ha->event_lock); + + for (i = 0; i < HA_NUM_EVENTS; i++) + INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); + + sas_ha->notify_ha_event = notify_ha_event; + sas_ha->notify_port_event = notify_port_event; + sas_ha->notify_phy_event = notify_phy_event; + + return 0; +} diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c new file mode 100644 index 0000000..b653a26 --- /dev/null +++ b/drivers/scsi/libsas/sas_expander.c @@ -0,0 +1,1862 @@ +/* + * Serial Attached SCSI (SAS) Expander discovery 
and configuration + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include + +#include "sas_internal.h" + +#include +#include +#include "../scsi_sas_internal.h" + +static int sas_discover_expander(struct domain_device *dev); +static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); +static int sas_configure_phy(struct domain_device *dev, int phy_id, + u8 *sas_addr, int include); +static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); + +#if 0 +/* FIXME: smp needs to migrate into the sas class */ +static ssize_t smp_portal_read(struct kobject *, char *, loff_t, size_t); +static ssize_t smp_portal_write(struct kobject *, char *, loff_t, size_t); +#endif + +/* ---------- SMP task management ---------- */ + +static void smp_task_timedout(unsigned long _task) +{ + struct sas_task *task = (void *) _task; + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + complete(&task->completion); +} + +static void smp_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +/* Give it some long enough timeout. In seconds. 
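Used by smp_execute_task() below when waiting for an expander to answer an SMP request.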
*/ +#define SMP_TIMEOUT 10 + +static int smp_execute_task(struct domain_device *dev, void *req, int req_size, + void *resp, int resp_size) +{ + int res; + struct sas_task *task = sas_alloc_task(GFP_KERNEL); + struct sas_internal *i = + to_sas_internal(dev->port->ha->core.shost->transportt); + + if (!task) + return -ENOMEM; + + task->dev = dev; + task->task_proto = dev->tproto; + sg_init_one(&task->smp_task.smp_req, req, req_size); + sg_init_one(&task->smp_task.smp_resp, resp, resp_size); + + task->task_done = smp_task_done; + + task->timer.data = (unsigned long) task; + task->timer.function = smp_task_timedout; + task->timer.expires = jiffies + SMP_TIMEOUT*HZ; + add_timer(&task->timer); + + res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); + + if (res) { + del_timer(&task->timer); + SAS_DPRINTK("executing SMP task failed:%d\n", res); + goto ex_err; + } + + wait_for_completion(&task->completion); + res = -ETASK; + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + SAS_DPRINTK("smp task timed out or aborted\n"); + i->dft->lldd_abort_task(task); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + SAS_DPRINTK("SMP task aborted and not done\n"); + goto ex_err; + } + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAM_GOOD) + res = 0; + else + SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " + "status 0x%x\n", __FUNCTION__, + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat); +ex_err: + sas_free_task(task); + return res; +} + +/* ---------- Allocations ---------- */ + +static inline void *alloc_smp_req(int size) +{ + u8 *p = kzalloc(size, GFP_KERNEL); + if (p) + p[0] = SMP_REQUEST; + return p; +} + +static inline void *alloc_smp_resp(int size) +{ + return kzalloc(size, GFP_KERNEL); +} + +/* ---------- Expander configuration ---------- */ + +static void sas_set_ex_phy(struct domain_device *dev, int phy_id, + void *disc_resp) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + struct smp_resp *resp = disc_resp; + struct discover_resp *dr = &resp->disc; + struct sas_rphy *rphy = dev->rphy; + int rediscover = (phy->phy != NULL); + + if (!rediscover) { + phy->phy = sas_phy_alloc(&rphy->dev, phy_id); + + /* FIXME: error_handling */ + BUG_ON(!phy->phy); + } + + switch (resp->result) { + case SMP_RESP_PHY_VACANT: + phy->phy_state = PHY_VACANT; + return; + default: + phy->phy_state = PHY_NOT_PRESENT; + return; + case SMP_RESP_FUNC_ACC: + phy->phy_state = PHY_EMPTY; /* do not know yet */ + break; + } + + phy->phy_id = phy_id; + phy->attached_dev_type = dr->attached_dev_type; + phy->linkrate = dr->linkrate; + phy->attached_sata_host = dr->attached_sata_host; + phy->attached_sata_dev = dr->attached_sata_dev; + phy->attached_sata_ps = dr->attached_sata_ps; + phy->attached_iproto = dr->iproto << 1; + phy->attached_tproto = dr->tproto << 1; + memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); + phy->attached_phy_id = dr->attached_phy_id; + phy->phy_change_count = dr->change_count; + phy->routing_attr = dr->routing_attr; + phy->virtual = dr->virtual; + phy->last_da_index = -1; + + phy->phy->identify.initiator_port_protocols = phy->attached_iproto; + phy->phy->identify.target_port_protocols = phy->attached_tproto; + phy->phy->identify.phy_identifier = phy_id; + phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + phy->phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate = 
SAS_LINK_RATE_3_0_GBPS; + switch (phy->linkrate) { + case PHY_LINKRATE_1_5: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; + break; + case PHY_LINKRATE_3: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; + break; + case PHY_LINKRATE_6: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; + break; + default: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + break; + } + + if (!rediscover) + sas_phy_add(phy->phy); + + SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n", + SAS_ADDR(dev->sas_addr), phy->phy_id, + phy->routing_attr == TABLE_ROUTING ? 'T' : + phy->routing_attr == DIRECT_ROUTING ? 'D' : + phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?', + SAS_ADDR(phy->attached_sas_addr)); + + return; +} + +#define DISCOVER_REQ_SIZE 16 +#define DISCOVER_RESP_SIZE 56 + +static int sas_ex_phy_discover(struct domain_device *dev, int single) +{ + struct expander_device *ex = &dev->ex_dev; + int res = 0; + u8 *disc_req; + u8 *disc_resp; + + disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); + if (!disc_req) + return -ENOMEM; + + disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); + if (!disc_resp) { + kfree(disc_req); + return -ENOMEM; + } + + disc_req[1] = SMP_DISCOVER; + + if (0 <= single && single < ex->num_phys) { + disc_req[9] = single; + res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, + disc_resp, DISCOVER_RESP_SIZE); + if (res) + goto out_err; + sas_set_ex_phy(dev, single, disc_resp); + } else { + int i; + + for (i = 0; i < ex->num_phys; i++) { + disc_req[9] = i; + res = smp_execute_task(dev, disc_req, + DISCOVER_REQ_SIZE, disc_resp, + DISCOVER_RESP_SIZE); + if (res) + goto out_err; + sas_set_ex_phy(dev, i, disc_resp); + } + } +out_err: + kfree(disc_resp); + kfree(disc_req); + return res; +} + +static int sas_expander_discover(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + int res = -ENOMEM; + + ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL); + if (!ex->ex_phy) + return -ENOMEM; + + res = sas_ex_phy_discover(dev, -1); + if (res) + goto out_err; + + return 0; + out_err: + kfree(ex->ex_phy); + ex->ex_phy = NULL; + return res; +} + +#define MAX_EXPANDER_PHYS 128 + +static void ex_assign_report_general(struct domain_device *dev, + struct smp_resp *resp) +{ + struct report_general_resp *rg = &resp->rg; + + dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); + dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); + dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); + dev->ex_dev.conf_route_table = rg->conf_route_table; + dev->ex_dev.configuring = rg->configuring; + memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); +} + +#define RG_REQ_SIZE 8 +#define RG_RESP_SIZE 32 + +static int sas_ex_general(struct domain_device *dev) +{ + u8 *rg_req; + struct smp_resp *rg_resp; + int res; + int i; + + rg_req = alloc_smp_req(RG_REQ_SIZE); + if (!rg_req) + return -ENOMEM; + + rg_resp = alloc_smp_resp(RG_RESP_SIZE); + if (!rg_resp) { + kfree(rg_req); + return -ENOMEM; + } + + rg_req[1] = SMP_REPORT_GENERAL; + + for (i = 0; i < 5; i++) { + res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, + RG_RESP_SIZE); + + if (res) { + SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", + SAS_ADDR(dev->sas_addr), res); + goto out; + } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { + SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n", + SAS_ADDR(dev->sas_addr), rg_resp->result); + res = rg_resp->result; + goto out; + } + + ex_assign_report_general(dev, rg_resp); + + if 
(dev->ex_dev.configuring) { + SAS_DPRINTK("RG: ex %llx self-configuring...\n", + SAS_ADDR(dev->sas_addr)); + schedule_timeout_interruptible(5*HZ); + } else + break; + } +out: + kfree(rg_req); + kfree(rg_resp); + return res; +} + +static void ex_assign_manuf_info(struct domain_device *dev, void + *_mi_resp) +{ + u8 *mi_resp = _mi_resp; + struct sas_rphy *rphy = dev->rphy; + struct sas_expander_device *edev = rphy_to_expander_device(rphy); + + memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); + memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); + memcpy(edev->product_rev, mi_resp + 36, + SAS_EXPANDER_PRODUCT_REV_LEN); + + if (mi_resp[8] & 1) { + memcpy(edev->component_vendor_id, mi_resp + 40, + SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + edev->component_id = mi_resp[48] << 8 | mi_resp[49]; + edev->component_revision_id = mi_resp[50]; + } +} + +#define MI_REQ_SIZE 8 +#define MI_RESP_SIZE 64 + +static int sas_ex_manuf_info(struct domain_device *dev) +{ + u8 *mi_req; + u8 *mi_resp; + int res; + + mi_req = alloc_smp_req(MI_REQ_SIZE); + if (!mi_req) + return -ENOMEM; + + mi_resp = alloc_smp_resp(MI_RESP_SIZE); + if (!mi_resp) { + kfree(mi_req); + return -ENOMEM; + } + + mi_req[1] = SMP_REPORT_MANUF_INFO; + + res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); + if (res) { + SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", + SAS_ADDR(dev->sas_addr), res); + goto out; + } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { + SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", + SAS_ADDR(dev->sas_addr), mi_resp[2]); + goto out; + } + + ex_assign_manuf_info(dev, mi_resp); +out: + kfree(mi_req); + kfree(mi_resp); + return res; +} + +#define PC_REQ_SIZE 44 +#define PC_RESP_SIZE 8 + +int sas_smp_phy_control(struct domain_device *dev, int phy_id, + enum phy_func phy_func) +{ + u8 *pc_req; + u8 *pc_resp; + int res; + + pc_req = alloc_smp_req(PC_REQ_SIZE); + if (!pc_req) + return -ENOMEM; + + pc_resp = alloc_smp_resp(PC_RESP_SIZE); + if (!pc_resp) { + kfree(pc_req); + return -ENOMEM; + } + + pc_req[1] = SMP_PHY_CONTROL; + pc_req[9] = phy_id; + pc_req[10]= phy_func; + + res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); + + kfree(pc_resp); + kfree(pc_req); + return res; +} + +static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + + sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE); + phy->linkrate = PHY_DISABLED; +} + +static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) + sas_ex_disable_phy(dev, i); + } +} + +static int sas_dev_present_in_domain(struct asd_sas_port *port, + u8 *sas_addr) +{ + struct domain_device *dev; + + if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) + return 1; + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) + return 1; + } + return 0; +} + +#define RPEL_REQ_SIZE 16 +#define RPEL_RESP_SIZE 32 +int sas_smp_get_phy_events(struct sas_phy *phy) +{ + int res; + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *dev = sas_find_dev_by_rphy(rphy); + u8 *req = alloc_smp_req(RPEL_REQ_SIZE); + u8 *resp = 
kzalloc(RPEL_RESP_SIZE, GFP_KERNEL); + + if (!resp) + return -ENOMEM; + + req[1] = SMP_REPORT_PHY_ERR_LOG; + req[9] = phy->number; + + res = smp_execute_task(dev, req, RPEL_REQ_SIZE, + resp, RPEL_RESP_SIZE); + + if (!res) + goto out; + + phy->invalid_dword_count = scsi_to_u32(&resp[12]); + phy->running_disparity_error_count = scsi_to_u32(&resp[16]); + phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); + phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); + + out: + kfree(resp); + return res; + +} + +#define RPS_REQ_SIZE 16 +#define RPS_RESP_SIZE 60 + +static int sas_get_report_phy_sata(struct domain_device *dev, + int phy_id, + struct smp_resp *rps_resp) +{ + int res; + u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); + + if (!rps_req) + return -ENOMEM; + + rps_req[1] = SMP_REPORT_PHY_SATA; + rps_req[9] = phy_id; + + res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, + rps_resp, RPS_RESP_SIZE); + + kfree(rps_req); + return 0; +} + +static void sas_ex_get_linkrate(struct domain_device *parent, + struct domain_device *child, + struct ex_phy *parent_phy) +{ + struct expander_device *parent_ex = &parent->ex_dev; + struct sas_port *port; + int i; + + child->pathways = 0; + + port = parent_phy->port; + + for (i = 0; i < parent_ex->num_phys; i++) { + struct ex_phy *phy = &parent_ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(child->sas_addr)) { + + child->min_linkrate = min(parent->min_linkrate, + phy->linkrate); + child->max_linkrate = max(parent->max_linkrate, + phy->linkrate); + child->pathways++; + sas_port_add_phy(port, phy->phy); + } + } + child->linkrate = min(parent_phy->linkrate, child->max_linkrate); + child->pathways = min(child->pathways, parent->pathways); +} + +static struct domain_device *sas_ex_discover_end_dev( + struct domain_device *parent, int phy_id) +{ + struct expander_device *parent_ex = &parent->ex_dev; + struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; + struct domain_device *child = NULL; + struct sas_rphy *rphy; + int res; + + if (phy->attached_sata_host || phy->attached_sata_ps) + return NULL; + + child = kzalloc(sizeof(*child), GFP_KERNEL); + if (!child) + return NULL; + + child->parent = parent; + child->port = parent->port; + child->iproto = phy->attached_iproto; + memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + sas_hash_addr(child->hashed_sas_addr, child->sas_addr); + phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); + BUG_ON(!phy->port); + /* FIXME: better error handling*/ + BUG_ON(sas_port_add(phy->port) != 0); + sas_ex_get_linkrate(parent, child, phy); + + if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) { + child->dev_type = SATA_DEV; + if (phy->attached_tproto & SAS_PROTO_STP) + child->tproto = phy->attached_tproto; + if (phy->attached_sata_dev) + child->tproto |= SATA_DEV; + res = sas_get_report_phy_sata(parent, phy_id, + &child->sata_dev.rps_resp); + if (res) { + SAS_DPRINTK("report phy sata to %016llx:0x%x returned " + "0x%x\n", SAS_ADDR(parent->sas_addr), + phy_id, res); + kfree(child); + return NULL; + } + memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis, + sizeof(struct dev_to_host_fis)); + sas_init_dev(child); + res = sas_discover_sata(child); + if (res) { + SAS_DPRINTK("sas_discover_sata() for device %16llx at " + "%016llx:0x%x returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, res); + kfree(child); + return NULL; + } + } else if (phy->attached_tproto 
& SAS_PROTO_SSP) { + child->dev_type = SAS_END_DEV; + rphy = sas_end_device_alloc(phy->port); + /* FIXME: error handling */ + BUG_ON(!rphy); + child->tproto = phy->attached_tproto; + sas_init_dev(child); + + child->rphy = rphy; + sas_fill_in_rphy(child, rphy); + + spin_lock(&parent->port->dev_list_lock); + list_add_tail(&child->dev_list_node, &parent->port->dev_list); + spin_unlock(&parent->port->dev_list_lock); + + res = sas_discover_end_dev(child); + if (res) { + SAS_DPRINTK("sas_discover_end_dev() for device %16llx " + "at %016llx:0x%x returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, res); + /* FIXME: this kfrees list elements without removing them */ + //kfree(child); + return NULL; + } + } else { + SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", + phy->attached_tproto, SAS_ADDR(parent->sas_addr), + phy_id); + } + + list_add_tail(&child->siblings, &parent_ex->children); + return child; +} + +static struct domain_device *sas_ex_discover_expander( + struct domain_device *parent, int phy_id) +{ + struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); + struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; + struct domain_device *child = NULL; + struct sas_rphy *rphy; + struct sas_expander_device *edev; + struct asd_sas_port *port; + int res; + + if (phy->routing_attr == DIRECT_ROUTING) { + SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " + "allowed\n", + SAS_ADDR(parent->sas_addr), phy_id, + SAS_ADDR(phy->attached_sas_addr), + phy->attached_phy_id); + return NULL; + } + child = kzalloc(sizeof(*child), GFP_KERNEL); + if (!child) + return NULL; + + phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); + /* FIXME: better error handling */ + BUG_ON(sas_port_add(phy->port) != 0); + + + switch (phy->attached_dev_type) { + case EDGE_DEV: + rphy = sas_expander_alloc(phy->port, + SAS_EDGE_EXPANDER_DEVICE); + break; + case FANOUT_DEV: + rphy = sas_expander_alloc(phy->port, + SAS_FANOUT_EXPANDER_DEVICE); + break; + default: + rphy = NULL; /* shut gcc up */ + BUG(); + } + port = parent->port; + child->rphy = rphy; + edev = rphy_to_expander_device(rphy); + child->dev_type = phy->attached_dev_type; + child->parent = parent; + child->port = port; + child->iproto = phy->attached_iproto; + child->tproto = phy->attached_tproto; + memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + sas_hash_addr(child->hashed_sas_addr, child->sas_addr); + sas_ex_get_linkrate(parent, child, phy); + edev->level = parent_ex->level + 1; + parent->port->disc.max_level = max(parent->port->disc.max_level, + edev->level); + sas_init_dev(child); + sas_fill_in_rphy(child, rphy); + sas_rphy_add(rphy); + + spin_lock(&parent->port->dev_list_lock); + list_add_tail(&child->dev_list_node, &parent->port->dev_list); + spin_unlock(&parent->port->dev_list_lock); + + res = sas_discover_expander(child); + if (res) { + kfree(child); + return NULL; + } + list_add_tail(&child->siblings, &parent->ex_dev.children); + return child; +} + +static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; + struct domain_device *child = NULL; + int res = 0; + + /* Phy state */ + if (ex_phy->linkrate == PHY_SPINUP_HOLD) { + if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET)) + res = sas_ex_phy_discover(dev, phy_id); + if (res) + return res; + } + + /* Parent and domain coherency */ + if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == + 
SAS_ADDR(dev->port->sas_addr))) { + sas_add_parent_port(dev, phy_id); + return 0; + } + if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == + SAS_ADDR(dev->parent->sas_addr))) { + sas_add_parent_port(dev, phy_id); + if (ex_phy->routing_attr == TABLE_ROUTING) + sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); + return 0; + } + + if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) + sas_ex_disable_port(dev, ex_phy->attached_sas_addr); + + if (ex_phy->attached_dev_type == NO_DEVICE) { + if (ex_phy->routing_attr == DIRECT_ROUTING) { + memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + sas_configure_routing(dev, ex_phy->attached_sas_addr); + } + return 0; + } else if (ex_phy->linkrate == PHY_LINKRATE_UNKNOWN) + return 0; + + if (ex_phy->attached_dev_type != SAS_END_DEV && + ex_phy->attached_dev_type != FANOUT_DEV && + ex_phy->attached_dev_type != EDGE_DEV) { + SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " + "phy 0x%x\n", ex_phy->attached_dev_type, + SAS_ADDR(dev->sas_addr), + phy_id); + return 0; + } + + res = sas_configure_routing(dev, ex_phy->attached_sas_addr); + if (res) { + SAS_DPRINTK("configure routing for dev %016llx " + "reported 0x%x. Forgotten\n", + SAS_ADDR(ex_phy->attached_sas_addr), res); + sas_disable_routing(dev, ex_phy->attached_sas_addr); + return res; + } + + switch (ex_phy->attached_dev_type) { + case SAS_END_DEV: + child = sas_ex_discover_end_dev(dev, phy_id); + break; + case FANOUT_DEV: + if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { + SAS_DPRINTK("second fanout expander %016llx phy 0x%x " + "attached to ex %016llx phy 0x%x\n", + SAS_ADDR(ex_phy->attached_sas_addr), + ex_phy->attached_phy_id, + SAS_ADDR(dev->sas_addr), + phy_id); + sas_ex_disable_phy(dev, phy_id); + break; + } else + memcpy(dev->port->disc.fanout_sas_addr, + ex_phy->attached_sas_addr, SAS_ADDR_SIZE); + /* fallthrough */ + case EDGE_DEV: + child = sas_ex_discover_expander(dev, phy_id); + break; + default: + break; + } + + if (child) { + int i; + + for (i = 0; i < ex->num_phys; i++) { + if (ex->ex_phy[i].phy_state == PHY_VACANT || + ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == + SAS_ADDR(child->sas_addr)) + ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; + } + } + + return res; +} + +static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if ((phy->attached_dev_type == EDGE_DEV || + phy->attached_dev_type == FANOUT_DEV) && + phy->routing_attr == SUBTRACTIVE_ROUTING) { + + memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); + + return 1; + } + } + return 0; +} + +static int sas_check_level_subtractive_boundary(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + struct domain_device *child; + u8 sub_addr[8] = {0, }; + + list_for_each_entry(child, &ex->children, siblings) { + if (child->dev_type != EDGE_DEV && + child->dev_type != FANOUT_DEV) + continue; + if (sub_addr[0] == 0) { + sas_find_sub_addr(child, sub_addr); + continue; + } else { + u8 s2[8]; + + if (sas_find_sub_addr(child, s2) && + (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { + + SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " + "diverges from subtractive " + "boundary %016llx\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(child->sas_addr), + SAS_ADDR(s2), + 
SAS_ADDR(sub_addr)); + + sas_ex_disable_port(child, s2); + } + } + } + return 0; +} +/** + * sas_ex_discover_devices -- discover devices attached to this expander + * dev: pointer to the expander domain device + * single: if you want to do a single phy, else set to -1; + * + * Configure this expander for use with its devices and register the + * devices of this expander. + */ +static int sas_ex_discover_devices(struct domain_device *dev, int single) +{ + struct expander_device *ex = &dev->ex_dev; + int i = 0, end = ex->num_phys; + int res = 0; + + if (0 <= single && single < end) { + i = single; + end = i+1; + } + + for ( ; i < end; i++) { + struct ex_phy *ex_phy = &ex->ex_phy[i]; + + if (ex_phy->phy_state == PHY_VACANT || + ex_phy->phy_state == PHY_NOT_PRESENT || + ex_phy->phy_state == PHY_DEVICE_DISCOVERED) + continue; + + switch (ex_phy->linkrate) { + case PHY_DISABLED: + case PHY_RESET_PROBLEM: + case PHY_PORT_SELECTOR: + continue; + default: + res = sas_ex_discover_dev(dev, i); + if (res) + break; + continue; + } + } + + if (!res) + sas_check_level_subtractive_boundary(dev); + + return res; +} + +static int sas_check_ex_subtractive_boundary(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + u8 *sub_sas_addr = NULL; + + if (dev->dev_type != EDGE_DEV) + return 0; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if ((phy->attached_dev_type == FANOUT_DEV || + phy->attached_dev_type == EDGE_DEV) && + phy->routing_attr == SUBTRACTIVE_ROUTING) { + + if (!sub_sas_addr) + sub_sas_addr = &phy->attached_sas_addr[0]; + else if (SAS_ADDR(sub_sas_addr) != + SAS_ADDR(phy->attached_sas_addr)) { + + SAS_DPRINTK("ex %016llx phy 0x%x " + "diverges(%016llx) on subtractive " + "boundary(%016llx). 
Disabled\n", + SAS_ADDR(dev->sas_addr), i, + SAS_ADDR(phy->attached_sas_addr), + SAS_ADDR(sub_sas_addr)); + sas_ex_disable_phy(dev, i); + } + } + } + return 0; +} + +static void sas_print_parent_topology_bug(struct domain_device *child, + struct ex_phy *parent_phy, + struct ex_phy *child_phy) +{ + static const char ra_char[] = { + [DIRECT_ROUTING] = 'D', + [SUBTRACTIVE_ROUTING] = 'S', + [TABLE_ROUTING] = 'T', + }; + static const char *ex_type[] = { + [EDGE_DEV] = "edge", + [FANOUT_DEV] = "fanout", + }; + struct domain_device *parent = child->parent; + + sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x " + "has %c:%c routing link!\n", + + ex_type[parent->dev_type], + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + + ex_type[child->dev_type], + SAS_ADDR(child->sas_addr), + child_phy->phy_id, + + ra_char[parent_phy->routing_attr], + ra_char[child_phy->routing_attr]); +} + +static int sas_check_eeds(struct domain_device *child, + struct ex_phy *parent_phy, + struct ex_phy *child_phy) +{ + int res = 0; + struct domain_device *parent = child->parent; + + if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { + res = -ENODEV; + SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " + "phy S:0x%x, while there is a fanout ex %016llx\n", + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + SAS_ADDR(child->sas_addr), + child_phy->phy_id, + SAS_ADDR(parent->port->disc.fanout_sas_addr)); + } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { + memcpy(parent->port->disc.eeds_a, parent->sas_addr, + SAS_ADDR_SIZE); + memcpy(parent->port->disc.eeds_b, child->sas_addr, + SAS_ADDR_SIZE); + } else if (((SAS_ADDR(parent->port->disc.eeds_a) == + SAS_ADDR(parent->sas_addr)) || + (SAS_ADDR(parent->port->disc.eeds_a) == + SAS_ADDR(child->sas_addr))) + && + ((SAS_ADDR(parent->port->disc.eeds_b) == + SAS_ADDR(parent->sas_addr)) || + (SAS_ADDR(parent->port->disc.eeds_b) == + SAS_ADDR(child->sas_addr)))) + ; + else { + res = -ENODEV; + SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " + "phy 0x%x link forms a third EEDS!\n", + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + SAS_ADDR(child->sas_addr), + child_phy->phy_id); + } + + return res; +} + +/* Here we spill over 80 columns. It is intentional. 
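+ * The checks below compare the routing attribute (direct, subtractive or
+ * table) on each end of every parent<->child expander link and report
+ * any combination the SAS topology rules do not allow.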
+ */ +static int sas_check_parent_topology(struct domain_device *child) +{ + struct expander_device *child_ex = &child->ex_dev; + struct expander_device *parent_ex; + int i; + int res = 0; + + if (!child->parent) + return 0; + + if (child->parent->dev_type != EDGE_DEV && + child->parent->dev_type != FANOUT_DEV) + return 0; + + parent_ex = &child->parent->ex_dev; + + for (i = 0; i < parent_ex->num_phys; i++) { + struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; + struct ex_phy *child_phy; + + if (parent_phy->phy_state == PHY_VACANT || + parent_phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) + continue; + + child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; + + switch (child->parent->dev_type) { + case EDGE_DEV: + if (child->dev_type == FANOUT_DEV) { + if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || + child_phy->routing_attr != TABLE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { + if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { + res = sas_check_eeds(child, parent_phy, child_phy); + } else if (child_phy->routing_attr != TABLE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + } else if (parent_phy->routing_attr == TABLE_ROUTING && + child_phy->routing_attr != SUBTRACTIVE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + break; + case FANOUT_DEV: + if (parent_phy->routing_attr != TABLE_ROUTING || + child_phy->routing_attr != SUBTRACTIVE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + break; + default: + break; + } + } + + return res; +} + +#define RRI_REQ_SIZE 16 +#define RRI_RESP_SIZE 44 + +static int sas_configure_present(struct domain_device *dev, int phy_id, + u8 *sas_addr, int *index, int *present) +{ + int i, res = 0; + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + u8 *rri_req; + u8 *rri_resp; + + *present = 0; + *index = 0; + + rri_req = alloc_smp_req(RRI_REQ_SIZE); + if (!rri_req) + return -ENOMEM; + + rri_resp = alloc_smp_resp(RRI_RESP_SIZE); + if (!rri_resp) { + kfree(rri_req); + return -ENOMEM; + } + + rri_req[1] = SMP_REPORT_ROUTE_INFO; + rri_req[9] = phy_id; + + for (i = 0; i < ex->max_route_indexes ; i++) { + *(__be16 *)(rri_req+6) = cpu_to_be16(i); + res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, + RRI_RESP_SIZE); + if (res) + goto out; + res = rri_resp[2]; + if (res == SMP_RESP_NO_INDEX) { + SAS_DPRINTK("overflow of indexes: dev %016llx " + "phy 0x%x index 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, i); + goto out; + } else if (res != SMP_RESP_FUNC_ACC) { + SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " + "result 0x%x\n", __FUNCTION__, + SAS_ADDR(dev->sas_addr), phy_id, i, res); + goto out; + } + if (SAS_ADDR(sas_addr) != 0) { + if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { + *index = i; + if ((rri_resp[12] & 0x80) == 0x80) + *present = 0; + else + *present = 1; + goto out; + } else if (SAS_ADDR(rri_resp+16) == 0) { + *index = i; + *present = 0; + goto out; + } + } else if (SAS_ADDR(rri_resp+16) == 0 && + phy->last_da_index < i) { + phy->last_da_index = i; + *index = i; + *present = 0; + goto out; + } + } + res = -1; +out: + kfree(rri_req); + kfree(rri_resp); + return res; +} + +#define CRI_REQ_SIZE 44 +#define CRI_RESP_SIZE 8 + +static int 
sas_configure_set(struct domain_device *dev, int phy_id, + u8 *sas_addr, int index, int include) +{ + int res; + u8 *cri_req; + u8 *cri_resp; + + cri_req = alloc_smp_req(CRI_REQ_SIZE); + if (!cri_req) + return -ENOMEM; + + cri_resp = alloc_smp_resp(CRI_RESP_SIZE); + if (!cri_resp) { + kfree(cri_req); + return -ENOMEM; + } + + cri_req[1] = SMP_CONF_ROUTE_INFO; + *(__be16 *)(cri_req+6) = cpu_to_be16(index); + cri_req[9] = phy_id; + if (SAS_ADDR(sas_addr) == 0 || !include) + cri_req[12] |= 0x80; + memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); + + res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, + CRI_RESP_SIZE); + if (res) + goto out; + res = cri_resp[2]; + if (res == SMP_RESP_NO_INDEX) { + SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " + "index 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, index); + } +out: + kfree(cri_req); + kfree(cri_resp); + return res; +} + +static int sas_configure_phy(struct domain_device *dev, int phy_id, + u8 *sas_addr, int include) +{ + int index; + int present; + int res; + + res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); + if (res) + return res; + if (include ^ present) + return sas_configure_set(dev, phy_id, sas_addr, index,include); + + return res; +} + +/** + * sas_configure_parent -- configure routing table of parent + * parent: parent expander + * child: child expander + * sas_addr: SAS port identifier of device directly attached to child + */ +static int sas_configure_parent(struct domain_device *parent, + struct domain_device *child, + u8 *sas_addr, int include) +{ + struct expander_device *ex_parent = &parent->ex_dev; + int res = 0; + int i; + + if (parent->parent) { + res = sas_configure_parent(parent->parent, parent, sas_addr, + include); + if (res) + return res; + } + + if (ex_parent->conf_route_table == 0) { + SAS_DPRINTK("ex %016llx has self-configuring routing table\n", + SAS_ADDR(parent->sas_addr)); + return 0; + } + + for (i = 0; i < ex_parent->num_phys; i++) { + struct ex_phy *phy = &ex_parent->ex_phy[i]; + + if ((phy->routing_attr == TABLE_ROUTING) && + (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(child->sas_addr))) { + res = sas_configure_phy(parent, i, sas_addr, include); + if (res) + return res; + } + } + + return res; +} + +/** + * sas_configure_routing -- configure routing + * dev: expander device + * sas_addr: port identifier of device directly attached to the expander device + */ +static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) +{ + if (dev->parent) + return sas_configure_parent(dev->parent, dev, sas_addr, 1); + return 0; +} + +static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) +{ + if (dev->parent) + return sas_configure_parent(dev->parent, dev, sas_addr, 0); + return 0; +} + +#if 0 +#define SMP_BIN_ATTR_NAME "smp_portal" + +static void sas_ex_smp_hook(struct domain_device *dev) +{ + struct expander_device *ex_dev = &dev->ex_dev; + struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr; + + memset(bin_attr, 0, sizeof(*bin_attr)); + + bin_attr->attr.name = SMP_BIN_ATTR_NAME; + bin_attr->attr.owner = THIS_MODULE; + bin_attr->attr.mode = 0600; + + bin_attr->size = 0; + bin_attr->private = NULL; + bin_attr->read = smp_portal_read; + bin_attr->write= smp_portal_write; + bin_attr->mmap = NULL; + + ex_dev->smp_portal_pid = -1; + init_MUTEX(&ex_dev->smp_sema); +} +#endif + +/** + * sas_discover_expander -- expander discovery + * @ex: pointer to expander domain device + * + * See comment in sas_discover_sata(). 
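+ *
+ * Reports the new expander to the LLDD, reads its REPORT GENERAL and
+ * REPORT MANUFACTURER INFORMATION data over SMP, runs DISCOVER on every
+ * phy, and then validates the subtractive boundary and the parent
+ * topology.  On failure the LLDD is notified that the device is gone.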
+ */ +static int sas_discover_expander(struct domain_device *dev) +{ + int res; + + res = sas_notify_lldd_dev_found(dev); + if (res) + return res; + + res = sas_ex_general(dev); + if (res) + goto out_err; + res = sas_ex_manuf_info(dev); + if (res) + goto out_err; + + res = sas_expander_discover(dev); + if (res) { + SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", + SAS_ADDR(dev->sas_addr), res); + goto out_err; + } + + sas_check_ex_subtractive_boundary(dev); + res = sas_check_parent_topology(dev); + if (res) + goto out_err; + return 0; +out_err: + sas_notify_lldd_dev_gone(dev); + return res; +} + +static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) +{ + int res = 0; + struct domain_device *dev; + + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (dev->dev_type == EDGE_DEV || + dev->dev_type == FANOUT_DEV) { + struct sas_expander_device *ex = + rphy_to_expander_device(dev->rphy); + + if (level == ex->level) + res = sas_ex_discover_devices(dev, -1); + else if (level > 0) + res = sas_ex_discover_devices(port->port_dev, -1); + + } + } + + return res; +} + +static int sas_ex_bfs_disc(struct asd_sas_port *port) +{ + int res; + int level; + + do { + level = port->disc.max_level; + res = sas_ex_level_discovery(port, level); + mb(); + } while (level < port->disc.max_level); + + return res; +} + +int sas_discover_root_expander(struct domain_device *dev) +{ + int res; + struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); + + sas_rphy_add(dev->rphy); + + ex->level = dev->port->disc.max_level; /* 0 */ + res = sas_discover_expander(dev); + if (!res) + sas_ex_bfs_disc(dev->port); + + return res; +} + +/* ---------- Domain revalidation ---------- */ + +static int sas_get_phy_discover(struct domain_device *dev, + int phy_id, struct smp_resp *disc_resp) +{ + int res; + u8 *disc_req; + + disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); + if (!disc_req) + return -ENOMEM; + + disc_req[1] = SMP_DISCOVER; + disc_req[9] = phy_id; + + res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, + disc_resp, DISCOVER_RESP_SIZE); + if (res) + goto out; + else if (disc_resp->result != SMP_RESP_FUNC_ACC) { + res = disc_resp->result; + goto out; + } +out: + kfree(disc_req); + return res; +} + +static int sas_get_phy_change_count(struct domain_device *dev, + int phy_id, int *pcc) +{ + int res; + struct smp_resp *disc_resp; + + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) + return -ENOMEM; + + res = sas_get_phy_discover(dev, phy_id, disc_resp); + if (!res) + *pcc = disc_resp->disc.change_count; + + kfree(disc_resp); + return res; +} + +static int sas_get_phy_attached_sas_addr(struct domain_device *dev, + int phy_id, u8 *attached_sas_addr) +{ + int res; + struct smp_resp *disc_resp; + struct discover_resp *dr; + + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) + return -ENOMEM; + dr = &disc_resp->disc; + + res = sas_get_phy_discover(dev, phy_id, disc_resp); + if (!res) { + memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8); + if (dr->attached_dev_type == 0) + memset(attached_sas_addr, 0, 8); + } + kfree(disc_resp); + return res; +} + +static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, + int from_phy) +{ + struct expander_device *ex = &dev->ex_dev; + int res = 0; + int i; + + for (i = from_phy; i < ex->num_phys; i++) { + int phy_change_count = 0; + + res = sas_get_phy_change_count(dev, i, &phy_change_count); + if (res) + goto out; + else if (phy_change_count != ex->ex_phy[i].phy_change_count) { + 
ex->ex_phy[i].phy_change_count = phy_change_count; + *phy_id = i; + return 0; + } + } +out: + return res; +} + +static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) +{ + int res; + u8 *rg_req; + struct smp_resp *rg_resp; + + rg_req = alloc_smp_req(RG_REQ_SIZE); + if (!rg_req) + return -ENOMEM; + + rg_resp = alloc_smp_resp(RG_RESP_SIZE); + if (!rg_resp) { + kfree(rg_req); + return -ENOMEM; + } + + rg_req[1] = SMP_REPORT_GENERAL; + + res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, + RG_RESP_SIZE); + if (res) + goto out; + if (rg_resp->result != SMP_RESP_FUNC_ACC) { + res = rg_resp->result; + goto out; + } + + *ecc = be16_to_cpu(rg_resp->rg.change_count); +out: + kfree(rg_resp); + kfree(rg_req); + return res; +} + +static int sas_find_bcast_dev(struct domain_device *dev, + struct domain_device **src_dev) +{ + struct expander_device *ex = &dev->ex_dev; + int ex_change_count = -1; + int res; + + res = sas_get_ex_change_count(dev, &ex_change_count); + if (res) + goto out; + if (ex_change_count != -1 && + ex_change_count != ex->ex_change_count) { + *src_dev = dev; + ex->ex_change_count = ex_change_count; + } else { + struct domain_device *ch; + + list_for_each_entry(ch, &ex->children, siblings) { + if (ch->dev_type == EDGE_DEV || + ch->dev_type == FANOUT_DEV) { + res = sas_find_bcast_dev(ch, src_dev); + if (src_dev) + return res; + } + } + } +out: + return res; +} + +static void sas_unregister_ex_tree(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + struct domain_device *child, *n; + + list_for_each_entry_safe(child, n, &ex->children, siblings) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) + sas_unregister_ex_tree(child); + else + sas_unregister_dev(child); + } + sas_unregister_dev(dev); +} + +static void sas_unregister_devs_sas_addr(struct domain_device *parent, + int phy_id) +{ + struct expander_device *ex_dev = &parent->ex_dev; + struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; + struct domain_device *child, *n; + + list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { + if (SAS_ADDR(child->sas_addr) == + SAS_ADDR(phy->attached_sas_addr)) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) + sas_unregister_ex_tree(child); + else + sas_unregister_dev(child); + break; + } + } + sas_disable_routing(parent, phy->attached_sas_addr); + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + sas_port_delete_phy(phy->port, phy->phy); + if (phy->port->num_phys == 0) + sas_port_delete(phy->port); + phy->port = NULL; +} + +static int sas_discover_bfs_by_root_level(struct domain_device *root, + const int level) +{ + struct expander_device *ex_root = &root->ex_dev; + struct domain_device *child; + int res = 0; + + list_for_each_entry(child, &ex_root->children, siblings) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) { + struct sas_expander_device *ex = + rphy_to_expander_device(child->rphy); + + if (level > ex->level) + res = sas_discover_bfs_by_root_level(child, + level); + else if (level == ex->level) + res = sas_ex_discover_devices(child, -1); + } + } + return res; +} + +static int sas_discover_bfs_by_root(struct domain_device *dev) +{ + int res; + struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); + int level = ex->level+1; + + res = sas_ex_discover_devices(dev, -1); + if (res) + goto out; + do { + res = sas_discover_bfs_by_root_level(dev, level); + mb(); + level += 1; + } while (level <= dev->port->disc.max_level); +out: + return res; +} + +static int 
sas_discover_new(struct domain_device *dev, int phy_id) +{ + struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; + struct domain_device *child; + int res; + + SAS_DPRINTK("ex %016llx phy%d new device attached\n", + SAS_ADDR(dev->sas_addr), phy_id); + res = sas_ex_phy_discover(dev, phy_id); + if (res) + goto out; + res = sas_ex_discover_devices(dev, phy_id); + if (res) + goto out; + list_for_each_entry(child, &dev->ex_dev.children, siblings) { + if (SAS_ADDR(child->sas_addr) == + SAS_ADDR(ex_phy->attached_sas_addr)) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) + res = sas_discover_bfs_by_root(child); + break; + } + } +out: + return res; +} + +static int sas_rediscover_dev(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + u8 attached_sas_addr[8]; + int res; + + res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr); + switch (res) { + case SMP_RESP_NO_PHY: + phy->phy_state = PHY_NOT_PRESENT; + sas_unregister_devs_sas_addr(dev, phy_id); + goto out; break; + case SMP_RESP_PHY_VACANT: + phy->phy_state = PHY_VACANT; + sas_unregister_devs_sas_addr(dev, phy_id); + goto out; break; + case SMP_RESP_FUNC_ACC: + break; + } + + if (SAS_ADDR(attached_sas_addr) == 0) { + phy->phy_state = PHY_EMPTY; + sas_unregister_devs_sas_addr(dev, phy_id); + } else if (SAS_ADDR(attached_sas_addr) == + SAS_ADDR(phy->attached_sas_addr)) { + SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", + SAS_ADDR(dev->sas_addr), phy_id); + } else + res = sas_discover_new(dev, phy_id); +out: + return res; +} + +static int sas_rediscover(struct domain_device *dev, const int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; + int res = 0; + int i; + + SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", + SAS_ADDR(dev->sas_addr), phy_id); + + if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (i == phy_id) + continue; + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(changed_phy->attached_sas_addr)) { + SAS_DPRINTK("phy%d part of wide port with " + "phy%d\n", phy_id, i); + goto out; + } + } + res = sas_rediscover_dev(dev, phy_id); + } else + res = sas_discover_new(dev, phy_id); +out: + return res; +} + +/** + * sas_revalidate_domain -- revalidate the domain + * @port: port to the domain of interest + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * Discover process only interrogates devices in order to discover the + * domain. 
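+ *
+ * Locates the expander whose REPORT GENERAL change count differs from
+ * the cached value, then walks its phys and rediscovers only those
+ * whose DISCOVER change counts have changed.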
+ */ +int sas_ex_revalidate_domain(struct domain_device *port_dev) +{ + int res; + struct domain_device *dev = NULL; + + res = sas_find_bcast_dev(port_dev, &dev); + if (res) + goto out; + if (dev) { + struct expander_device *ex = &dev->ex_dev; + int i = 0, phy_id; + + do { + phy_id = -1; + res = sas_find_bcast_phy(dev, &phy_id, i); + if (phy_id == -1) + break; + res = sas_rediscover(dev, phy_id); + i = phy_id + 1; + } while (i < ex->num_phys); + } +out: + return res; +} + +#if 0 +/* ---------- SMP portal ---------- */ + +static ssize_t smp_portal_write(struct kobject *kobj, char *buf, loff_t offs, + size_t size) +{ + struct domain_device *dev = to_dom_device(kobj); + struct expander_device *ex = &dev->ex_dev; + + if (offs != 0) + return -EFBIG; + else if (size == 0) + return 0; + + down_interruptible(&ex->smp_sema); + if (ex->smp_req) + kfree(ex->smp_req); + ex->smp_req = kzalloc(size, GFP_USER); + if (!ex->smp_req) { + up(&ex->smp_sema); + return -ENOMEM; + } + memcpy(ex->smp_req, buf, size); + ex->smp_req_size = size; + ex->smp_portal_pid = current->pid; + up(&ex->smp_sema); + + return size; +} + +static ssize_t smp_portal_read(struct kobject *kobj, char *buf, loff_t offs, + size_t size) +{ + struct domain_device *dev = to_dom_device(kobj); + struct expander_device *ex = &dev->ex_dev; + u8 *smp_resp; + int res = -EINVAL; + + /* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact + * it should be 0. + */ + + down_interruptible(&ex->smp_sema); + if (!ex->smp_req || ex->smp_portal_pid != current->pid) + goto out; + + res = 0; + if (size == 0) + goto out; + + res = -ENOMEM; + smp_resp = alloc_smp_resp(size); + if (!smp_resp) + goto out; + res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size, + smp_resp, size); + if (!res) { + memcpy(buf, smp_resp, size); + res = size; + } + + kfree(smp_resp); +out: + kfree(ex->smp_req); + ex->smp_req = NULL; + ex->smp_req_size = 0; + ex->smp_portal_pid = -1; + up(&ex->smp_sema); + return res; +} +#endif diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c new file mode 100644 index 0000000..b961664 --- /dev/null +++ b/drivers/scsi/libsas/sas_init.c @@ -0,0 +1,227 @@ +/* + * Serial Attached SCSI (SAS) Transport Layer initialization + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sas_internal.h" + +#include "../scsi_sas_internal.h" + +kmem_cache_t *sas_task_cache; + +/*------------ SAS addr hash -----------*/ +void sas_hash_addr(u8 *hashed, const u8 *sas_addr) +{ + const u32 poly = 0x00DB2777; + u32 r = 0; + int i; + + for (i = 0; i < 8; i++) { + int b; + for (b = 7; b >= 0; b--) { + r <<= 1; + if ((1 << b) & sas_addr[i]) { + if (!(r & 0x01000000)) + r ^= poly; + } else if (r & 0x01000000) + r ^= poly; + } + } + + hashed[0] = (r >> 16) & 0xFF; + hashed[1] = (r >> 8) & 0xFF ; + hashed[2] = r & 0xFF; +} + + +/* ---------- HA events ---------- */ + +void sas_hae_reset(void *data) +{ + struct sas_ha_struct *ha = data; + + sas_begin_event(HAE_RESET, &ha->event_lock, + &ha->pending); +} + +int sas_register_ha(struct sas_ha_struct *sas_ha) +{ + int error = 0; + + spin_lock_init(&sas_ha->phy_port_lock); + sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); + + if (sas_ha->lldd_queue_size == 0) + sas_ha->lldd_queue_size = 1; + else if (sas_ha->lldd_queue_size == -1) + sas_ha->lldd_queue_size = 128; /* Sanity */ + + error = sas_register_phys(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't register sas phys:%d\n", error); + return error; + } + + error = sas_register_ports(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't register sas ports:%d\n", error); + goto Undo_phys; + } + + error = sas_init_events(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't start event thread:%d\n", error); + goto Undo_ports; + } + + if (sas_ha->lldd_max_execute_num > 1) { + error = sas_init_queue(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't start queue thread:%d, " + "running in direct mode\n", error); + sas_ha->lldd_max_execute_num = 1; + } + } + + return 0; + +Undo_ports: + sas_unregister_ports(sas_ha); +Undo_phys: + + return error; +} + +int sas_unregister_ha(struct sas_ha_struct *sas_ha) +{ + if (sas_ha->lldd_max_execute_num > 1) { + sas_shutdown_queue(sas_ha); + } + + sas_unregister_ports(sas_ha); + + return 0; +} + +static int sas_get_linkerrors(struct sas_phy *phy) +{ + if (scsi_is_sas_phy_local(phy)) + /* FIXME: we have no local phy stats + * gathering at this time */ + return -EINVAL; + + return sas_smp_get_phy_events(phy); +} + +static int sas_phy_reset(struct sas_phy *phy, int hard_reset) +{ + int ret; + enum phy_func reset_type; + + if (hard_reset) + reset_type = PHY_FUNC_HARD_RESET; + else + reset_type = PHY_FUNC_LINK_RESET; + + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + ret = i->dft->lldd_control_phy(asd_phy, reset_type); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + ret = sas_smp_phy_control(ddev, phy->number, reset_type); + } + return ret; +} + +static struct sas_function_template sft = { + .phy_reset = sas_phy_reset, + .get_linkerrors = sas_get_linkerrors, +}; + +struct scsi_transport_template * +sas_domain_attach_transport(struct sas_domain_function_template *dft) +{ + struct scsi_transport_template *stt 
= sas_attach_transport(&sft); + struct sas_internal *i; + + if (!stt) + return stt; + + i = to_sas_internal(stt); + i->dft = dft; + stt->create_work_queue = 1; + stt->eh_timed_out = sas_scsi_timed_out; + stt->eh_strategy_handler = sas_scsi_recover_host; + + return stt; +} +EXPORT_SYMBOL_GPL(sas_domain_attach_transport); + + +void sas_domain_release_transport(struct scsi_transport_template *stt) +{ + sas_release_transport(stt); +} +EXPORT_SYMBOL_GPL(sas_domain_release_transport); + +/* ---------- SAS Class register/unregister ---------- */ + +static int __init sas_class_init(void) +{ + sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!sas_task_cache) + return -ENOMEM; + + return 0; +} + +static void __exit sas_class_exit(void) +{ + kmem_cache_destroy(sas_task_cache); +} + +MODULE_AUTHOR("Luben Tuikov "); +MODULE_DESCRIPTION("SAS Transport Layer"); +MODULE_LICENSE("GPL v2"); + +module_init(sas_class_init); +module_exit(sas_class_exit); + +EXPORT_SYMBOL_GPL(sas_register_ha); +EXPORT_SYMBOL_GPL(sas_unregister_ha); diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h new file mode 100644 index 0000000..89c3976 --- /dev/null +++ b/drivers/scsi/libsas/sas_internal.h @@ -0,0 +1,146 @@ +/* + * Serial Attached SCSI (SAS) class internal header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#ifndef _SAS_INTERNAL_H_ +#define _SAS_INTERNAL_H_ + +#include +#include +#include +#include + +#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) + +#ifdef SAS_DEBUG +#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) +#else +#define SAS_DPRINTK(fmt, ...) 
+#endif + +void sas_scsi_recover_host(struct Scsi_Host *shost); + +int sas_show_class(enum sas_class class, char *buf); +int sas_show_proto(enum sas_proto proto, char *buf); +int sas_show_linkrate(enum sas_phy_linkrate linkrate, char *buf); +int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf); + +int sas_register_phys(struct sas_ha_struct *sas_ha); +void sas_unregister_phys(struct sas_ha_struct *sas_ha); + +int sas_register_ports(struct sas_ha_struct *sas_ha); +void sas_unregister_ports(struct sas_ha_struct *sas_ha); + +enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); + +int sas_init_queue(struct sas_ha_struct *sas_ha); +int sas_init_events(struct sas_ha_struct *sas_ha); +void sas_shutdown_queue(struct sas_ha_struct *sas_ha); + +void sas_deform_port(struct asd_sas_phy *phy); + +void sas_porte_bytes_dmaed(void *); +void sas_porte_broadcast_rcvd(void *); +void sas_porte_link_reset_err(void *); +void sas_porte_timer_event(void *); +void sas_porte_hard_reset(void *); + +int sas_notify_lldd_dev_found(struct domain_device *); +void sas_notify_lldd_dev_gone(struct domain_device *); + +int sas_smp_phy_control(struct domain_device *dev, int phy_id, + enum phy_func phy_func); +int sas_smp_get_phy_events(struct sas_phy *phy); + +struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); + +void sas_hae_reset(void *); + +static inline void sas_queue_event(int event, spinlock_t *lock, + unsigned long *pending, + struct work_struct *work, + struct Scsi_Host *shost) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + if (test_bit(event, pending)) { + spin_unlock_irqrestore(lock, flags); + return; + } + __set_bit(event, pending); + spin_unlock_irqrestore(lock, flags); + scsi_queue_work(shost, work); +} + +static inline void sas_begin_event(int event, spinlock_t *lock, + unsigned long *pending) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + __clear_bit(event, pending); + spin_unlock_irqrestore(lock, flags); +} + +static inline void sas_fill_in_rphy(struct domain_device *dev, + struct sas_rphy *rphy) +{ + rphy->identify.sas_address = SAS_ADDR(dev->sas_addr); + rphy->identify.initiator_port_protocols = dev->iproto; + rphy->identify.target_port_protocols = dev->tproto; + switch (dev->dev_type) { + case SATA_DEV: + /* FIXME: need sata device type */ + case SAS_END_DEV: + rphy->identify.device_type = SAS_END_DEVICE; + break; + case EDGE_DEV: + rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case FANOUT_DEV: + rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + default: + rphy->identify.device_type = SAS_PHY_UNUSED; + break; + } +} + +static inline void sas_add_parent_port(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; + + if (!ex->parent_port) { + ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id); + /* FIXME: error handling */ + BUG_ON(!ex->parent_port); + BUG_ON(sas_port_add(ex->parent_port)); + sas_port_mark_backlink(ex->parent_port); + } + sas_port_add_phy(ex->parent_port, ex_phy->phy); +} + +#endif /* _SAS_INTERNAL_H_ */ diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c new file mode 100644 index 0000000..024ab00 --- /dev/null +++ b/drivers/scsi/libsas/sas_phy.c @@ -0,0 +1,157 @@ +/* + * Serial Attached SCSI (SAS) Phy class + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_internal.h" +#include +#include +#include +#include "../scsi_sas_internal.h" + +/* ---------- Phy events ---------- */ + +static void sas_phye_loss_of_signal(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, + &phy->phy_events_pending); + phy->error = 0; + sas_deform_port(phy); +} + +static void sas_phye_oob_done(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, + &phy->phy_events_pending); + phy->error = 0; +} + +static void sas_phye_oob_error(void *data) +{ + struct asd_sas_phy *phy = data; + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock, + &phy->phy_events_pending); + + sas_deform_port(phy); + + if (!port && phy->enabled && i->dft->lldd_control_phy) { + phy->error++; + switch (phy->error) { + case 1: + case 2: + i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET); + break; + case 3: + default: + phy->error = 0; + phy->enabled = 0; + i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE); + break; + } + } +} + +static void sas_phye_spinup_hold(void *data) +{ + struct asd_sas_phy *phy = data; + struct sas_ha_struct *sas_ha = phy->ha; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock, + &phy->phy_events_pending); + + phy->error = 0; + i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD); +} + +/* ---------- Phy class registration ---------- */ + +int sas_register_phys(struct sas_ha_struct *sas_ha) +{ + int i; + + static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { + [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, + [PHYE_OOB_DONE] = sas_phye_oob_done, + [PHYE_OOB_ERROR] = sas_phye_oob_error, + [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, + }; + + static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { + [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, + [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, + [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, + [PORTE_TIMER_EVENT] = sas_porte_timer_event, + [PORTE_HARD_RESET] = sas_porte_hard_reset, + }; + + /* Now register the phys. 
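+ * Each phy gets its port and phy event work entries initialised, a
+ * sas_phy transport object allocated with its identify and link rate
+ * fields filled in, and is then registered with the SAS transport class.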
*/ + for (i = 0; i < sas_ha->num_phys; i++) { + int k; + struct asd_sas_phy *phy = sas_ha->sas_phy[i]; + + phy->error = 0; + INIT_LIST_HEAD(&phy->port_phy_el); + for (k = 0; k < PORT_NUM_EVENTS; k++) + INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], + phy); + + for (k = 0; k < PHY_NUM_EVENTS; k++) + INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k], + phy); + phy->port = NULL; + phy->ha = sas_ha; + spin_lock_init(&phy->frame_rcvd_lock); + spin_lock_init(&phy->sas_prim_lock); + phy->frame_rcvd_size = 0; + + phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, + i); + if (!phy->phy) + return -ENOMEM; + + phy->phy->identify.initiator_port_protocols = + phy->iproto; + phy->phy->identify.target_port_protocols = phy->tproto; + phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr); + phy->phy->identify.phy_identifier = i; + phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + phy->phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS; + phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy_add(phy->phy); + } + + return 0; +} diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c new file mode 100644 index 0000000..253cdcf --- /dev/null +++ b/drivers/scsi/libsas/sas_port.c @@ -0,0 +1,279 @@ +/* + * Serial Attached SCSI (SAS) Port class + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_internal.h" + +#include +#include +#include "../scsi_sas_internal.h" + +/** + * sas_form_port -- add this phy to a port + * @phy: the phy of interest + * + * This function adds this phy to an existing port, thus creating a wide + * port, or it creates a port and adds the phy to the port. 
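+ *
+ * Runs from the PORTE_BYTES_DMAED port event.  The phy is matched by
+ * attached SAS address to an existing port (forming a wide port) or
+ * claims a free one; the LLDD is then told about the port formation and
+ * domain discovery is scheduled.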
+ */ +static void sas_form_port(struct asd_sas_phy *phy) +{ + int i; + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *si = + to_sas_internal(sas_ha->core.shost->transportt); + + if (port) { + if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, + SAS_ADDR_SIZE) == 0) + sas_deform_port(phy); + else { + SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", + __FUNCTION__, phy->id, phy->port->id, + phy->port->num_phys); + return; + } + } + + /* find a port */ + spin_lock(&sas_ha->phy_port_lock); + for (i = 0; i < sas_ha->num_phys; i++) { + port = sas_ha->sas_port[i]; + spin_lock(&port->phy_list_lock); + if (*(u64 *) port->sas_addr && + memcmp(port->attached_sas_addr, + phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 && + port->num_phys > 0) { + /* wide port */ + SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, + port->id); + break; + } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) { + memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE); + break; + } + spin_unlock(&port->phy_list_lock); + } + + if (i >= sas_ha->num_phys) { + printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", + __FUNCTION__); + spin_unlock(&sas_ha->phy_port_lock); + return; + } + + /* add the phy to the port */ + list_add_tail(&phy->port_phy_el, &port->phy_list); + phy->port = port; + port->num_phys++; + port->phy_mask |= (1U << phy->id); + + if (!port->phy) + port->phy = phy->phy; + + SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id, + port->id, port->phy_mask); + + if (*(u64 *)port->attached_sas_addr == 0) { + port->class = phy->class; + memcpy(port->attached_sas_addr, phy->attached_sas_addr, + SAS_ADDR_SIZE); + port->iproto = phy->iproto; + port->tproto = phy->tproto; + port->oob_mode = phy->oob_mode; + port->linkrate = phy->linkrate; + } else + port->linkrate = max(port->linkrate, phy->linkrate); + spin_unlock(&port->phy_list_lock); + spin_unlock(&sas_ha->phy_port_lock); + + if (!port->port) { + port->port = sas_port_alloc(phy->phy->dev.parent, port->id); + BUG_ON(!port->port); + sas_port_add(port->port); + } + sas_port_add_phy(port->port, phy->phy); + + if (port->port_dev) + port->port_dev->pathways = port->num_phys; + + /* Tell the LLDD about this port formation. */ + if (si->dft->lldd_port_formed) + si->dft->lldd_port_formed(phy); + + sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN); +} + +/** + * sas_deform_port -- remove this phy from the port it belongs to + * @phy: the phy of interest + * + * This is called when the physical link to the other phy has been + * lost (on this phy), in Event thread context. We cannot delay here. 
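+ *
+ * If this is the last phy in the port, the domain devices are
+ * unregistered and the transport port is deleted; otherwise only this
+ * phy is removed from it.  The port bookkeeping (addresses, protocols,
+ * phy mask) is cleared once no phys remain.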
+ */ +void sas_deform_port(struct asd_sas_phy *phy) +{ + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *si = + to_sas_internal(sas_ha->core.shost->transportt); + + if (!port) + return; /* done by a phy event */ + + if (port->port_dev) + port->port_dev->pathways--; + + if (port->num_phys == 1) { + sas_unregister_domain_devices(port); + sas_port_delete(port->port); + port->port = NULL; + } else + sas_port_delete_phy(port->port, phy->phy); + + + if (si->dft->lldd_port_deformed) + si->dft->lldd_port_deformed(phy); + + spin_lock(&sas_ha->phy_port_lock); + spin_lock(&port->phy_list_lock); + + list_del_init(&phy->port_phy_el); + phy->port = NULL; + port->num_phys--; + port->phy_mask &= ~(1U << phy->id); + + if (port->num_phys == 0) { + INIT_LIST_HEAD(&port->phy_list); + memset(port->sas_addr, 0, SAS_ADDR_SIZE); + memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE); + port->class = 0; + port->iproto = 0; + port->tproto = 0; + port->oob_mode = 0; + port->phy_mask = 0; + } + spin_unlock(&port->phy_list_lock); + spin_unlock(&sas_ha->phy_port_lock); + + return; +} + +/* ---------- SAS port events ---------- */ + +void sas_porte_bytes_dmaed(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_form_port(phy); +} + +void sas_porte_broadcast_rcvd(void *data) +{ + unsigned long flags; + u32 prim; + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, + &phy->port_events_pending); + + spin_lock_irqsave(&phy->sas_prim_lock, flags); + prim = phy->sas_prim; + spin_unlock_irqrestore(&phy->sas_prim_lock, flags); + + SAS_DPRINTK("broadcast received: %d\n", prim); + sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); +} + +void sas_porte_link_reset_err(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_deform_port(phy); +} + +void sas_porte_timer_event(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_deform_port(phy); +} + +void sas_porte_hard_reset(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_deform_port(phy); +} + +/* ---------- SAS port registration ---------- */ + +static void sas_init_port(struct asd_sas_port *port, + struct sas_ha_struct *sas_ha, int i) +{ + port->id = i; + INIT_LIST_HEAD(&port->dev_list); + spin_lock_init(&port->phy_list_lock); + INIT_LIST_HEAD(&port->phy_list); + port->num_phys = 0; + port->phy_mask = 0; + port->ha = sas_ha; + + spin_lock_init(&port->dev_list_lock); +} + +int sas_register_ports(struct sas_ha_struct *sas_ha) +{ + int i; + + /* initialize the ports and discovery */ + for (i = 0; i < sas_ha->num_phys; i++) { + struct asd_sas_port *port = sas_ha->sas_port[i]; + + sas_init_port(port, sas_ha, i); + sas_init_disc(&port->disc, port); + } + return 0; +} + +void sas_unregister_ports(struct sas_ha_struct *sas_ha) +{ + int i; + + for (i = 0; i < sas_ha->num_phys; i++) + if (sas_ha->sas_phy[i]->port) + sas_deform_port(sas_ha->sas_phy[i]); + +} diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c new file mode 100644 index 0000000..43e0e4e --- /dev/null +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -0,0 +1,786 @@ +/* + * Serial Attached SCSI (SAS) class SCSI Host 
glue. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#include "sas_internal.h" + +#include +#include +#include +#include +#include +#include +#include "../scsi_sas_internal.h" + +#include +#include +#include + +/* ---------- SCSI Host glue ---------- */ + +#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble) +#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0) + +static void sas_scsi_task_done(struct sas_task *task) +{ + struct task_status_struct *ts = &task->task_status; + struct scsi_cmnd *sc = task->uldd_task; + unsigned ts_flags = task->task_state_flags; + int hs = 0, stat = 0; + + if (unlikely(!sc)) { + SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); + list_del_init(&task->list); + sas_free_task(task); + return; + } + + if (ts->resp == SAS_TASK_UNDELIVERED) { + /* transport error */ + hs = DID_NO_CONNECT; + } else { /* ts->resp == SAS_TASK_COMPLETE */ + /* task delivered, what happened afterwards? */ + switch (ts->stat) { + case SAS_DEV_NO_RESPONSE: + case SAS_INTERRUPTED: + case SAS_PHY_DOWN: + case SAS_NAK_R_ERR: + case SAS_OPEN_TO: + hs = DID_NO_CONNECT; + break; + case SAS_DATA_UNDERRUN: + sc->resid = ts->residual; + if (sc->request_bufflen - sc->resid < sc->underflow) + hs = DID_ERROR; + break; + case SAS_DATA_OVERRUN: + hs = DID_ERROR; + break; + case SAS_QUEUE_FULL: + hs = DID_SOFT_ERROR; /* retry */ + break; + case SAS_DEVICE_UNKNOWN: + hs = DID_BAD_TARGET; + break; + case SAS_SG_ERR: + hs = DID_PARITY; + break; + case SAS_OPEN_REJECT: + if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY) + hs = DID_SOFT_ERROR; /* retry */ + else + hs = DID_ERROR; + break; + case SAS_PROTO_RESPONSE: + SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP " + "task; please report this\n", + task->dev->port->ha->sas_ha_name); + break; + case SAS_ABORTED_TASK: + hs = DID_ABORT; + break; + case SAM_CHECK_COND: + memcpy(sc->sense_buffer, ts->buf, + max(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); + stat = SAM_CHECK_COND; + break; + default: + stat = ts->stat; + break; + } + } + ASSIGN_SAS_TASK(sc, NULL); + sc->result = (hs << 16) | stat; + list_del_init(&task->list); + sas_free_task(task); + /* This is very ugly but this is how SCSI Core works. 
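+ * A task flagged SAS_TASK_STATE_ABORTED must be completed through
+ * scsi_finish_command(); everything else goes through the normal
+ * ->scsi_done() callback.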
*/ + if (ts_flags & SAS_TASK_STATE_ABORTED) + scsi_finish_command(sc); + else + sc->scsi_done(sc); +} + +static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) +{ + enum task_attribute ta = TASK_ATTR_SIMPLE; + if (cmd->request && blk_rq_tagged(cmd->request)) { + if (cmd->device->ordered_tags && + (cmd->request->flags & REQ_HARDBARRIER)) + ta = TASK_ATTR_HOQ; + } + return ta; +} + +static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, + struct domain_device *dev, + unsigned long gfp_flags) +{ + struct sas_task *task = sas_alloc_task(gfp_flags); + struct scsi_lun lun; + + if (!task) + return NULL; + + *(u32 *)cmd->sense_buffer = 0; + task->uldd_task = cmd; + ASSIGN_SAS_TASK(cmd, task); + + task->dev = dev; + task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ + + task->ssp_task.retry_count = 1; + int_to_scsilun(cmd->device->lun, &lun); + memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); + task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd); + memcpy(task->ssp_task.cdb, cmd->cmnd, 16); + + task->scatter = cmd->request_buffer; + task->num_scatter = cmd->use_sg; + task->total_xfer_len = cmd->request_bufflen; + task->data_dir = cmd->sc_data_direction; + + task->task_done = sas_scsi_task_done; + + return task; +} + +static int sas_queue_up(struct sas_task *task) +{ + struct sas_ha_struct *sas_ha = task->dev->port->ha; + struct scsi_core *core = &sas_ha->core; + unsigned long flags; + LIST_HEAD(list); + + spin_lock_irqsave(&core->task_queue_lock, flags); + if (sas_ha->lldd_queue_size < core->task_queue_size + 1) { + spin_unlock_irqrestore(&core->task_queue_lock, flags); + return -SAS_QUEUE_FULL; + } + list_add_tail(&task->list, &core->task_queue); + core->task_queue_size += 1; + spin_unlock_irqrestore(&core->task_queue_lock, flags); + up(&core->queue_thread_sema); + + return 0; +} + +/** + * sas_queuecommand -- Enqueue a command for processing + * @parameters: See SCSI Core documentation + * + * Note: XXX: Remove the host unlock/lock pair when SCSI Core can + * call us without holding an IRQ spinlock... + */ +int sas_queuecommand(struct scsi_cmnd *cmd, + void (*scsi_done)(struct scsi_cmnd *)) +{ + int res = 0; + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct Scsi_Host *host = cmd->device->host; + struct sas_internal *i = to_sas_internal(host->transportt); + + spin_unlock_irq(host->host_lock); + + { + struct sas_ha_struct *sas_ha = dev->port->ha; + struct sas_task *task; + + res = -ENOMEM; + task = sas_create_task(cmd, dev, GFP_ATOMIC); + if (!task) + goto out; + + cmd->scsi_done = scsi_done; + /* Queue up, Direct Mode or Task Collector Mode. 
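+ * Direct Mode (lldd_max_execute_num < 2) hands the task straight to the
+ * LLDD via lldd_execute_task(); otherwise it is queued for the Task
+ * Collector thread through sas_queue_up().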
*/ + if (sas_ha->lldd_max_execute_num < 2) + res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); + else + res = sas_queue_up(task); + + /* Examine */ + if (res) { + SAS_DPRINTK("lldd_execute_task returned: %d\n", res); + ASSIGN_SAS_TASK(cmd, NULL); + sas_free_task(task); + if (res == -SAS_QUEUE_FULL) { + cmd->result = DID_SOFT_ERROR << 16; /* retry */ + res = 0; + scsi_done(cmd); + } + goto out; + } + } +out: + spin_lock_irq(host->host_lock); + return res; +} + +static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + if (cmd == my_cmd) + list_del_init(&cmd->eh_entry); + } +} + +static void sas_scsi_clear_queue_I_T(struct list_head *error_q, + struct domain_device *dev) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + struct domain_device *x = cmd_to_domain_dev(cmd); + + if (x == dev) + list_del_init(&cmd->eh_entry); + } +} + +static void sas_scsi_clear_queue_port(struct list_head *error_q, + struct asd_sas_port *port) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct asd_sas_port *x = dev->port; + + if (x == port) + list_del_init(&cmd->eh_entry); + } +} + +enum task_disposition { + TASK_IS_DONE, + TASK_IS_ABORTED, + TASK_IS_AT_LU, + TASK_IS_NOT_AT_LU, +}; + +static enum task_disposition sas_scsi_find_task(struct sas_task *task) +{ + struct sas_ha_struct *ha = task->dev->port->ha; + unsigned long flags; + int i, res; + struct sas_internal *si = + to_sas_internal(task->dev->port->ha->core.shost->transportt); + + if (ha->lldd_max_execute_num > 1) { + struct scsi_core *core = &ha->core; + struct sas_task *t, *n; + + spin_lock_irqsave(&core->task_queue_lock, flags); + list_for_each_entry_safe(t, n, &core->task_queue, list) { + if (task == t) { + list_del_init(&t->list); + spin_unlock_irqrestore(&core->task_queue_lock, + flags); + SAS_DPRINTK("%s: task 0x%p aborted from " + "task_queue\n", + __FUNCTION__, task); + return TASK_IS_ABORTED; + } + } + spin_unlock_irqrestore(&core->task_queue_lock, flags); + } + + for (i = 0; i < 5; i++) { + SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); + res = si->dft->lldd_abort_task(task); + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, + task); + return TASK_IS_DONE; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("%s: task 0x%p is aborted\n", + __FUNCTION__, task); + return TASK_IS_ABORTED; + } else if (si->dft->lldd_query_task) { + SAS_DPRINTK("%s: querying task 0x%p\n", + __FUNCTION__, task); + res = si->dft->lldd_query_task(task); + if (res == TMF_RESP_FUNC_SUCC) { + SAS_DPRINTK("%s: task 0x%p at LU\n", + __FUNCTION__, task); + return TASK_IS_AT_LU; + } else if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("%s: task 0x%p not at LU\n", + __FUNCTION__, task); + return TASK_IS_NOT_AT_LU; + } + } + } + return res; +} + +static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) +{ + int res = TMF_RESP_FUNC_FAILED; + struct scsi_lun lun; + struct sas_internal *i = + to_sas_internal(dev->port->ha->core.shost->transportt); + + int_to_scsilun(cmd->device->lun, &lun); + + SAS_DPRINTK("eh: device %llx LUN %x has the task\n", + 
SAS_ADDR(dev->sas_addr), + cmd->device->lun); + + if (i->dft->lldd_abort_task_set) + res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun); + + if (res == TMF_RESP_FUNC_FAILED) { + if (i->dft->lldd_clear_task_set) + res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun); + } + + if (res == TMF_RESP_FUNC_FAILED) { + if (i->dft->lldd_lu_reset) + res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); + } + + return res; +} + +static int sas_recover_I_T(struct domain_device *dev) +{ + int res = TMF_RESP_FUNC_FAILED; + struct sas_internal *i = + to_sas_internal(dev->port->ha->core.shost->transportt); + + SAS_DPRINTK("I_T nexus reset for dev %016llx\n", + SAS_ADDR(dev->sas_addr)); + + if (i->dft->lldd_I_T_nexus_reset) + res = i->dft->lldd_I_T_nexus_reset(dev); + + return res; +} + +void sas_scsi_recover_host(struct Scsi_Host *shost) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + unsigned long flags; + LIST_HEAD(error_q); + struct scsi_cmnd *cmd, *n; + enum task_disposition res = TASK_IS_DONE; + int tmf_resp; + struct sas_internal *i = to_sas_internal(shost->transportt); + + spin_lock_irqsave(shost->host_lock, flags); + list_splice_init(&shost->eh_cmd_q, &error_q); + spin_unlock_irqrestore(shost->host_lock, flags); + + SAS_DPRINTK("Enter %s\n", __FUNCTION__); + + /* All tasks on this list were marked SAS_TASK_STATE_ABORTED + * by sas_scsi_timed_out() callback. + */ +Again: + SAS_DPRINTK("going over list...\n"); + list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { + struct sas_task *task = TO_SAS_TASK(cmd); + + SAS_DPRINTK("trying to find task 0x%p\n", task); + list_del_init(&cmd->eh_entry); + res = sas_scsi_find_task(task); + + cmd->eh_eflags = 0; + shost->host_failed--; + + switch (res) { + case TASK_IS_DONE: + SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, + task); + task->task_done(task); + continue; + case TASK_IS_ABORTED: + SAS_DPRINTK("%s: task 0x%p is aborted\n", + __FUNCTION__, task); + task->task_done(task); + continue; + case TASK_IS_AT_LU: + SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); + tmf_resp = sas_recover_lu(task->dev, cmd); + if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("dev %016llx LU %x is " + "recovered\n", + SAS_ADDR(task->dev), + cmd->device->lun); + task->task_done(task); + sas_scsi_clear_queue_lu(&error_q, cmd); + goto Again; + } + /* fallthrough */ + case TASK_IS_NOT_AT_LU: + SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", + task); + tmf_resp = sas_recover_I_T(task->dev); + if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("I_T %016llx recovered\n", + SAS_ADDR(task->dev->sas_addr)); + task->task_done(task); + sas_scsi_clear_queue_I_T(&error_q, task->dev); + goto Again; + } + /* Hammer time :-) */ + if (i->dft->lldd_clear_nexus_port) { + struct asd_sas_port *port = task->dev->port; + SAS_DPRINTK("clearing nexus for port:%d\n", + port->id); + res = i->dft->lldd_clear_nexus_port(port); + if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("clear nexus port:%d " + "succeeded\n", port->id); + task->task_done(task); + sas_scsi_clear_queue_port(&error_q, + port); + goto Again; + } + } + if (i->dft->lldd_clear_nexus_ha) { + SAS_DPRINTK("clear nexus ha\n"); + res = i->dft->lldd_clear_nexus_ha(ha); + if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("clear nexus ha " + "succeeded\n"); + task->task_done(task); + goto out; + } + } + /* If we are here -- this means that no amount + * of effort could recover from errors. Quite + * possibly the HA just disappeared. 
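+ * Complete the command here and fall through to clear_q, which flushes
+ * whatever is still sitting on the error queue.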
+ */ + SAS_DPRINTK("error from device %llx, LUN %x " + "couldn't be recovered in any way\n", + SAS_ADDR(task->dev->sas_addr), + cmd->device->lun); + + task->task_done(task); + goto clear_q; + } + } +out: + SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); + return; +clear_q: + SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); + list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { + struct sas_task *task = TO_SAS_TASK(cmd); + list_del_init(&cmd->eh_entry); + task->task_done(task); + } +} + +enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) +{ + struct sas_task *task = TO_SAS_TASK(cmd); + unsigned long flags; + + if (!task) { + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", + cmd, task); + return EH_HANDLED; + } + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", + cmd, task); + return EH_HANDLED; + } + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", + cmd, task); + + return EH_NOT_HANDLED; +} + +struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent); + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct domain_device *found_dev = NULL; + int i; + + spin_lock(&ha->phy_port_lock); + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + struct domain_device *dev; + + spin_lock(&port->dev_list_lock); + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (rphy == dev->rphy) { + found_dev = dev; + spin_unlock(&port->dev_list_lock); + goto found; + } + } + spin_unlock(&port->dev_list_lock); + } + found: + spin_unlock(&ha->phy_port_lock); + + return found_dev; +} + +static inline struct domain_device *sas_find_target(struct scsi_target *starget) +{ + struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent); + + return sas_find_dev_by_rphy(rphy); +} + +int sas_target_alloc(struct scsi_target *starget) +{ + struct domain_device *found_dev = sas_find_target(starget); + + if (!found_dev) + return -ENODEV; + + starget->hostdata = found_dev; + return 0; +} + +#define SAS_DEF_QD 32 +#define SAS_MAX_QD 64 + +int sas_slave_configure(struct scsi_device *scsi_dev) +{ + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + struct sas_ha_struct *sas_ha; + + BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); + + sas_ha = dev->port->ha; + + sas_read_port_mode_page(scsi_dev); + + if (scsi_dev->tagged_supported) { + scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); + scsi_activate_tcq(scsi_dev, SAS_DEF_QD); + } else { + SAS_DPRINTK("device %llx, LUN %x doesn't support " + "TCQ\n", SAS_ADDR(dev->sas_addr), + scsi_dev->lun); + scsi_dev->tagged_supported = 0; + scsi_set_tag_type(scsi_dev, 0); + scsi_deactivate_tcq(scsi_dev, 1); + } + + return 0; +} + +void sas_slave_destroy(struct scsi_device *scsi_dev) +{ +} + +int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth) +{ + int res = min(new_depth, SAS_MAX_QD); + + if (scsi_dev->tagged_supported) + scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), + res); + else { + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + sas_printk("device %llx LUN %x queue depth changed to 1\n", + SAS_ADDR(dev->sas_addr), + scsi_dev->lun); + scsi_adjust_queue_depth(scsi_dev, 0, 1); 
+ res = 1; + } + + return res; +} + +int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) +{ + if (!scsi_dev->tagged_supported) + return 0; + + scsi_deactivate_tcq(scsi_dev, 1); + + scsi_set_tag_type(scsi_dev, qt); + scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); + + return qt; +} + +int sas_bios_param(struct scsi_device *scsi_dev, + struct block_device *bdev, + sector_t capacity, int *hsc) +{ + hsc[0] = 255; + hsc[1] = 63; + sector_div(capacity, 255*63); + hsc[2] = capacity; + + return 0; +} + +/* ---------- Task Collector Thread implementation ---------- */ + +static void sas_queue(struct sas_ha_struct *sas_ha) +{ + struct scsi_core *core = &sas_ha->core; + unsigned long flags; + LIST_HEAD(q); + int can_queue; + int res; + struct sas_internal *i = to_sas_internal(core->shost->transportt); + + spin_lock_irqsave(&core->task_queue_lock, flags); + while (!core->queue_thread_kill && + !list_empty(&core->task_queue)) { + + can_queue = sas_ha->lldd_queue_size - core->task_queue_size; + if (can_queue >= 0) { + can_queue = core->task_queue_size; + list_splice_init(&core->task_queue, &q); + } else { + struct list_head *a, *n; + + can_queue = sas_ha->lldd_queue_size; + list_for_each_safe(a, n, &core->task_queue) { + list_move_tail(a, &q); + if (--can_queue == 0) + break; + } + can_queue = sas_ha->lldd_queue_size; + } + core->task_queue_size -= can_queue; + spin_unlock_irqrestore(&core->task_queue_lock, flags); + { + struct sas_task *task = list_entry(q.next, + struct sas_task, + list); + list_del_init(&q); + res = i->dft->lldd_execute_task(task, can_queue, + GFP_KERNEL); + if (unlikely(res)) + __list_add(&q, task->list.prev, &task->list); + } + spin_lock_irqsave(&core->task_queue_lock, flags); + if (res) { + list_splice_init(&q, &core->task_queue); /*at head*/ + core->task_queue_size += can_queue; + } + } + spin_unlock_irqrestore(&core->task_queue_lock, flags); +} + +static DECLARE_COMPLETION(queue_th_comp); + +/** + * sas_queue_thread -- The Task Collector thread + * @_sas_ha: pointer to struct sas_ha + */ +static int sas_queue_thread(void *_sas_ha) +{ + struct sas_ha_struct *sas_ha = _sas_ha; + struct scsi_core *core = &sas_ha->core; + + daemonize("sas_queue_%d", core->shost->host_no); + current->flags |= PF_NOFREEZE; + + complete(&queue_th_comp); + + while (1) { + down_interruptible(&core->queue_thread_sema); + sas_queue(sas_ha); + if (core->queue_thread_kill) + break; + } + + complete(&queue_th_comp); + + return 0; +} + +int sas_init_queue(struct sas_ha_struct *sas_ha) +{ + int res; + struct scsi_core *core = &sas_ha->core; + + spin_lock_init(&core->task_queue_lock); + core->task_queue_size = 0; + INIT_LIST_HEAD(&core->task_queue); + init_MUTEX_LOCKED(&core->queue_thread_sema); + + res = kernel_thread(sas_queue_thread, sas_ha, 0); + if (res >= 0) + wait_for_completion(&queue_th_comp); + + return res < 0 ? 
res : 0; +} + +void sas_shutdown_queue(struct sas_ha_struct *sas_ha) +{ + unsigned long flags; + struct scsi_core *core = &sas_ha->core; + struct sas_task *task, *n; + + init_completion(&queue_th_comp); + core->queue_thread_kill = 1; + up(&core->queue_thread_sema); + wait_for_completion(&queue_th_comp); + + if (!list_empty(&core->task_queue)) + SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n", + SAS_ADDR(sas_ha->sas_addr)); + + spin_lock_irqsave(&core->task_queue_lock, flags); + list_for_each_entry_safe(task, n, &core->task_queue, list) { + struct scsi_cmnd *cmd = task->uldd_task; + + list_del_init(&task->list); + + ASSIGN_SAS_TASK(cmd, NULL); + sas_free_task(task); + cmd->result = DID_ABORT << 16; + cmd->scsi_done(cmd); + } + spin_unlock_irqrestore(&core->task_queue_lock, flags); +} + +EXPORT_SYMBOL_GPL(sas_queuecommand); +EXPORT_SYMBOL_GPL(sas_target_alloc); +EXPORT_SYMBOL_GPL(sas_slave_configure); +EXPORT_SYMBOL_GPL(sas_slave_destroy); +EXPORT_SYMBOL_GPL(sas_change_queue_depth); +EXPORT_SYMBOL_GPL(sas_change_queue_type); +EXPORT_SYMBOL_GPL(sas_bios_param); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h new file mode 100644 index 0000000..72acdab --- /dev/null +++ b/include/scsi/libsas.h @@ -0,0 +1,627 @@ +/* + * SAS host prototypes and structures header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#ifndef _LIBSAS_H_ +#define _LIBSAS_H_ + + +#include +#include +#include +#include +#include +#include +#include +#include + +struct block_device; + +enum sas_class { + SAS, + EXPANDER +}; + +enum sas_phy_role { + PHY_ROLE_NONE = 0, + PHY_ROLE_TARGET = 0x40, + PHY_ROLE_INITIATOR = 0x80, +}; + +enum sas_phy_type { + PHY_TYPE_PHYSICAL, + PHY_TYPE_VIRTUAL +}; + +/* The events are mnemonically described in sas_dump.c + * so when updating/adding events here, please also + * update the other file too. 
+ */ +enum ha_event { + HAE_RESET = 0U, + HA_NUM_EVENTS = 1, +}; + +enum port_event { + PORTE_BYTES_DMAED = 0U, + PORTE_BROADCAST_RCVD = 1, + PORTE_LINK_RESET_ERR = 2, + PORTE_TIMER_EVENT = 3, + PORTE_HARD_RESET = 4, + PORT_NUM_EVENTS = 5, +}; + +enum phy_event { + PHYE_LOSS_OF_SIGNAL = 0U, + PHYE_OOB_DONE = 1, + PHYE_OOB_ERROR = 2, + PHYE_SPINUP_HOLD = 3, /* hot plug SATA, no COMWAKE sent */ + PHY_NUM_EVENTS = 4, +}; + +enum discover_event { + DISCE_DISCOVER_DOMAIN = 0U, + DISCE_REVALIDATE_DOMAIN = 1, + DISCE_PORT_GONE = 2, + DISC_NUM_EVENTS = 3, +}; + +/* ---------- Expander Devices ---------- */ + +#define ETASK 0xFA + +#define to_dom_device(_obj) container_of(_obj, struct domain_device, dev_obj) +#define to_dev_attr(_attr) container_of(_attr, struct domain_dev_attribute,\ + attr) + +enum routing_attribute { + DIRECT_ROUTING, + SUBTRACTIVE_ROUTING, + TABLE_ROUTING, +}; + +enum ex_phy_state { + PHY_EMPTY, + PHY_VACANT, + PHY_NOT_PRESENT, + PHY_DEVICE_DISCOVERED +}; + +struct ex_phy { + int phy_id; + + enum ex_phy_state phy_state; + + enum sas_dev_type attached_dev_type; + enum sas_phy_linkrate linkrate; + + u8 attached_sata_host:1; + u8 attached_sata_dev:1; + u8 attached_sata_ps:1; + + enum sas_proto attached_tproto; + enum sas_proto attached_iproto; + + u8 attached_sas_addr[SAS_ADDR_SIZE]; + u8 attached_phy_id; + + u8 phy_change_count; + enum routing_attribute routing_attr; + u8 virtual:1; + + int last_da_index; + + struct sas_phy *phy; + struct sas_port *port; +}; + +struct expander_device { + struct list_head children; + + u16 ex_change_count; + u16 max_route_indexes; + u8 num_phys; + u8 configuring:1; + u8 conf_route_table:1; + u8 enclosure_logical_id[8]; + + struct ex_phy *ex_phy; + struct sas_port *parent_port; +}; + +/* ---------- SATA device ---------- */ +enum ata_command_set { + ATA_COMMAND_SET = 0, + ATAPI_COMMAND_SET = 1, +}; + +struct sata_device { + enum ata_command_set command_set; + struct smp_resp rps_resp; /* report_phy_sata_resp */ + __le16 *identify_device; + __le16 *identify_packet_device; + + u8 port_no; /* port number, if this is a PM (Port) */ + struct list_head children; /* PM Ports if this is a PM */ +}; + +/* ---------- Domain device ---------- */ +struct domain_device { + enum sas_dev_type dev_type; + + enum sas_phy_linkrate linkrate; + enum sas_phy_linkrate min_linkrate; + enum sas_phy_linkrate max_linkrate; + + int pathways; + + struct domain_device *parent; + struct list_head siblings; /* devices on the same level */ + struct asd_sas_port *port; /* shortcut to root of the tree */ + + struct list_head dev_list_node; + + enum sas_proto iproto; + enum sas_proto tproto; + + struct sas_rphy *rphy; + + u8 sas_addr[SAS_ADDR_SIZE]; + u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE]; + + u8 frame_rcvd[32]; + + union { + struct expander_device ex_dev; + struct sata_device sata_dev; /* STP & directly attached */ + }; + + void *lldd_dev; +}; + +struct sas_discovery { + spinlock_t disc_event_lock; + struct work_struct disc_work[DISC_NUM_EVENTS]; + unsigned long pending; + u8 fanout_sas_addr[8]; + u8 eeds_a[8]; + u8 eeds_b[8]; + int max_level; +}; + + +/* The port struct is Class:RW, driver:RO */ +struct asd_sas_port { +/* private: */ + struct completion port_gone_completion; + + struct sas_discovery disc; + struct domain_device *port_dev; + spinlock_t dev_list_lock; + struct list_head dev_list; + enum sas_phy_linkrate linkrate; + + struct sas_phy *phy; + struct work_struct work; + +/* public: */ + int id; + + enum sas_class class; + u8 sas_addr[SAS_ADDR_SIZE]; + u8 
attached_sas_addr[SAS_ADDR_SIZE]; + enum sas_proto iproto; + enum sas_proto tproto; + + enum sas_oob_mode oob_mode; + + spinlock_t phy_list_lock; + struct list_head phy_list; + int num_phys; + u32 phy_mask; + + struct sas_ha_struct *ha; + + struct sas_port *port; + + void *lldd_port; /* not touched by the sas class code */ +}; + +/* The phy pretty much is controlled by the LLDD. + * The class only reads those fields. + */ +struct asd_sas_phy { +/* private: */ + /* protected by ha->event_lock */ + struct work_struct port_events[PORT_NUM_EVENTS]; + struct work_struct phy_events[PHY_NUM_EVENTS]; + + unsigned long port_events_pending; + unsigned long phy_events_pending; + + int error; + + struct sas_phy *phy; + +/* public: */ + /* The following are class:RO, driver:R/W */ + int enabled; /* must be set */ + + int id; /* must be set */ + enum sas_class class; + enum sas_proto iproto; + enum sas_proto tproto; + + enum sas_phy_type type; + enum sas_phy_role role; + enum sas_oob_mode oob_mode; + enum sas_phy_linkrate linkrate; + + u8 *sas_addr; /* must be set */ + u8 attached_sas_addr[SAS_ADDR_SIZE]; /* class:RO, driver: R/W */ + + spinlock_t frame_rcvd_lock; + u8 *frame_rcvd; /* must be set */ + int frame_rcvd_size; + + spinlock_t sas_prim_lock; + u32 sas_prim; + + struct list_head port_phy_el; /* driver:RO */ + struct asd_sas_port *port; /* Class:RW, driver: RO */ + + struct sas_ha_struct *ha; /* may be set; the class sets it anyway */ + + void *lldd_phy; /* not touched by the sas_class_code */ +}; + +struct scsi_core { + struct Scsi_Host *shost; + + spinlock_t task_queue_lock; + struct list_head task_queue; + int task_queue_size; + + struct semaphore queue_thread_sema; + int queue_thread_kill; +}; + +struct sas_ha_struct { +/* private: */ + spinlock_t event_lock; + struct work_struct ha_events[HA_NUM_EVENTS]; + unsigned long pending; + + struct scsi_core core; + +/* public: */ + char *sas_ha_name; + struct pci_dev *pcidev; /* should be set */ + struct module *lldd_module; /* should be set */ + + u8 *sas_addr; /* must be set */ + u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE]; + + spinlock_t phy_port_lock; + struct asd_sas_phy **sas_phy; /* array of valid pointers, must be set */ + struct asd_sas_port **sas_port; /* array of valid pointers, must be set */ + int num_phys; /* must be set, gt 0, static */ + + /* The class calls this to send a task for execution. */ + int lldd_max_execute_num; + int lldd_queue_size; + + /* LLDD calls these to notify the class of an event. */ + void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event); + void (*notify_port_event)(struct asd_sas_phy *, enum port_event); + void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event); + + void *lldd_ha; /* not touched by sas class code */ +}; + +#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata) + +static inline struct domain_device * +starget_to_domain_dev(struct scsi_target *starget) { + return starget->hostdata; +} + +static inline struct domain_device * +sdev_to_domain_dev(struct scsi_device *sdev) { + return starget_to_domain_dev(sdev->sdev_target); +} + +static inline struct domain_device * +cmd_to_domain_dev(struct scsi_cmnd *cmd) +{ + return sdev_to_domain_dev(cmd->device); +} + +void sas_hash_addr(u8 *hashed, const u8 *sas_addr); + +/* Before calling a notify event, LLDD should use this function + * when the link is severed (possibly from its tasklet). + * The idea is that the Class only reads those, while the LLDD, + * can R/W these (thus avoiding a race). 
+ */ +static inline void sas_phy_disconnected(struct asd_sas_phy *phy) +{ + phy->oob_mode = OOB_NOT_CONNECTED; + phy->linkrate = PHY_LINKRATE_NONE; +} + +/* ---------- Tasks ---------- */ +/* + service_response | SAS_TASK_COMPLETE | SAS_TASK_UNDELIVERED | + exec_status | | | + ---------------------+---------------------+-----------------------+ + SAM_... | X | | + DEV_NO_RESPONSE | X | X | + INTERRUPTED | X | | + QUEUE_FULL | | X | + DEVICE_UNKNOWN | | X | + SG_ERR | | X | + ---------------------+---------------------+-----------------------+ + */ + +enum service_response { + SAS_TASK_COMPLETE, + SAS_TASK_UNDELIVERED = -1, +}; + +enum exec_status { + SAM_GOOD = 0, + SAM_CHECK_COND = 2, + SAM_COND_MET = 4, + SAM_BUSY = 8, + SAM_INTERMEDIATE = 0x10, + SAM_IM_COND_MET = 0x12, + SAM_RESV_CONFLICT= 0x14, + SAM_TASK_SET_FULL= 0x28, + SAM_ACA_ACTIVE = 0x30, + SAM_TASK_ABORTED = 0x40, + + SAS_DEV_NO_RESPONSE = 0x80, + SAS_DATA_UNDERRUN, + SAS_DATA_OVERRUN, + SAS_INTERRUPTED, + SAS_QUEUE_FULL, + SAS_DEVICE_UNKNOWN, + SAS_SG_ERR, + SAS_OPEN_REJECT, + SAS_OPEN_TO, + SAS_PROTO_RESPONSE, + SAS_PHY_DOWN, + SAS_NAK_R_ERR, + SAS_PENDING, + SAS_ABORTED_TASK, +}; + +/* When a task finishes with a response, the LLDD examines the + * response: + * - For an ATA task task_status_struct::stat is set to + * SAS_PROTO_RESPONSE, and the task_status_struct::buf is set to the + * contents of struct ata_task_resp. + * - For SSP tasks, if no data is present or status/TMF response + * is valid, task_status_struct::stat is set. If data is present + * (SENSE data), the LLDD copies up to SAS_STATUS_BUF_SIZE, sets + * task_status_struct::buf_valid_size, and task_status_struct::stat is + * set to SAM_CHECK_COND. + * + * "buf" has format SCSI Sense for SSP task, or struct ata_task_resp + * for ATA task. + * + * "frame_len" is the total frame length, which could be more or less + * than actually copied. + * + * Tasks ending with response, always set the residual field. + */ +struct ata_task_resp { + u16 frame_len; + u8 ending_fis[24]; /* dev to host or data-in */ + u32 sstatus; + u32 serror; + u32 scontrol; + u32 sactive; +}; + +#define SAS_STATUS_BUF_SIZE 96 + +struct task_status_struct { + enum service_response resp; + enum exec_status stat; + int buf_valid_size; + + u8 buf[SAS_STATUS_BUF_SIZE]; + + u32 residual; + enum sas_open_rej_reason open_rej_reason; +}; + +/* ATA and ATAPI task queuable to a SAS LLDD. + */ +struct sas_ata_task { + struct host_to_dev_fis fis; + u8 atapi_packet[16]; /* 0 if not ATAPI task */ + + u8 retry_count; /* hardware retry, should be > 0 */ + + u8 dma_xfer:1; /* PIO:0 or DMA:1 */ + u8 use_ncq:1; + u8 set_affil_pol:1; + u8 stp_affil_pol:1; + + u8 device_control_reg_update:1; +}; + +struct sas_smp_task { + struct scatterlist smp_req; + struct scatterlist smp_resp; +}; + +enum task_attribute { + TASK_ATTR_SIMPLE = 0, + TASK_ATTR_HOQ = 1, + TASK_ATTR_ORDERED= 2, + TASK_ATTR_ACA = 4, +}; + +struct sas_ssp_task { + u8 retry_count; /* hardware retry, should be > 0 */ + + u8 LUN[8]; + u8 enable_first_burst:1; + enum task_attribute task_attr; + u8 task_prio; + u8 cdb[16]; +}; + +struct sas_task { + struct domain_device *dev; + struct list_head list; + + spinlock_t task_state_lock; + unsigned task_state_flags; + + enum sas_proto task_proto; + + /* Used by the discovery code. 
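+ * (e.g. so that internally generated tasks such as SMP requests can be
+ * waited on via the completion and timed out via the timer)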
*/ + struct timer_list timer; + struct completion completion; + + union { + struct sas_ata_task ata_task; + struct sas_smp_task smp_task; + struct sas_ssp_task ssp_task; + }; + + struct scatterlist *scatter; + int num_scatter; + u32 total_xfer_len; + u8 data_dir:2; /* Use PCI_DMA_... */ + + struct task_status_struct task_status; + void (*task_done)(struct sas_task *); + + void *lldd_task; /* for use by LLDDs */ + void *uldd_task; +}; + + + +#define SAS_TASK_STATE_PENDING 1 +#define SAS_TASK_STATE_DONE 2 +#define SAS_TASK_STATE_ABORTED 4 + +static inline struct sas_task *sas_alloc_task(unsigned long flags) +{ + extern kmem_cache_t *sas_task_cache; + struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags); + + if (task) { + memset(task, 0, sizeof(*task)); + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); + } + + return task; +} + +static inline void sas_free_task(struct sas_task *task) +{ + if (task) { + extern kmem_cache_t *sas_task_cache; + BUG_ON(!list_empty(&task->list)); + kmem_cache_free(sas_task_cache, task); + } +} + +struct sas_domain_function_template { + /* The class calls these to notify the LLDD of an event. */ + void (*lldd_port_formed)(struct asd_sas_phy *); + void (*lldd_port_deformed)(struct asd_sas_phy *); + + /* The class calls these when a device is found or gone. */ + int (*lldd_dev_found)(struct domain_device *); + void (*lldd_dev_gone)(struct domain_device *); + + int (*lldd_execute_task)(struct sas_task *, int num, + unsigned long gfp_flags); + + /* Task Management Functions. Must be called from process context. */ + int (*lldd_abort_task)(struct sas_task *); + int (*lldd_abort_task_set)(struct domain_device *, u8 *lun); + int (*lldd_clear_aca)(struct domain_device *, u8 *lun); + int (*lldd_clear_task_set)(struct domain_device *, u8 *lun); + int (*lldd_I_T_nexus_reset)(struct domain_device *); + int (*lldd_lu_reset)(struct domain_device *, u8 *lun); + int (*lldd_query_task)(struct sas_task *); + + /* Port and Adapter management */ + int (*lldd_clear_nexus_port)(struct asd_sas_port *); + int (*lldd_clear_nexus_ha)(struct sas_ha_struct *); + + /* Phy management */ + int (*lldd_control_phy)(struct asd_sas_phy *, enum phy_func); +}; + +extern int sas_register_ha(struct sas_ha_struct *); +extern int sas_unregister_ha(struct sas_ha_struct *); + +extern int sas_queuecommand(struct scsi_cmnd *, + void (*scsi_done)(struct scsi_cmnd *)); +extern int sas_target_alloc(struct scsi_target *); +extern int sas_slave_alloc(struct scsi_device *); +extern int sas_slave_configure(struct scsi_device *); +extern void sas_slave_destroy(struct scsi_device *); +extern int sas_change_queue_depth(struct scsi_device *, int new_depth); +extern int sas_change_queue_type(struct scsi_device *, int qt); +extern int sas_bios_param(struct scsi_device *, + struct block_device *, + sector_t capacity, int *hsc); +extern struct scsi_transport_template * +sas_domain_attach_transport(struct sas_domain_function_template *); +extern void sas_domain_release_transport(struct scsi_transport_template *); + +int sas_discover_root_expander(struct domain_device *); + +void sas_init_ex_attr(void); + +int sas_ex_revalidate_domain(struct domain_device *); + +void sas_unregister_domain_devices(struct asd_sas_port *port); +void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *); +int sas_discover_event(struct asd_sas_port *, enum discover_event ev); + +int 
sas_discover_sata(struct domain_device *); +int sas_discover_end_dev(struct domain_device *); + +void sas_unregister_dev(struct domain_device *); + +void sas_init_dev(struct domain_device *); + +#endif /* _SASLIB_H_ */ diff --git a/include/scsi/sas.h b/include/scsi/sas.h new file mode 100644 index 0000000..752853a --- /dev/null +++ b/include/scsi/sas.h @@ -0,0 +1,644 @@ +/* + * SAS structures and definitions header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#ifndef _SAS_H_ +#define _SAS_H_ + +#include +#include + +#define SAS_ADDR_SIZE 8 +#define HASHED_SAS_ADDR_SIZE 3 +#define SAS_ADDR(_sa) ((unsigned long long) be64_to_cpu(*(__be64 *)(_sa))) + +#define SMP_REQUEST 0x40 +#define SMP_RESPONSE 0x41 + +#define SSP_DATA 0x01 +#define SSP_XFER_RDY 0x05 +#define SSP_COMMAND 0x06 +#define SSP_RESPONSE 0x07 +#define SSP_TASK 0x16 + +#define SMP_REPORT_GENERAL 0x00 +#define SMP_REPORT_MANUF_INFO 0x01 +#define SMP_READ_GPIO_REG 0x02 +#define SMP_DISCOVER 0x10 +#define SMP_REPORT_PHY_ERR_LOG 0x11 +#define SMP_REPORT_PHY_SATA 0x12 +#define SMP_REPORT_ROUTE_INFO 0x13 +#define SMP_WRITE_GPIO_REG 0x82 +#define SMP_CONF_ROUTE_INFO 0x90 +#define SMP_PHY_CONTROL 0x91 +#define SMP_PHY_TEST_FUNCTION 0x92 + +#define SMP_RESP_FUNC_ACC 0x00 +#define SMP_RESP_FUNC_UNK 0x01 +#define SMP_RESP_FUNC_FAILED 0x02 +#define SMP_RESP_INV_FRM_LEN 0x03 +#define SMP_RESP_NO_PHY 0x10 +#define SMP_RESP_NO_INDEX 0x11 +#define SMP_RESP_PHY_NO_SATA 0x12 +#define SMP_RESP_PHY_UNK_OP 0x13 +#define SMP_RESP_PHY_UNK_TESTF 0x14 +#define SMP_RESP_PHY_TEST_INPROG 0x15 +#define SMP_RESP_PHY_VACANT 0x16 + +/* SAM TMFs */ +#define TMF_ABORT_TASK 0x01 +#define TMF_ABORT_TASK_SET 0x02 +#define TMF_CLEAR_TASK_SET 0x04 +#define TMF_LU_RESET 0x08 +#define TMF_CLEAR_ACA 0x40 +#define TMF_QUERY_TASK 0x80 + +/* SAS TMF responses */ +#define TMF_RESP_FUNC_COMPLETE 0x00 +#define TMF_RESP_INVALID_FRAME 0x02 +#define TMF_RESP_FUNC_ESUPP 0x04 +#define TMF_RESP_FUNC_FAILED 0x05 +#define TMF_RESP_FUNC_SUCC 0x08 +#define TMF_RESP_NO_LUN 0x09 +#define TMF_RESP_OVERLAPPED_TAG 0x0A + +enum sas_oob_mode { + OOB_NOT_CONNECTED, + SATA_OOB_MODE, + SAS_OOB_MODE +}; + +/* See sas_discover.c if you plan on changing these. 
+ */ +enum sas_dev_type { + NO_DEVICE = 0, /* protocol */ + SAS_END_DEV = 1, /* protocol */ + EDGE_DEV = 2, /* protocol */ + FANOUT_DEV = 3, /* protocol */ + SAS_HA = 4, + SATA_DEV = 5, + SATA_PM = 7, + SATA_PM_PORT= 8, +}; + +enum sas_phy_linkrate { + PHY_LINKRATE_NONE = 0, + PHY_LINKRATE_UNKNOWN = 0, + PHY_DISABLED, + PHY_RESET_PROBLEM, + PHY_SPINUP_HOLD, + PHY_PORT_SELECTOR, + PHY_LINKRATE_1_5 = 0x08, + PHY_LINKRATE_G1 = PHY_LINKRATE_1_5, + PHY_LINKRATE_3 = 0x09, + PHY_LINKRATE_G2 = PHY_LINKRATE_3, + PHY_LINKRATE_6 = 0x0A, +}; + +/* Partly from IDENTIFY address frame. */ +enum sas_proto { + SATA_PROTO = 1, + SAS_PROTO_SMP = 2, /* protocol */ + SAS_PROTO_STP = 4, /* protocol */ + SAS_PROTO_SSP = 8, /* protocol */ + SAS_PROTO_ALL = 0xE, +}; + +/* From the spec; local phys only */ +enum phy_func { + PHY_FUNC_NOP, + PHY_FUNC_LINK_RESET, /* Enables the phy */ + PHY_FUNC_HARD_RESET, + PHY_FUNC_DISABLE, + PHY_FUNC_CLEAR_ERROR_LOG = 5, + PHY_FUNC_CLEAR_AFFIL, + PHY_FUNC_TX_SATA_PS_SIGNAL, + PHY_FUNC_RELEASE_SPINUP_HOLD = 0x10, /* LOCAL PORT ONLY! */ +}; + +/* SAS LLDD would need to report only _very_few_ of those, like BROADCAST. + * Most of those are here for completeness. + */ +enum sas_prim { + SAS_PRIM_AIP_NORMAL = 1, + SAS_PRIM_AIP_R0 = 2, + SAS_PRIM_AIP_R1 = 3, + SAS_PRIM_AIP_R2 = 4, + SAS_PRIM_AIP_WC = 5, + SAS_PRIM_AIP_WD = 6, + SAS_PRIM_AIP_WP = 7, + SAS_PRIM_AIP_RWP = 8, + + SAS_PRIM_BC_CH = 9, + SAS_PRIM_BC_RCH0 = 10, + SAS_PRIM_BC_RCH1 = 11, + SAS_PRIM_BC_R0 = 12, + SAS_PRIM_BC_R1 = 13, + SAS_PRIM_BC_R2 = 14, + SAS_PRIM_BC_R3 = 15, + SAS_PRIM_BC_R4 = 16, + + SAS_PRIM_NOTIFY_ENSP= 17, + SAS_PRIM_NOTIFY_R0 = 18, + SAS_PRIM_NOTIFY_R1 = 19, + SAS_PRIM_NOTIFY_R2 = 20, + + SAS_PRIM_CLOSE_CLAF = 21, + SAS_PRIM_CLOSE_NORM = 22, + SAS_PRIM_CLOSE_R0 = 23, + SAS_PRIM_CLOSE_R1 = 24, + + SAS_PRIM_OPEN_RTRY = 25, + SAS_PRIM_OPEN_RJCT = 26, + SAS_PRIM_OPEN_ACPT = 27, + + SAS_PRIM_DONE = 28, + SAS_PRIM_BREAK = 29, + + SATA_PRIM_DMAT = 33, + SATA_PRIM_PMNAK = 34, + SATA_PRIM_PMACK = 35, + SATA_PRIM_PMREQ_S = 36, + SATA_PRIM_PMREQ_P = 37, + SATA_SATA_R_ERR = 38, +}; + +enum sas_open_rej_reason { + /* Abandon open */ + SAS_OREJ_UNKNOWN = 0, + SAS_OREJ_BAD_DEST = 1, + SAS_OREJ_CONN_RATE = 2, + SAS_OREJ_EPROTO = 3, + SAS_OREJ_RESV_AB0 = 4, + SAS_OREJ_RESV_AB1 = 5, + SAS_OREJ_RESV_AB2 = 6, + SAS_OREJ_RESV_AB3 = 7, + SAS_OREJ_WRONG_DEST= 8, + SAS_OREJ_STP_NORES = 9, + + /* Retry open */ + SAS_OREJ_NO_DEST = 10, + SAS_OREJ_PATH_BLOCKED = 11, + SAS_OREJ_RSVD_CONT0 = 12, + SAS_OREJ_RSVD_CONT1 = 13, + SAS_OREJ_RSVD_INIT0 = 14, + SAS_OREJ_RSVD_INIT1 = 15, + SAS_OREJ_RSVD_STOP0 = 16, + SAS_OREJ_RSVD_STOP1 = 17, + SAS_OREJ_RSVD_RETRY = 18, +}; + +struct dev_to_host_fis { + u8 fis_type; /* 0x34 */ + u8 flags; + u8 status; + u8 error; + + u8 lbal; + union { u8 lbam; u8 byte_count_low; }; + union { u8 lbah; u8 byte_count_high; }; + u8 device; + + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + + union { u8 sector_count; u8 interrupt_reason; }; + u8 sector_count_exp; + u8 _r_b; + u8 _r_c; + + u32 _r_d; +} __attribute__ ((packed)); + +struct host_to_dev_fis { + u8 fis_type; /* 0x27 */ + u8 flags; + u8 command; + u8 features; + + u8 lbal; + union { u8 lbam; u8 byte_count_low; }; + union { u8 lbah; u8 byte_count_high; }; + u8 device; + + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 features_exp; + + union { u8 sector_count; u8 interrupt_reason; }; + u8 sector_count_exp; + u8 _r_a; + u8 control; + + u32 _r_b; +} __attribute__ ((packed)); + +/* Prefer to have code clarity over header file clarity. 
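+ * The wire-format structures below are therefore spelled out twice, once
+ * per bitfield endianness, rather than being hidden behind macros.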
+ */ +#ifdef __LITTLE_ENDIAN_BITFIELD +struct sas_identify_frame { + /* Byte 0 */ + u8 frame_type:4; + u8 dev_type:3; + u8 _un0:1; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un20:1; + u8 smp_iport:1; + u8 stp_iport:1; + u8 ssp_iport:1; + u8 _un247:4; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un30:1; + u8 smp_tport:1; + u8 stp_tport:1; + u8 ssp_tport:1; + u8 _un347:4; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; + + __be32 crc; +} __attribute__ ((packed)); + +struct ssp_frame_hdr { + u8 frame_type; + u8 hashed_dest_addr[HASHED_SAS_ADDR_SIZE]; + u8 _r_a; + u8 hashed_src_addr[HASHED_SAS_ADDR_SIZE]; + __be16 _r_b; + + u8 changing_data_ptr:1; + u8 retransmit:1; + u8 retry_data_frames:1; + u8 _r_c:5; + + u8 num_fill_bytes:2; + u8 _r_d:6; + + u32 _r_e; + __be16 tag; + __be16 tptt; + __be32 data_offs; +} __attribute__ ((packed)); + +struct ssp_response_iu { + u8 _r_a[10]; + + u8 datapres:2; + u8 _r_b:6; + + u8 status; + + u32 _r_c; + + __be32 sense_data_len; + __be32 response_data_len; + + u8 resp_data[0]; + u8 sense_data[0]; +} __attribute__ ((packed)); + +/* ---------- SMP ---------- */ + +struct report_general_resp { + __be16 change_count; + __be16 route_indexes; + u8 _r_a; + u8 num_phys; + + u8 conf_route_table:1; + u8 configuring:1; + u8 _r_b:6; + + u8 _r_c; + + u8 enclosure_logical_id[8]; + + u8 _r_d[12]; +} __attribute__ ((packed)); + +struct discover_resp { + u8 _r_a[5]; + + u8 phy_id; + __be16 _r_b; + + u8 _r_c:4; + u8 attached_dev_type:3; + u8 _r_d:1; + + u8 linkrate:4; + u8 _r_e:4; + + u8 attached_sata_host:1; + u8 iproto:3; + u8 _r_f:4; + + u8 attached_sata_dev:1; + u8 tproto:3; + u8 _r_g:3; + u8 attached_sata_ps:1; + + u8 sas_addr[8]; + u8 attached_sas_addr[8]; + u8 attached_phy_id; + + u8 _r_h[7]; + + u8 hmin_linkrate:4; + u8 pmin_linkrate:4; + u8 hmax_linkrate:4; + u8 pmax_linkrate:4; + + u8 change_count; + + u8 pptv:4; + u8 _r_i:3; + u8 virtual:1; + + u8 routing_attr:4; + u8 _r_j:4; + + u8 conn_type; + u8 conn_el_index; + u8 conn_phy_link; + + u8 _r_k[8]; +} __attribute__ ((packed)); + +struct report_phy_sata_resp { + u8 _r_a[5]; + + u8 phy_id; + u8 _r_b; + + u8 affil_valid:1; + u8 affil_supp:1; + u8 _r_c:6; + + u32 _r_d; + + u8 stp_sas_addr[8]; + + struct dev_to_host_fis fis; + + u32 _r_e; + + u8 affil_stp_ini_addr[8]; + + __be32 crc; +} __attribute__ ((packed)); + +struct smp_resp { + u8 frame_type; + u8 function; + u8 result; + u8 reserved; + union { + struct report_general_resp rg; + struct discover_resp disc; + struct report_phy_sata_resp rps; + }; +} __attribute__ ((packed)); + +#elif defined(__BIG_ENDIAN_BITFIELD) +struct sas_identify_frame { + /* Byte 0 */ + u8 _un0:1; + u8 dev_type:3; + u8 frame_type:4; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un247:4; + u8 ssp_iport:1; + u8 stp_iport:1; + u8 smp_iport:1; + u8 _un20:1; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un347:4; + u8 ssp_tport:1; + u8 stp_tport:1; + u8 smp_tport:1; + u8 _un30:1; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; + + __be32 crc; +} __attribute__ ((packed)); + +struct ssp_frame_hdr { + u8 frame_type; + u8 hashed_dest_addr[HASHED_SAS_ADDR_SIZE]; + u8 _r_a; + u8 hashed_src_addr[HASHED_SAS_ADDR_SIZE]; + __be16 _r_b; + + u8 _r_c:5; + u8 
retry_data_frames:1; + u8 retransmit:1; + u8 changing_data_ptr:1; + + u8 _r_d:6; + u8 num_fill_bytes:2; + + u32 _r_e; + __be16 tag; + __be16 tptt; + __be32 data_offs; +} __attribute__ ((packed)); + +struct ssp_response_iu { + u8 _r_a[10]; + + u8 _r_b:6; + u8 datapres:2; + + u8 status; + + u32 _r_c; + + __be32 sense_data_len; + __be32 response_data_len; + + u8 resp_data[0]; + u8 sense_data[0]; +} __attribute__ ((packed)); + +/* ---------- SMP ---------- */ + +struct report_general_resp { + __be16 change_count; + __be16 route_indexes; + u8 _r_a; + u8 num_phys; + + u8 _r_b:6; + u8 configuring:1; + u8 conf_route_table:1; + + u8 _r_c; + + u8 enclosure_logical_id[8]; + + u8 _r_d[12]; +} __attribute__ ((packed)); + +struct discover_resp { + u8 _r_a[5]; + + u8 phy_id; + __be16 _r_b; + + u8 _r_d:1; + u8 attached_dev_type:3; + u8 _r_c:4; + + u8 _r_e:4; + u8 linkrate:4; + + u8 _r_f:4; + u8 iproto:3; + u8 attached_sata_host:1; + + u8 attached_sata_ps:1; + u8 _r_g:3; + u8 tproto:3; + u8 attached_sata_dev:1; + + u8 sas_addr[8]; + u8 attached_sas_addr[8]; + u8 attached_phy_id; + + u8 _r_h[7]; + + u8 pmin_linkrate:4; + u8 hmin_linkrate:4; + u8 pmax_linkrate:4; + u8 hmax_linkrate:4; + + u8 change_count; + + u8 virtual:1; + u8 _r_i:3; + u8 pptv:4; + + u8 _r_j:4; + u8 routing_attr:4; + + u8 conn_type; + u8 conn_el_index; + u8 conn_phy_link; + + u8 _r_k[8]; +} __attribute__ ((packed)); + +struct report_phy_sata_resp { + u8 _r_a[5]; + + u8 phy_id; + u8 _r_b; + + u8 _r_c:6; + u8 affil_supp:1; + u8 affil_valid:1; + + u32 _r_d; + + u8 stp_sas_addr[8]; + + struct dev_to_host_fis fis; + + u32 _r_e; + + u8 affil_stp_ini_addr[8]; + + __be32 crc; +} __attribute__ ((packed)); + +struct smp_resp { + u8 frame_type; + u8 function; + u8 result; + u8 reserved; + union { + struct report_general_resp rg; + struct discover_resp disc; + struct report_phy_sata_resp rps; + }; +} __attribute__ ((packed)); + +#else +#error "Bitfield order not defined!" +#endif + +#endif /* _SAS_H_ */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 1bc6752..84a6d5f 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -429,4 +429,10 @@ struct scsi_lun { /* Used to obtain the PCI location of a device */ #define SCSI_IOCTL_GET_PCI 0x5387 +/* Pull a u32 out of a SCSI message (using BE SCSI conventions) */ +static inline u32 scsi_to_u32(u8 *ptr) +{ + return (ptr[0]<<24) + (ptr[1]<<16) + (ptr[2]<<8) + ptr[3]; +} + #endif /* _SCSI_SCSI_H */ -- cgit v0.10.2 From 187afbed1814ea0851bf30bacbf807217dd7864b Mon Sep 17 00:00:00 2001 From: Jon Masters Date: Mon, 28 Aug 2006 17:08:21 -0500 Subject: [SCSI] MODULE_FIRMWARE for binary firmware(s) Right now, various kernel modules are being migrated over to use request_firmware in order to pull in binary firmware blobs from userland when the module is loaded. This makes sense. However, there is right now little mechanism in place to automatically determine which binary firmware blobs must be included with a kernel in order to satisfy the prerequisites of these drivers. This affects vendors, but also regular users to a certain extent too. The attached patch introduces MODULE_FIRMWARE as a mechanism for advertising that a particular firmware file is to be loaded - it will then show up via modinfo and could be used e.g. when packaging a kernel. 
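As an illustration only (the driver, device and firmware path below are invented, not taken from any in-tree user), a driver that already pulls its blob in with request_firmware() would simply advertise the same file name:

    #include <linux/module.h>
    #include <linux/firmware.h>

    #define ACME_FW_FILE "acme/acme_fw.bin"   /* hypothetical firmware blob */

    static int acme_load_firmware(struct device *dev)
    {
            const struct firmware *fw;
            int err;

            /* userland (udev/hotplug) supplies the blob at load time */
            err = request_firmware(&fw, ACME_FW_FILE, dev);
            if (err)
                    return err;
            /* ... download fw->data / fw->size to the hardware here ... */
            release_firmware(fw);
            return 0;
    }

    /* new tag: lets modinfo (and hence packagers) see which blob is needed */
    MODULE_FIRMWARE(ACME_FW_FILE);

modinfo would then list the firmware file alongside the other MODULE_ tags.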
Signed-off-by: Jon Masters Comments added in line with all the other MODULE_ tag Signed-off-by: James Bottomley Signed-off-by: Greg Kroah-Hartman diff --git a/include/linux/module.h b/include/linux/module.h index 0dfb794..d4486cc 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -156,6 +156,11 @@ extern struct module __this_module; */ #define MODULE_VERSION(_version) MODULE_INFO(version, _version) +/* Optional firmware file (or files) needed by the module + * format is simply firmware file name. Multiple firmware + * files require multiple MODULE_FIRMWARE() specifiers */ +#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) + /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); -- cgit v0.10.2 From bc229b3663dcd7d8f266cb13b0839efdee6d95b5 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Mon, 28 Aug 2006 17:08:21 -0500 Subject: [SCSI] aic94xx: add MODULE_FIRMWARE tag Add a tag which shows what the firmware file we're requesting is. Signed-off-by: James Bottomley diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c index 9050c6f3f..d9b6da5f 100644 --- a/drivers/scsi/aic94xx/aic94xx_seq.c +++ b/drivers/scsi/aic94xx/aic94xx_seq.c @@ -28,6 +28,7 @@ #include #include +#include #include #include "aic94xx_reg.h" #include "aic94xx_hwi.h" @@ -1399,3 +1400,5 @@ void asd_update_port_links(struct asd_sas_phy *sas_phy) if (err) asd_printk("couldn't update DDB 0:error:%d\n", err); } + +MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE); -- cgit v0.10.2 From f19eaa7f53736449a6eac89c3863eca2c64d5913 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 30 Aug 2006 14:18:33 -0700 Subject: [SCSI] aic94xx: Increase can_queue for better performance This patch sets can_queue in the aic94xx driver's scsi_host to better performing values than what's there currently. It seems that asd_ha->seq.can_queue reflects the number of requests that can be queued per controller; so long as there's one scsi_host per controller, it seems logical that the scsi_host ought to have the same can_queue value. To the best of my (still limited) knowledge, this method provides the correct value. The effect of leaving this value set to 1 is terrible performance in the case of either (a) certain Maxtor SAS drives flying solo or (b) flooding several disks with I/O simultaneously (md-raid). There may be more scenarios where we see similar problems that I haven't uncovered. Signed-off-by: Darrick J. Wong Signed-off-by: James Bottomley diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 3ec2e46..69aa708 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -620,6 +620,8 @@ static int __devinit asd_pci_probe(struct pci_dev *dev, asd_ha->hw_prof.bios.present ? "build " : "not present", asd_ha->hw_prof.bios.bld); + shost->can_queue = asd_ha->seq.can_queue; + if (use_msi) pci_enable_msi(asd_ha->pcidev); -- cgit v0.10.2 From 492dfb489658dfe4a755fa29dd0e34e9c8bd8fb8 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 30 Aug 2006 15:48:45 -0400 Subject: [SCSI] block: add support for shared tag maps The current block queue implementation already contains most of the machinery for shared tag maps. 
The only remaining pieces are a way to allocate and destroy a tag map independently of the queues (so that the maps can be managed on the life cycle of the overseeing entity) Acked-by: Jens Axboe Signed-off-by: James Bottomley diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index ddd9253..556a3d3 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -848,21 +848,18 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag) EXPORT_SYMBOL(blk_queue_find_tag); /** - * __blk_queue_free_tags - release tag maintenance info - * @q: the request queue for the device + * __blk_free_tags - release a given set of tag maintenance info + * @bqt: the tag map to free * - * Notes: - * blk_cleanup_queue() will take care of calling this function, if tagging - * has been used. So there's no need to call this directly. - **/ -static void __blk_queue_free_tags(request_queue_t *q) + * Tries to free the specified @bqt@. Returns true if it was + * actually freed and false if there are still references using it + */ +static int __blk_free_tags(struct blk_queue_tag *bqt) { - struct blk_queue_tag *bqt = q->queue_tags; - - if (!bqt) - return; + int retval; - if (atomic_dec_and_test(&bqt->refcnt)) { + retval = atomic_dec_and_test(&bqt->refcnt); + if (retval) { BUG_ON(bqt->busy); BUG_ON(!list_empty(&bqt->busy_list)); @@ -873,12 +870,49 @@ static void __blk_queue_free_tags(request_queue_t *q) bqt->tag_map = NULL; kfree(bqt); + } + return retval; +} + +/** + * __blk_queue_free_tags - release tag maintenance info + * @q: the request queue for the device + * + * Notes: + * blk_cleanup_queue() will take care of calling this function, if tagging + * has been used. So there's no need to call this directly. + **/ +static void __blk_queue_free_tags(request_queue_t *q) +{ + struct blk_queue_tag *bqt = q->queue_tags; + + if (!bqt) + return; + + __blk_free_tags(bqt); + q->queue_tags = NULL; q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED); } + +/** + * blk_free_tags - release a given set of tag maintenance info + * @bqt: the tag map to free + * + * For externally managed @bqt@ frees the map. Callers of this + * function must guarantee to have released all the queues that + * might have been using this tag map. 
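+ * If a queue is still holding a reference, the BUG() below will fire.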
+ */ +void blk_free_tags(struct blk_queue_tag *bqt) +{ + if (unlikely(!__blk_free_tags(bqt))) + BUG(); +} +EXPORT_SYMBOL(blk_free_tags); + /** * blk_queue_free_tags - release tag maintenance info * @q: the request queue for the device @@ -901,7 +935,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) unsigned long *tag_map; int nr_ulongs; - if (depth > q->nr_requests * 2) { + if (q && depth > q->nr_requests * 2) { depth = q->nr_requests * 2; printk(KERN_ERR "%s: adjusted depth to %d\n", __FUNCTION__, depth); @@ -927,6 +961,38 @@ fail: return -ENOMEM; } +static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, + int depth) +{ + struct blk_queue_tag *tags; + + tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); + if (!tags) + goto fail; + + if (init_tag_map(q, tags, depth)) + goto fail; + + INIT_LIST_HEAD(&tags->busy_list); + tags->busy = 0; + atomic_set(&tags->refcnt, 1); + return tags; +fail: + kfree(tags); + return NULL; +} + +/** + * blk_init_tags - initialize the tag info for an external tag map + * @depth: the maximum queue depth supported + * @tags: the tag to use + **/ +struct blk_queue_tag *blk_init_tags(int depth) +{ + return __blk_queue_init_tags(NULL, depth); +} +EXPORT_SYMBOL(blk_init_tags); + /** * blk_queue_init_tags - initialize the queue tag info * @q: the request queue for the device @@ -941,16 +1007,10 @@ int blk_queue_init_tags(request_queue_t *q, int depth, BUG_ON(tags && q->queue_tags && tags != q->queue_tags); if (!tags && !q->queue_tags) { - tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); - if (!tags) - goto fail; + tags = __blk_queue_init_tags(q, depth); - if (init_tag_map(q, tags, depth)) + if (!tags) goto fail; - - INIT_LIST_HEAD(&tags->busy_list); - tags->busy = 0; - atomic_set(&tags->refcnt, 1); } else if (q->queue_tags) { if ((rc = blk_queue_resize_tags(q, depth))) return rc; @@ -1002,6 +1062,13 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth) } /* + * Currently cannot replace a shared tag map with a new + * one, so error out if this is the case + */ + if (atomic_read(&bqt->refcnt) != 1) + return -EBUSY; + + /* * save the old state info, so we can copy it back */ tag_index = bqt->tag_index; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aafe827..427b0d6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -746,6 +746,8 @@ extern void blk_queue_free_tags(request_queue_t *); extern int blk_queue_resize_tags(request_queue_t *, int); extern void blk_queue_invalidate_tags(request_queue_t *); extern long blk_congestion_wait(int rw, long timeout); +extern struct blk_queue_tag *blk_init_tags(int); +extern void blk_free_tags(struct blk_queue_tag *); extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *); extern int blkdev_issue_flush(struct block_device *, sector_t *); -- cgit v0.10.2 From 86e33a296c2c9ed6eece0bfff4ac776f42040504 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 30 Aug 2006 09:45:51 -0400 Subject: [SCSI] add shared tag map helpers This patch adds support for sharing tag maps at the host level (i.e. either every queue [LUN] has its own tag map or there's a single one for the entire host). 
This formulation is primarily intended to help single issue queue hardware, like the aic7xxx Signed-off-by: James Bottomley diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f244d4f..68ef163 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -265,6 +265,9 @@ static void scsi_host_dev_release(struct device *dev) destroy_workqueue(shost->work_q); scsi_destroy_command_freelist(shost); + if (shost->bqt) + blk_free_tags(shost->bqt); + kfree(shost->shost_data); if (parent) diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index b3dd90f..39c6f8c 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -16,6 +16,7 @@ struct scsi_target; struct Scsi_Host; struct scsi_host_cmd_pool; struct scsi_transport_template; +struct blk_queue_tags; /* @@ -466,6 +467,12 @@ struct Scsi_Host { struct scsi_transport_template *transportt; /* + * area to keep a shared tag map (if needed, will be + * NULL if not) + */ + struct blk_queue_tag *bqt; + + /* * The following two fields are protected with host_lock; * however, eh routines can safely access during eh processing * without acquiring the lock. diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index e47e36a..4eea254 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -4,6 +4,7 @@ #include #include #include +#include #define MSG_SIMPLE_TAG 0x20 @@ -66,7 +67,8 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) return; if (!blk_queue_tagged(sdev->request_queue)) - blk_queue_init_tags(sdev->request_queue, depth, NULL); + blk_queue_init_tags(sdev->request_queue, depth, + sdev->host->bqt); scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); } @@ -131,4 +133,14 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag) return sdev->current_cmnd; } +/** + * scsi_init_shared_tag_map - create a shared tag map + * @shost: the host to share the tag map among all devices + * @depth: the total depth of the map + */ +static inline void scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth) +{ + shost->bqt = blk_init_tags(depth); +} + #endif /* _SCSI_SCSI_TCQ_H */ -- cgit v0.10.2 From 85b6c720b0931101c8bcc3a5abdc2b8514b0fb4b Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 31 Aug 2006 18:15:22 -0400 Subject: [SCSI] sd: fix cache flushing on module removal (and individual device removal) The fix isn't actually in sd: it's in scsi_device_get(). I modified it to allow devices to be returned in SDEV_CANCEL, but not SDEV_DEL. This means that the device_remove_driver, which occurs in device_del() in scsi_remove_device() after the device has gone into SDEV_CANCEL is now effective at flushing the cache. 
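To make the ordering concrete (a sketch of the path described above only; the sd-internal step is paraphrased rather than an exact call chain):

    scsi_remove_device()
        -> device_del()                   device is already in SDEV_CANCEL
            -> device_remove_driver()
                -> sd flushes the cache   this needs scsi_device_get(), which
                                          now refuses only SDEV_DEL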
Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 94df671..3784392 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -851,14 +851,14 @@ EXPORT_SYMBOL(scsi_track_queue_full); */ int scsi_device_get(struct scsi_device *sdev) { - if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) + if (sdev->sdev_state == SDEV_DEL) return -ENXIO; if (!get_device(&sdev->sdev_gendev)) return -ENXIO; - if (!try_module_get(sdev->host->hostt->module)) { - put_device(&sdev->sdev_gendev); - return -ENXIO; - } + /* We can fail this if we're doing SCSI operations + * from module exit (like cache flush) */ + try_module_get(sdev->host->hostt->module); + return 0; } EXPORT_SYMBOL(scsi_device_get); @@ -873,7 +873,10 @@ EXPORT_SYMBOL(scsi_device_get); */ void scsi_device_put(struct scsi_device *sdev) { - module_put(sdev->host->hostt->module); + /* The module refcount will be zero if scsi_device_get() + * was called from a module removal routine */ + if (likely(module_refcount(sdev->host->hostt->module) != 0)) + module_put(sdev->host->hostt->module); put_device(&sdev->sdev_gendev); } EXPORT_SYMBOL(scsi_device_put); -- cgit v0.10.2 From e5b3cd42960a10c1bc3701d4f00767463c88ec9d Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 21 Aug 2006 15:53:25 -0400 Subject: [SCSI] SCSI: sanitize INQUIRY strings Sanitize the Vendor, Product, and Revision strings contained in an INQUIRY result by setting all non-graphic or non-ASCII characters to ' '. Since the standard disallows such characters, this will affect only non-compliant devices. To help maintain backward compatibility, NUL characters are treated specially. They are taken as string terminators; they and all the following characters are set to ' '. If some valid characters get erased as a result... well, we weren't seeing them before so we haven't lost anything. The primary purpose of this change is to allow blacklist entries to match devices with illegal Vendor or Product strings. In addition, the patch updates a couple of function prototypes, giving inq_result its correct type (unsigned char *). Signed-off-by: Alan Stern Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index a24d346..31d05ab 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -397,6 +397,32 @@ void scsi_target_reap(struct scsi_target *starget) } /** + * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string + * @s: INQUIRY result string to sanitize + * @len: length of the string + * + * Description: + * The SCSI spec says that INQUIRY vendor, product, and revision + * strings must consist entirely of graphic ASCII characters, + * padded on the right with spaces. Since not all devices obey + * this rule, we will replace non-graphic or non-ASCII characters + * with spaces. Exception: a NUL character is interpreted as a + * string terminator, so all the following characters are set to + * spaces. 
+ **/ +static void sanitize_inquiry_string(unsigned char *s, int len) +{ + int terminated = 0; + + for (; len > 0; (--len, ++s)) { + if (*s == 0) + terminated = 1; + if (terminated || *s < 0x20 || *s > 0x7e) + *s = ' '; + } +} + +/** * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY * @sdev: scsi_device to probe * @inq_result: area to store the INQUIRY result @@ -410,7 +436,7 @@ void scsi_target_reap(struct scsi_target *starget) * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length * are copied to the scsi_device any flags value is stored in *@bflags. **/ -static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result, +static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, int result_len, int *bflags) { unsigned char scsi_cmd[MAX_COMMAND_SIZE]; @@ -469,7 +495,11 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result, } if (result == 0) { - response_len = (unsigned char) inq_result[4] + 5; + sanitize_inquiry_string(&inq_result[8], 8); + sanitize_inquiry_string(&inq_result[16], 16); + sanitize_inquiry_string(&inq_result[32], 4); + + response_len = inq_result[4] + 5; if (response_len > 255) response_len = first_inquiry_len; /* sanity */ @@ -575,7 +605,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result, * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized **/ -static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) +static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, + int *bflags) { /* * XXX do not save the inquiry, since it can change underneath us, -- cgit v0.10.2 From ffd0436ed2e5a741c8d30062b489b989acf0a526 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:24 -0400 Subject: [SCSI] libiscsi, iscsi_tcp, iscsi_iser: check that burst lengths are valid. iSCSI RFC states that the first burst length must be smaller than the max burst length. We currently assume targets will be good, but that may not be the case, so this patch adds a check. This patch also moves the unsol data out offset to the lib so the LLDs do not have to track it. 
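A standalone sketch of the bookkeeping described above (hypothetical names such as model_session; not libiscsi itself, and assuming immediate data counts toward the first burst): it rejects a first burst larger than the max burst and then emits the unsolicited Data-Out (offset, length) sequence that unsol_offset and unsol_count drive in the library.

#include <stdio.h>

struct model_session {
	unsigned first_burst;		/* FirstBurstLength */
	unsigned max_burst;		/* MaxBurstLength */
	unsigned max_xmit_dlength;	/* per-PDU data segment limit */
};

static int model_conn_start(const struct model_session *s)
{
	/* the check added by the patch: reject bogus negotiation results */
	if (s->first_burst > s->max_burst) {
		fprintf(stderr, "invalid burst lengths: first %u max %u\n",
			s->first_burst, s->max_burst);
		return -1;		/* -EINVAL in the kernel */
	}
	return 0;
}

static void model_send_unsol(const struct model_session *s,
			     unsigned total_length, unsigned imm_count)
{
	unsigned unsol_count, unsol_offset;

	unsol_count = (total_length < s->first_burst ?
		       total_length : s->first_burst) - imm_count;
	unsol_offset = imm_count;	/* stream starts after immediate data */

	while (unsol_count) {
		unsigned dlen = unsol_count > s->max_xmit_dlength ?
				s->max_xmit_dlength : unsol_count;

		printf("Data-Out: offset %u, length %u%s\n",
		       unsol_offset, dlen,
		       dlen == unsol_count ? " (F bit set)" : "");
		unsol_offset += dlen;
		unsol_count -= dlen;
	}
}

int main(void)
{
	struct model_session s = {
		.first_burst = 65536,
		.max_burst = 262144,
		.max_xmit_dlength = 8192,
	};

	if (model_conn_start(&s))
		return 1;
	/* 128k WRITE with 8k already sent as immediate data */
	model_send_unsol(&s, 131072, 8192);
	return 0;
}

With the sample numbers the model prints seven 8 KB Data-Outs whose offsets start right after the immediate data and end exactly at the 64 KB first burst.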
Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1437d7e..101e407 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -141,18 +141,11 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask) if (sc->sc_data_direction == DMA_TO_DEVICE) { BUG_ON(ctask->total_length == 0); - /* bytes to be sent via RDMA operations */ - iser_ctask->rdma_data_count = ctask->total_length - - ctask->imm_count - - ctask->unsol_count; - debug_scsi("cmd [itt %x total %d imm %d unsol_data %d " - "rdma_data %d]\n", + debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n", ctask->itt, ctask->total_length, ctask->imm_count, - ctask->unsol_count, iser_ctask->rdma_data_count); - } else - /* bytes to be sent via RDMA operations */ - iser_ctask->rdma_data_count = ctask->total_length; + ctask->unsol_count); + } iser_ctask_rdma_init(iser_ctask); } @@ -196,13 +189,10 @@ iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn, { struct iscsi_data hdr; int error = 0; - struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data; /* Send data-out PDUs while there's still unsolicited data to send */ while (ctask->unsol_count > 0) { - iscsi_prep_unsolicit_data_pdu(ctask, &hdr, - iser_ctask->rdma_data_count); - + iscsi_prep_unsolicit_data_pdu(ctask, &hdr); debug_scsi("Sending data-out: itt 0x%x, data count %d\n", hdr.itt, ctask->data_count); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 3350ba6..7c3d0c9 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -257,7 +257,6 @@ struct iscsi_iser_conn { struct iscsi_iser_cmd_task { struct iser_desc desc; struct iscsi_iser_conn *iser_conn; - int rdma_data_count;/* RDMA bytes */ enum iser_task_status status; int command_sent; /* set if command sent */ int dir[ISER_DIRS_NUM]; /* set if dir use*/ diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 058f094..a97a3a4 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -1264,19 +1264,6 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, r2t->data_count); } -static void -iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) -{ - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_data_task *dtask; - - dtask = tcp_ctask->dtask = &tcp_ctask->unsol_dtask; - iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr, - tcp_ctask->r2t_data_count); - iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr, - sizeof(struct iscsi_hdr)); -} - /** * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands * @conn: iscsi connection @@ -1326,14 +1313,11 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) if (ctask->unsol_count) tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; - tcp_ctask->r2t_data_count = ctask->total_length - - ctask->imm_count - - ctask->unsol_count; - debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " - "r2t_data %d]\n", + debug_scsi("cmd [itt 0x%x total %d imm_data %d " + "unsol count %d, unsol offset %d]\n", ctask->itt, ctask->total_length, ctask->imm_count, - ctask->unsol_count, tcp_ctask->r2t_data_count); + ctask->unsol_count, ctask->unsol_offset); } else tcp_ctask->xmstate = XMSTATE_R_HDR; @@ -1531,8 +1515,10 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->xmstate |= XMSTATE_UNS_DATA; if 
(tcp_ctask->xmstate & XMSTATE_UNS_INIT) { - iscsi_unsolicit_data_init(conn, ctask); - dtask = tcp_ctask->dtask; + dtask = tcp_ctask->dtask = &tcp_ctask->unsol_dtask; + iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr); + iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr, + sizeof(struct iscsi_hdr)); if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)dtask->hdrext); @@ -1720,7 +1706,6 @@ data_out_done: * Done with this R2T. Check if there are more * outstanding R2Ts ready to be processed. */ - BUG_ON(tcp_ctask->r2t_data_count - r2t->data_length < 0); if (conn->datadgst_en) { rc = iscsi_digest_final_send(conn, ctask, &dtask->digestbuf, &dtask->digest, 1); @@ -1732,7 +1717,6 @@ data_out_done: debug_tcp("r2t done dout digest 0x%x\n", dtask->digest); } - tcp_ctask->r2t_data_count -= r2t->data_length; tcp_ctask->r2t = NULL; spin_lock_bh(&session->lock); __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 6a4ee70..aace8f7 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -157,7 +157,6 @@ struct iscsi_tcp_cmd_task { struct scatterlist *bad_sg; /* assert statement */ int sg_count; /* SG's to process */ uint32_t exp_r2tsn; - int r2t_data_count; /* R2T Data-Out bytes */ int data_offset; struct iscsi_r2t_info *r2t; /* in progress R2T */ struct iscsi_queue r2tpool; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 5884cd2..a7c6e70 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -68,8 +68,7 @@ iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn); void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask, - struct iscsi_data *hdr, - int transport_data_cnt) + struct iscsi_data *hdr) { struct iscsi_conn *conn = ctask->conn; @@ -82,14 +81,12 @@ void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask, hdr->itt = ctask->hdr->itt; hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); - - hdr->offset = cpu_to_be32(ctask->total_length - - transport_data_cnt - - ctask->unsol_count); + hdr->offset = cpu_to_be32(ctask->unsol_offset); if (ctask->unsol_count > conn->max_xmit_dlength) { hton24(hdr->dlength, conn->max_xmit_dlength); ctask->data_count = conn->max_xmit_dlength; + ctask->unsol_offset += ctask->data_count; hdr->flags = 0; } else { hton24(hdr->dlength, ctask->unsol_count); @@ -125,6 +122,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) memcpy(hdr->cdb, sc->cmnd, sc->cmd_len); memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len); + ctask->data_count = 0; if (sc->sc_data_direction == DMA_TO_DEVICE) { hdr->flags |= ISCSI_FLAG_CMD_WRITE; /* @@ -143,6 +141,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) */ ctask->imm_count = 0; ctask->unsol_count = 0; + ctask->unsol_offset = 0; ctask->unsol_datasn = 0; if (session->imm_data_en) { @@ -156,9 +155,12 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) } else zero_data(ctask->hdr->dlength); - if (!session->initial_r2t_en) + if (!session->initial_r2t_en) { ctask->unsol_count = min(session->first_burst, ctask->total_length) - ctask->imm_count; + ctask->unsol_offset = ctask->imm_count; + } + if (!ctask->unsol_count) /* No unsolicit Data-Out's */ ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL; @@ -1520,11 +1522,18 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = 
conn->session; - if (session == NULL) { + if (!session) { printk(KERN_ERR "iscsi: can't start unbound connection\n"); return -EPERM; } + if (session->first_burst > session->max_burst) { + printk("iscsi: invalid burst lengths: " + "first_burst %d max_burst %d\n", + session->first_burst, session->max_burst); + return -EINVAL; + } + spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_STARTED; session->state = ISCSI_STATE_LOGGED_IN; diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 41904f6..4900650 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -102,6 +102,8 @@ struct iscsi_cmd_task { uint32_t unsol_datasn; int imm_count; /* imm-data (bytes) */ int unsol_count; /* unsolicited (bytes)*/ + /* offset in unsolicited stream (bytes); */ + int unsol_offset; int data_count; /* remaining Data-Out */ struct scsi_cmnd *sc; /* associated SCSI cmd*/ int total_length; @@ -290,8 +292,7 @@ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, extern int iscsi_check_assign_cmdsn(struct iscsi_session *, struct iscsi_nopin *); extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *, - struct iscsi_data *hdr, - int transport_data_cnt); + struct iscsi_data *hdr); extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *, char *, uint32_t); extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *, -- cgit v0.10.2 From 60ecebf5a10e42f5e2d6e07eb9e24bdee8500b81 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:25 -0400 Subject: [SCSI] add refcouting around ctask usage in main IO patch It is possible that a ctask could be completing and getting cleaned up at the same time, we are finishing up the last data transfer. This could then result in the data transfer code using stale or invalid values. This patch adds a refcount to the ctask. When the count goes to zero then we know the transmit thread and recv thread or softirq are not touching it and we can safely release it. The eh should not need to grab a reference because it only cleans up a task if it has both the xmit mutex and recv lock (or recv side suspended). Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index a7c6e70..9584cbc 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -179,16 +179,15 @@ EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu); /** * iscsi_complete_command - return command back to scsi-ml - * @session: iscsi session * @ctask: iscsi cmd task * * Must be called with session lock. * This function returns the scsi command to scsi-ml and returns * the cmd task to the pool of available cmd tasks. 
*/ -static void iscsi_complete_command(struct iscsi_session *session, - struct iscsi_cmd_task *ctask) +static void iscsi_complete_command(struct iscsi_cmd_task *ctask) { + struct iscsi_session *session = ctask->conn->session; struct scsi_cmnd *sc = ctask->sc; ctask->state = ISCSI_TASK_COMPLETED; @@ -198,6 +197,35 @@ static void iscsi_complete_command(struct iscsi_session *session, sc->scsi_done(sc); } +static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask) +{ + atomic_inc(&ctask->refcount); +} + +static void iscsi_get_ctask(struct iscsi_cmd_task *ctask) +{ + spin_lock_bh(&ctask->conn->session->lock); + __iscsi_get_ctask(ctask); + spin_unlock_bh(&ctask->conn->session->lock); +} + +static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask) +{ + struct iscsi_conn *conn = ctask->conn; + + if (atomic_dec_and_test(&ctask->refcount)) { + conn->session->tt->cleanup_cmd_task(conn, ctask); + iscsi_complete_command(ctask); + } +} + +static void iscsi_put_ctask(struct iscsi_cmd_task *ctask) +{ + spin_lock_bh(&ctask->conn->session->lock); + __iscsi_put_ctask(ctask); + spin_unlock_bh(&ctask->conn->session->lock); +} + /** * iscsi_cmd_rsp - SCSI Command Response processing * @conn: iscsi connection @@ -274,7 +302,7 @@ out: (long)sc, sc->result, ctask->itt); conn->scsirsp_pdus_cnt++; - iscsi_complete_command(conn->session, ctask); + __iscsi_put_ctask(ctask); return rc; } @@ -338,7 +366,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, BUG_ON((void*)ctask != ctask->sc->SCp.ptr); if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { conn->scsirsp_pdus_cnt++; - iscsi_complete_command(session, ctask); + __iscsi_put_ctask(ctask); } break; case ISCSI_OP_R2T: @@ -563,7 +591,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) BUG_ON(conn->ctask && conn->mtask); if (conn->ctask) { + iscsi_get_ctask(conn->ctask); rc = tt->xmit_cmd_task(conn, conn->ctask); + iscsi_put_ctask(conn->ctask); if (rc) goto again; /* done with this in-progress ctask */ @@ -604,12 +634,19 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) struct iscsi_cmd_task, running); conn->ctask->state = ISCSI_TASK_RUNNING; list_move_tail(conn->xmitqueue.next, &conn->run_list); + __iscsi_get_ctask(conn->ctask); spin_unlock_bh(&conn->session->lock); rc = tt->xmit_cmd_task(conn, conn->ctask); if (rc) goto again; + spin_lock_bh(&conn->session->lock); + __iscsi_put_ctask(conn->ctask); + if (rc) { + spin_unlock_bh(&conn->session->lock); + goto again; + } } spin_unlock_bh(&conn->session->lock); /* done with this ctask */ @@ -659,6 +696,7 @@ enum { FAILURE_SESSION_FAILED, FAILURE_SESSION_FREED, FAILURE_WINDOW_CLOSED, + FAILURE_OOM, FAILURE_SESSION_TERMINATE, FAILURE_SESSION_IN_RECOVERY, FAILURE_SESSION_RECOVERY_TIMEOUT, @@ -717,10 +755,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) conn = session->leadconn; - __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); + if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask, + sizeof(void*))) { + reason = FAILURE_OOM; + goto reject; + } sc->SCp.phase = session->age; sc->SCp.ptr = (char *)ctask; + atomic_set(&ctask->refcount, 1); ctask->state = ISCSI_TASK_PENDING; ctask->mtask = NULL; ctask->conn = conn; @@ -1057,13 +1100,11 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, sc = ctask->sc; if (!sc) return; - - conn->session->tt->cleanup_cmd_task(conn, ctask); iscsi_ctask_mtask_cleanup(ctask); sc->result = err; sc->resid = sc->request_bufflen; - iscsi_complete_command(conn->session, ctask); + 
__iscsi_put_ctask(ctask); } int iscsi_eh_abort(struct scsi_cmnd *sc) diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 4900650..401192e 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -112,6 +112,7 @@ struct iscsi_cmd_task { /* state set/tested under session->lock */ int state; + atomic_t refcount; struct list_head running; /* running cmd list */ void *dd_data; /* driver/transport data */ }; -- cgit v0.10.2 From 98a9416af08385f8497e9c1595113a81aefa5d49 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:26 -0400 Subject: [SCSI] attempt to complete r2t with data len greater than max burst A couple targets like string bean and MDS, send r2ts with a data len greater than the max burst we agreed to. We were being strict in our enforcing of the iscsi rfc in that code path, but there is no driver limitation that prevents us from fullfilling the request. To allow those targets to work we will ignore the max_burst length and send as much data as the target asks for assuming it has consciously decided to override its max burst length. Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index a97a3a4..d6927f1 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -358,8 +358,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) int r2tsn = be32_to_cpu(rhdr->r2tsn); int rc; - if (tcp_conn->in.datalen) + if (tcp_conn->in.datalen) { + printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n", + tcp_conn->in.datalen); return ISCSI_ERR_DATALEN; + } if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn) return ISCSI_ERR_R2TSN; @@ -385,15 +388,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) r2t->exp_statsn = rhdr->statsn; r2t->data_length = be32_to_cpu(rhdr->data_length); - if (r2t->data_length == 0 || - r2t->data_length > session->max_burst) { + if (r2t->data_length == 0) { + printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n"); spin_unlock(&session->lock); return ISCSI_ERR_DATALEN; } + if (r2t->data_length > session->max_burst) + debug_scsi("invalid R2T with data len %u and max burst %u." + "Attempting to execute request.\n", + r2t->data_length, session->max_burst); + r2t->data_offset = be32_to_cpu(rhdr->data_offset); if (r2t->data_offset + r2t->data_length > ctask->total_length) { spin_unlock(&session->lock); + printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at " + "offset %u and total length %d\n", r2t->data_length, + r2t->data_offset, ctask->total_length); return ISCSI_ERR_DATALEN; } -- cgit v0.10.2 From 62f383003c22cd34920d0412465eddcb1223da0d Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:27 -0400 Subject: [SCSI] iscsi_tcp: fix padding, data digests, and IO at weird offsets iscsi_tcp calculates padding by using the expected transfer length. This has the problem where if we have immediate data = no and initial R2T = yes, and the transfer length ended up needing padding then we send: 1. header 2. padding which should have gone after data 3. data Besides this bug, we also assume the target will always ask for nice transfer lengths and the first burst length will always be a nice value. As far as I can tell form the RFC this is not a requirement. It would be silly to do this, but if someone did it we will end doing bad things. 
Finally the last bug in that bit of code is in our handling of the recalculation of data digests when we do not send a whole iscsi_buf in one try. The bug here is that we call crypto_digest_final on a iscsi_sendpage error, then when we send the rest of the iscsi_buf, we doiscsi_data_digest_init and this causes the previous data digest to be lost. And to make matters worse, some of these bugs are replicated over and over and over again for immediate data, solicited data and unsolicited data. So the attached patch made over the iscsi git tree (see kernel.org/git for details) which I updated today to include the patches I said I merged, consolidates the sending of data, padding and digests and calculation of data digests and fixes the above bugs. Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index d6927f1..290c1d7 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -281,7 +281,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, { struct iscsi_data *hdr; struct scsi_cmnd *sc = ctask->sc; - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; hdr = &r2t->dtask.hdr; memset(hdr, 0, sizeof(struct iscsi_data)); @@ -336,10 +335,12 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, sg_count += sg->length; } BUG_ON(r2t->sg == NULL); - } else - iscsi_buf_init_iov(&tcp_ctask->sendbuf, + } else { + iscsi_buf_init_iov(&r2t->sendbuf, (char*)sc->request_buffer + r2t->data_offset, r2t->data_count); + r2t->sg = NULL; + } } /** @@ -503,7 +504,6 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) goto copy_hdr; spin_lock(&session->lock); - iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); spin_unlock(&session->lock); break; @@ -676,15 +676,15 @@ iscsi_tcp_copy(struct iscsi_conn *conn) } static inline void -partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn, - struct scatterlist *sg, int offset, int length) +partial_sg_digest_update(struct crypto_tfm *tfm, struct scatterlist *sg, + int offset, int length) { struct scatterlist temp; memcpy(&temp, sg, sizeof(struct scatterlist)); temp.offset = offset; temp.length = length; - crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1); + crypto_digest_update(tfm, &temp, 1); } static void @@ -751,7 +751,8 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) tcp_conn->data_rx_tfm, &sg[i], 1); else - partial_sg_digest_update(tcp_conn, + partial_sg_digest_update( + tcp_conn->data_rx_tfm, &sg[i], sg[i].offset + offset, sg[i].length - offset); @@ -765,7 +766,8 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) /* * data-in is complete, but buffer not... 
*/ - partial_sg_digest_update(tcp_conn, &sg[i], + partial_sg_digest_update(tcp_conn->data_rx_tfm, + &sg[i], sg[i].offset, sg[i].length-rc); rc = 0; break; @@ -783,7 +785,6 @@ done: (long)sc, sc->result, ctask->itt, tcp_conn->in.hdr->flags); spin_lock(&conn->session->lock); - iscsi_tcp_cleanup_ctask(conn, ctask); __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); spin_unlock(&conn->session->lock); } @@ -803,9 +804,6 @@ iscsi_data_recv(struct iscsi_conn *conn) rc = iscsi_scsi_data_in(conn); break; case ISCSI_OP_SCSI_CMD_RSP: - spin_lock(&conn->session->lock); - iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask); - spin_unlock(&conn->session->lock); case ISCSI_OP_TEXT_RSP: case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_ASYNC_EVENT: @@ -1188,37 +1186,12 @@ iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf, static inline void iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, - struct iscsi_cmd_task *ctask) + struct iscsi_tcp_cmd_task *tcp_ctask) { - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - - BUG_ON(!tcp_conn->data_tx_tfm); crypto_digest_init(tcp_conn->data_tx_tfm); tcp_ctask->digest_count = 4; } -static int -iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, - struct iscsi_buf *buf, uint32_t *digest, int final) -{ - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - int rc = 0; - int sent = 0; - - if (final) - crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest); - - iscsi_buf_init_iov(buf, (char*)digest, 4); - rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); - if (rc) { - tcp_ctask->datadigest = *digest; - tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST; - } else - tcp_ctask->digest_count = 4; - return rc; -} - /** * iscsi_solicit_data_cont - initialize next Data-Out * @conn: iscsi connection @@ -1236,7 +1209,6 @@ static void iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, struct iscsi_r2t_info *r2t, int left) { - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_data *hdr; struct scsi_cmnd *sc = ctask->sc; int new_offset; @@ -1265,14 +1237,30 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, sizeof(struct iscsi_hdr)); - if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) { - BUG_ON(tcp_ctask->bad_sg == r2t->sg); + if (iscsi_buf_left(&r2t->sendbuf)) + return; + + if (sc->use_sg) { iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); r2t->sg += 1; - } else - iscsi_buf_init_iov(&tcp_ctask->sendbuf, + } else { + iscsi_buf_init_iov(&r2t->sendbuf, (char*)sc->request_buffer + new_offset, r2t->data_count); + r2t->sg = NULL; + } +} + +static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask, + unsigned long len) +{ + tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1); + if (!tcp_ctask->pad_count) + return; + + tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count; + debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count); + tcp_ctask->xmstate |= XMSTATE_W_PAD; } /** @@ -1300,31 +1288,16 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) if (sc->use_sg) { struct scatterlist *sg = sc->request_buffer; - iscsi_buf_init_sg(&tcp_ctask->sendbuf, - &sg[tcp_ctask->sg_count++]); - tcp_ctask->sg = sg; + iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg); + tcp_ctask->sg = sg + 1; tcp_ctask->bad_sg = sg + sc->use_sg; - } else + } else { iscsi_buf_init_iov(&tcp_ctask->sendbuf, sc->request_buffer, sc->request_bufflen); - - if (ctask->imm_count) - tcp_ctask->xmstate 
|= XMSTATE_IMM_DATA; - - tcp_ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1); - if (tcp_ctask->pad_count) { - tcp_ctask->pad_count = ISCSI_PAD_LEN - - tcp_ctask->pad_count; - debug_scsi("write padding %d bytes\n", - tcp_ctask->pad_count); - tcp_ctask->xmstate |= XMSTATE_W_PAD; + tcp_ctask->sg = NULL; + tcp_ctask->bad_sg = NULL; } - - if (ctask->unsol_count) - tcp_ctask->xmstate |= XMSTATE_UNS_HDR | - XMSTATE_UNS_INIT; - debug_scsi("cmd [itt 0x%x total %d imm_data %d " "unsol count %d, unsol offset %d]\n", ctask->itt, ctask->total_length, ctask->imm_count, @@ -1410,8 +1383,8 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) } static inline int -handle_xmstate_r_hdr(struct iscsi_conn *conn, - struct iscsi_tcp_cmd_task *tcp_ctask) +iscsi_send_read_hdr(struct iscsi_conn *conn, + struct iscsi_tcp_cmd_task *tcp_ctask) { int rc; @@ -1429,7 +1402,7 @@ handle_xmstate_r_hdr(struct iscsi_conn *conn, } static inline int -handle_xmstate_w_hdr(struct iscsi_conn *conn, +iscsi_send_write_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; @@ -1440,85 +1413,125 @@ handle_xmstate_w_hdr(struct iscsi_conn *conn, iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)tcp_ctask->hdrext); rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); - if (rc) + if (rc) { tcp_ctask->xmstate |= XMSTATE_W_HDR; - return rc; + return rc; + } + + if (ctask->imm_count) { + tcp_ctask->xmstate |= XMSTATE_IMM_DATA; + iscsi_set_padding(tcp_ctask, ctask->imm_count); + + if (ctask->conn->datadgst_en) { + iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask); + tcp_ctask->immdigest = 0; + } + } + + if (ctask->unsol_count) + tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; + return 0; } -static inline int -handle_xmstate_data_digest(struct iscsi_conn *conn, - struct iscsi_cmd_task *ctask) +static int +iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - int rc; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + int sent = 0, rc; - tcp_ctask->xmstate &= ~XMSTATE_DATA_DIGEST; - debug_tcp("resent data digest 0x%x\n", tcp_ctask->datadigest); - rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, - &tcp_ctask->datadigest, 0); + if (tcp_ctask->xmstate & XMSTATE_W_PAD) { + iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, + tcp_ctask->pad_count); + if (conn->datadgst_en) + crypto_digest_update(tcp_conn->data_tx_tfm, + &tcp_ctask->sendbuf.sg, 1); + } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD)) + return 0; + + tcp_ctask->xmstate &= ~XMSTATE_W_PAD; + tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD; + debug_scsi("sending %d pad bytes for itt 0x%x\n", + tcp_ctask->pad_count, ctask->itt); + rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, + &sent); if (rc) { - tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST; - debug_tcp("resent data digest 0x%x fail!\n", - tcp_ctask->datadigest); + debug_scsi("padding send failed %d\n", rc); + tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD; } - return rc; } -static inline int -handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +static int +iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, + struct iscsi_buf *buf, uint32_t *digest) { - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - int rc; + struct iscsi_tcp_cmd_task *tcp_ctask; + struct iscsi_tcp_conn *tcp_conn; + 
int rc, sent = 0; - BUG_ON(!ctask->imm_count); - tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; + if (!conn->datadgst_en) + return 0; - if (conn->datadgst_en) { - iscsi_data_digest_init(tcp_conn, ctask); - tcp_ctask->immdigest = 0; - } + tcp_ctask = ctask->dd_data; + tcp_conn = conn->dd_data; - for (;;) { - rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, - &ctask->imm_count, &tcp_ctask->sent); - if (rc) { - tcp_ctask->xmstate |= XMSTATE_IMM_DATA; - if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, - (u8*)&tcp_ctask->immdigest); - debug_tcp("tx imm sendpage fail 0x%x\n", - tcp_ctask->datadigest); - } - return rc; - } - if (conn->datadgst_en) - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); + if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) { + crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest); + iscsi_buf_init_iov(buf, (char*)digest, 4); + } + tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST; - if (!ctask->imm_count) - break; - iscsi_buf_init_sg(&tcp_ctask->sendbuf, - &tcp_ctask->sg[tcp_ctask->sg_count++]); + rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); + if (!rc) + debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest, + ctask->itt); + else { + debug_scsi("sending digest 0x%x failed for itt 0x%x!\n", + *digest, ctask->itt); + tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST; } + return rc; +} - if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) { - rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, - &tcp_ctask->immdigest, 1); - if (rc) { - debug_tcp("sending imm digest 0x%x fail!\n", - tcp_ctask->immdigest); - return rc; +static int +iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf, + struct scatterlist **sg, int *sent, int *count, + struct iscsi_buf *digestbuf, uint32_t *digest) +{ + struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; + struct iscsi_conn *conn = ctask->conn; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + int rc, buf_sent, offset; + + while (*count) { + buf_sent = 0; + offset = sendbuf->sent; + + rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent); + *sent = *sent + buf_sent; + if (buf_sent && conn->datadgst_en) + partial_sg_digest_update(tcp_conn->data_tx_tfm, + &sendbuf->sg, sendbuf->sg.offset + offset, + buf_sent); + if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) { + iscsi_buf_init_sg(sendbuf, *sg); + *sg = *sg + 1; } - debug_tcp("sending imm digest 0x%x\n", tcp_ctask->immdigest); + + if (rc) + return rc; } - return 0; + rc = iscsi_send_padding(conn, ctask); + if (rc) + return rc; + + return iscsi_send_digest(conn, ctask, digestbuf, digest); } -static inline int -handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +static int +iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; struct iscsi_data_task *dtask; @@ -1526,14 +1539,21 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->xmstate |= XMSTATE_UNS_DATA; if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { - dtask = tcp_ctask->dtask = &tcp_ctask->unsol_dtask; + dtask = &tcp_ctask->unsol_dtask; + iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr); iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr, sizeof(struct iscsi_hdr)); if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)dtask->hdrext); + if (conn->datadgst_en) { + iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask); + dtask->digest = 0; + } + 
tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; + iscsi_set_padding(tcp_ctask, ctask->data_count); } rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); @@ -1548,247 +1568,128 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) return 0; } -static inline int -handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +static int +iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_data_task *dtask = tcp_ctask->dtask; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; int rc; - BUG_ON(!ctask->data_count); - tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; - - if (conn->datadgst_en) { - iscsi_data_digest_init(tcp_conn, ctask); - dtask->digest = 0; + if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { + BUG_ON(!ctask->unsol_count); + tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR; +send_hdr: + rc = iscsi_send_unsol_hdr(conn, ctask); + if (rc) + return rc; } - for (;;) { + if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) { + struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask; int start = tcp_ctask->sent; - rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, - &ctask->data_count, &tcp_ctask->sent); - if (rc) { - ctask->unsol_count -= tcp_ctask->sent - start; - tcp_ctask->xmstate |= XMSTATE_UNS_DATA; - /* will continue with this ctask later.. */ - if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, - (u8 *)&dtask->digest); - debug_tcp("tx uns data fail 0x%x\n", - dtask->digest); - } - return rc; - } - - BUG_ON(tcp_ctask->sent > ctask->total_length); + rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, + &tcp_ctask->sent, &ctask->data_count, + &dtask->digestbuf, &dtask->digest); ctask->unsol_count -= tcp_ctask->sent - start; - + if (rc) + return rc; + tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; /* - * XXX:we may run here with un-initial sendbuf. - * so pass it + * Done with the Data-Out. Next, check if we need + * to send another unsolicited Data-Out. */ - if (conn->datadgst_en && tcp_ctask->sent - start > 0) - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); - - if (!ctask->data_count) - break; - iscsi_buf_init_sg(&tcp_ctask->sendbuf, - &tcp_ctask->sg[tcp_ctask->sg_count++]); - } - BUG_ON(ctask->unsol_count < 0); - - /* - * Done with the Data-Out. Next, check if we need - * to send another unsolicited Data-Out. 
- */ - if (ctask->unsol_count) { - if (conn->datadgst_en) { - rc = iscsi_digest_final_send(conn, ctask, - &dtask->digestbuf, - &dtask->digest, 1); - if (rc) { - debug_tcp("send uns digest 0x%x fail\n", - dtask->digest); - return rc; - } - debug_tcp("sending uns digest 0x%x, more uns\n", - dtask->digest); - } - tcp_ctask->xmstate |= XMSTATE_UNS_INIT; - return 1; - } - - if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) { - rc = iscsi_digest_final_send(conn, ctask, - &dtask->digestbuf, - &dtask->digest, 1); - if (rc) { - debug_tcp("send last uns digest 0x%x fail\n", - dtask->digest); - return rc; + if (ctask->unsol_count) { + debug_scsi("sending more uns\n"); + tcp_ctask->xmstate |= XMSTATE_UNS_INIT; + goto send_hdr; } - debug_tcp("sending uns digest 0x%x\n",dtask->digest); } - return 0; } -static inline int -handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) +static int iscsi_send_sol_pdu(struct iscsi_conn *conn, + struct iscsi_cmd_task *ctask) { - struct iscsi_session *session = conn->session; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_r2t_info *r2t = tcp_ctask->r2t; - struct iscsi_data_task *dtask = &r2t->dtask; + struct iscsi_session *session = conn->session; + struct iscsi_r2t_info *r2t; + struct iscsi_data_task *dtask; int left, rc; - tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; - tcp_ctask->dtask = dtask; - - if (conn->datadgst_en) { - iscsi_data_digest_init(tcp_conn, ctask); - dtask->digest = 0; - } -solicit_again: - /* - * send Data-Out within this R2T sequence. - */ - if (!r2t->data_count) - goto data_out_done; - - rc = iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent); - if (rc) { + if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) { + tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; tcp_ctask->xmstate |= XMSTATE_SOL_DATA; - /* will continue with this ctask later.. */ - if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, - (u8 *)&dtask->digest); - debug_tcp("r2t data send fail 0x%x\n", dtask->digest); - } - return rc; - } + if (!tcp_ctask->r2t) + __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, + sizeof(void*)); +send_hdr: + r2t = tcp_ctask->r2t; + dtask = &r2t->dtask; - BUG_ON(r2t->data_count < 0); - if (conn->datadgst_en) - crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg, - 1); - - if (r2t->data_count) { - BUG_ON(ctask->sc->use_sg == 0); - if (!iscsi_buf_left(&r2t->sendbuf)) { - BUG_ON(tcp_ctask->bad_sg == r2t->sg); - iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); - r2t->sg += 1; - } - goto solicit_again; - } + if (conn->hdrdgst_en) + iscsi_hdr_digest(conn, &r2t->headbuf, + (u8*)dtask->hdrext); -data_out_done: - /* - * Done with this Data-Out. Next, check if we have - * to send another Data-Out for this R2T. - */ - BUG_ON(r2t->data_length - r2t->sent < 0); - left = r2t->data_length - r2t->sent; - if (left) { if (conn->datadgst_en) { - rc = iscsi_digest_final_send(conn, ctask, - &dtask->digestbuf, - &dtask->digest, 1); - if (rc) { - debug_tcp("send r2t data digest 0x%x" - "fail\n", dtask->digest); - return rc; - } - debug_tcp("r2t data send digest 0x%x\n", - dtask->digest); + iscsi_data_digest_init(conn->dd_data, tcp_ctask); + dtask->digest = 0; } - iscsi_solicit_data_cont(conn, ctask, r2t, left); - tcp_ctask->xmstate |= XMSTATE_SOL_DATA; - tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; - return 1; - } - /* - * Done with this R2T. Check if there are more - * outstanding R2Ts ready to be processed. 
- */ - if (conn->datadgst_en) { - rc = iscsi_digest_final_send(conn, ctask, &dtask->digestbuf, - &dtask->digest, 1); + rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); if (rc) { - debug_tcp("send last r2t data digest 0x%x" - "fail\n", dtask->digest); + tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; + tcp_ctask->xmstate |= XMSTATE_SOL_HDR; return rc; } - debug_tcp("r2t done dout digest 0x%x\n", dtask->digest); - } - tcp_ctask->r2t = NULL; - spin_lock_bh(&session->lock); - __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); - spin_unlock_bh(&session->lock); - if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) { - tcp_ctask->r2t = r2t; - tcp_ctask->xmstate |= XMSTATE_SOL_DATA; - tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; - return 1; + iscsi_set_padding(tcp_ctask, r2t->data_count); + debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n", + r2t->solicit_datasn - 1, ctask->itt, r2t->data_count, + r2t->sent); } - return 0; -} + if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) { + r2t = tcp_ctask->r2t; + dtask = &r2t->dtask; -static inline int -handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) -{ - struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct iscsi_data_task *dtask = tcp_ctask->dtask; - int sent = 0, rc; + rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg, + &r2t->sent, &r2t->data_count, + &dtask->digestbuf, &dtask->digest); + if (rc) + return rc; + tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; - tcp_ctask->xmstate &= ~XMSTATE_W_PAD; - iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, - tcp_ctask->pad_count); - rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count, - &sent); - if (rc) { - tcp_ctask->xmstate |= XMSTATE_W_PAD; - return rc; - } + /* + * Done with this Data-Out. Next, check if we have + * to send another Data-Out for this R2T. + */ + BUG_ON(r2t->data_length - r2t->sent < 0); + left = r2t->data_length - r2t->sent; + if (left) { + iscsi_solicit_data_cont(conn, ctask, r2t, left); + tcp_ctask->xmstate |= XMSTATE_SOL_DATA; + tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; + goto send_hdr; + } - if (conn->datadgst_en) { - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); - /* imm data? */ - if (!dtask) { - rc = iscsi_digest_final_send(conn, ctask, - &tcp_ctask->immbuf, - &tcp_ctask->immdigest, 1); - if (rc) { - debug_tcp("send padding digest 0x%x" - "fail!\n", tcp_ctask->immdigest); - return rc; - } - debug_tcp("done with padding, digest 0x%x\n", - tcp_ctask->datadigest); - } else { - rc = iscsi_digest_final_send(conn, ctask, - &dtask->digestbuf, - &dtask->digest, 1); - if (rc) { - debug_tcp("send padding digest 0x%x" - "fail\n", dtask->digest); - return rc; - } - debug_tcp("done with padding, digest 0x%x\n", - dtask->digest); + /* + * Done with this R2T. Check if there are more + * outstanding R2Ts ready to be processed. 
+ */ + spin_lock_bh(&session->lock); + tcp_ctask->r2t = NULL; + __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, + sizeof(void*)); + if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, + sizeof(void*))) { + tcp_ctask->r2t = r2t; + tcp_ctask->xmstate |= XMSTATE_SOL_DATA; + tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; + spin_unlock_bh(&session->lock); + goto send_hdr; } + spin_unlock_bh(&session->lock); } - return 0; } @@ -1808,85 +1709,30 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) return rc; if (tcp_ctask->xmstate & XMSTATE_R_HDR) - return handle_xmstate_r_hdr(conn, tcp_ctask); + return iscsi_send_read_hdr(conn, tcp_ctask); if (tcp_ctask->xmstate & XMSTATE_W_HDR) { - rc = handle_xmstate_w_hdr(conn, ctask); - if (rc) - return rc; - } - - /* XXX: for data digest xmit recover */ - if (tcp_ctask->xmstate & XMSTATE_DATA_DIGEST) { - rc = handle_xmstate_data_digest(conn, ctask); + rc = iscsi_send_write_hdr(conn, ctask); if (rc) return rc; } if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { - rc = handle_xmstate_imm_data(conn, ctask); + rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, + &tcp_ctask->sent, &ctask->imm_count, + &tcp_ctask->immbuf, &tcp_ctask->immdigest); if (rc) return rc; + tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; } - if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) { - BUG_ON(!ctask->unsol_count); - tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR; -unsolicit_head_again: - rc = handle_xmstate_uns_hdr(conn, ctask); - if (rc) - return rc; - } - - if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) { - rc = handle_xmstate_uns_data(conn, ctask); - if (rc == 1) - goto unsolicit_head_again; - else if (rc) - return rc; - goto done; - } - - if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) { - struct iscsi_r2t_info *r2t; - - tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR; - tcp_ctask->xmstate |= XMSTATE_SOL_DATA; - if (!tcp_ctask->r2t) - __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, - sizeof(void*)); -solicit_head_again: - r2t = tcp_ctask->r2t; - if (conn->hdrdgst_en) - iscsi_hdr_digest(conn, &r2t->headbuf, - (u8*)r2t->dtask.hdrext); - rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); - if (rc) { - tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; - tcp_ctask->xmstate |= XMSTATE_SOL_HDR; - return rc; - } - - debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n", - r2t->solicit_datasn - 1, ctask->itt, r2t->data_count, - r2t->sent); - } - - if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) { - rc = handle_xmstate_sol_data(conn, ctask); - if (rc == 1) - goto solicit_head_again; - if (rc) - return rc; - } + rc = iscsi_send_unsol_pdu(conn, ctask); + if (rc) + return rc; -done: - /* - * Last thing to check is whether we need to send write - * padding. Note that we check for xmstate equality, not just the bit. 
- */ - if (tcp_ctask->xmstate == XMSTATE_W_PAD) - rc = handle_xmstate_w_pad(conn, ctask); + rc = iscsi_send_sol_pdu(conn, ctask); + if (rc) + return rc; return rc; } diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index aace8f7..7e40e94 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -31,23 +31,21 @@ #define IN_PROGRESS_DDIGEST_RECV 0x3 /* xmit state machine */ -#define XMSTATE_IDLE 0x0 -#define XMSTATE_R_HDR 0x1 -#define XMSTATE_W_HDR 0x2 -#define XMSTATE_IMM_HDR 0x4 -#define XMSTATE_IMM_DATA 0x8 -#define XMSTATE_UNS_INIT 0x10 -#define XMSTATE_UNS_HDR 0x20 -#define XMSTATE_UNS_DATA 0x40 -#define XMSTATE_SOL_HDR 0x80 -#define XMSTATE_SOL_DATA 0x100 -#define XMSTATE_W_PAD 0x200 -#define XMSTATE_DATA_DIGEST 0x400 - -#define ISCSI_CONN_RCVBUF_MIN 262144 -#define ISCSI_CONN_SNDBUF_MIN 262144 +#define XMSTATE_IDLE 0x0 +#define XMSTATE_R_HDR 0x1 +#define XMSTATE_W_HDR 0x2 +#define XMSTATE_IMM_HDR 0x4 +#define XMSTATE_IMM_DATA 0x8 +#define XMSTATE_UNS_INIT 0x10 +#define XMSTATE_UNS_HDR 0x20 +#define XMSTATE_UNS_DATA 0x40 +#define XMSTATE_SOL_HDR 0x80 +#define XMSTATE_SOL_DATA 0x100 +#define XMSTATE_W_PAD 0x200 +#define XMSTATE_W_RESEND_PAD 0x400 +#define XMSTATE_W_RESEND_DATA_DIGEST 0x800 + #define ISCSI_PAD_LEN 4 -#define ISCSI_R2T_MAX 16 #define ISCSI_SG_TABLESIZE SG_ALL #define ISCSI_TCP_MAX_CMD_LEN 16 @@ -162,13 +160,10 @@ struct iscsi_tcp_cmd_task { struct iscsi_queue r2tpool; struct kfifo *r2tqueue; struct iscsi_r2t_info **r2ts; - uint32_t datadigest; /* for recover digest */ int digest_count; uint32_t immdigest; /* for imm data */ struct iscsi_buf immbuf; /* for imm data digest */ - struct iscsi_data_task *dtask; /* data task in progress*/ struct iscsi_data_task unsol_dtask; /* unsol data task */ - int digest_offset; /* for partial buff digest */ }; #endif /* ISCSI_H */ diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 9584cbc..fb65311 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -325,6 +325,30 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) wake_up(&conn->ehwait); } +static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *data, int datalen) +{ + struct iscsi_reject *reject = (struct iscsi_reject *)hdr; + struct iscsi_hdr rejected_pdu; + uint32_t itt; + + conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; + + if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) { + if (ntoh24(reject->dlength) > datalen) + return ISCSI_ERR_PROTO; + + if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { + memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); + itt = rejected_pdu.itt & ISCSI_ITT_MASK; + printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected " + "due to DataDigest error.\n", itt, + rejected_pdu.opcode); + } + } + return 0; +} + /** * __iscsi_complete_pdu - complete pdu * @conn: iscsi conn @@ -436,6 +460,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, break; } } else if (itt == ISCSI_RESERVED_TAG) { + rc = iscsi_check_assign_cmdsn(session, + (struct iscsi_nopin*)hdr); + if (rc) + goto done; + switch(opcode) { case ISCSI_OP_NOOP_IN: if (datalen) { @@ -443,11 +472,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, break; } - rc = iscsi_check_assign_cmdsn(session, - (struct iscsi_nopin*)hdr); - if (rc) - break; - if (hdr->ttt == ISCSI_RESERVED_TAG) break; @@ -455,7 +479,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, rc = ISCSI_ERR_CONN_FAILED; break; case 
ISCSI_OP_REJECT: - /* we need sth like iscsi_reject_rsp()*/ + rc = iscsi_handle_reject(conn, hdr, data, datalen); + break; case ISCSI_OP_ASYNC_EVENT: conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; /* we need sth like iscsi_async_event_rsp() */ -- cgit v0.10.2 From dd8c0d958621e3137f3e3302f7b8952041a4a1d7 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:28 -0400 Subject: [SCSI] scsi_tcp: rm data rx and tx tfms We currently allocated seperate tfms for data and header digests. There is no reason for this since we can never calculate a rx header and digest at the same time. Same for sends. So this patch removes the data tfms and has the send and recv sides use the rx_tfm or tx_tfm. I also made the connection creation code preallocate the tfms because I thought I hit a bug where I changed the digests settings during a relogin but could not allocate the tfm and then we just failed. Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 290c1d7..82399f7 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -693,7 +693,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len) struct scatterlist tmp; sg_init_one(&tmp, buf, len); - crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1); + crypto_digest_update(tcp_conn->rx_tfm, &tmp, 1); } static int iscsi_scsi_data_in(struct iscsi_conn *conn) @@ -748,11 +748,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) if (conn->datadgst_en) { if (!offset) crypto_digest_update( - tcp_conn->data_rx_tfm, + tcp_conn->rx_tfm, &sg[i], 1); else partial_sg_digest_update( - tcp_conn->data_rx_tfm, + tcp_conn->rx_tfm, &sg[i], sg[i].offset + offset, sg[i].length - offset); @@ -766,7 +766,7 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) /* * data-in is complete, but buffer not... 
*/ - partial_sg_digest_update(tcp_conn->data_rx_tfm, + partial_sg_digest_update(tcp_conn->rx_tfm, &sg[i], sg[i].offset, sg[i].length-rc); rc = 0; @@ -885,10 +885,8 @@ more: */ rc = iscsi_tcp_hdr_recv(conn); if (!rc && tcp_conn->in.datalen) { - if (conn->datadgst_en) { - BUG_ON(!tcp_conn->data_rx_tfm); - crypto_digest_init(tcp_conn->data_rx_tfm); - } + if (conn->datadgst_en) + crypto_digest_init(tcp_conn->rx_tfm); tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; } else if (rc) { iscsi_conn_failure(conn, rc); @@ -940,10 +938,10 @@ more: tcp_conn->in.padding); memset(pad, 0, tcp_conn->in.padding); sg_init_one(&sg, pad, tcp_conn->in.padding); - crypto_digest_update(tcp_conn->data_rx_tfm, + crypto_digest_update(tcp_conn->rx_tfm, &sg, 1); } - crypto_digest_final(tcp_conn->data_rx_tfm, + crypto_digest_final(tcp_conn->rx_tfm, (u8 *) & tcp_conn->in.datadgst); debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; @@ -1188,7 +1186,7 @@ static inline void iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, struct iscsi_tcp_cmd_task *tcp_ctask) { - crypto_digest_init(tcp_conn->data_tx_tfm); + crypto_digest_init(tcp_conn->tx_tfm); tcp_ctask->digest_count = 4; } @@ -1444,7 +1442,7 @@ iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, tcp_ctask->pad_count); if (conn->datadgst_en) - crypto_digest_update(tcp_conn->data_tx_tfm, + crypto_digest_update(tcp_conn->tx_tfm, &tcp_ctask->sendbuf.sg, 1); } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD)) return 0; @@ -1477,7 +1475,7 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, tcp_conn = conn->dd_data; if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) { - crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest); + crypto_digest_final(tcp_conn->tx_tfm, (u8*)digest); iscsi_buf_init_iov(buf, (char*)digest, 4); } tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST; @@ -1511,7 +1509,7 @@ iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf, rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent); *sent = *sent + buf_sent; if (buf_sent && conn->datadgst_en) - partial_sg_digest_update(tcp_conn->data_tx_tfm, + partial_sg_digest_update(tcp_conn->tx_tfm, &sendbuf->sg, sendbuf->sg.offset + offset, buf_sent); if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) { @@ -1547,10 +1545,6 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &tcp_ctask->headbuf, (u8*)dtask->hdrext); - if (conn->datadgst_en) { - iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask); - dtask->digest = 0; - } tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; iscsi_set_padding(tcp_ctask, ctask->data_count); @@ -1563,6 +1557,12 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) return rc; } + if (conn->datadgst_en) { + dtask = &tcp_ctask->unsol_dtask; + iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask); + dtask->digest = 0; + } + debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", ctask->itt, ctask->unsol_count, tcp_ctask->sent); return 0; @@ -1629,12 +1629,6 @@ send_hdr: if (conn->hdrdgst_en) iscsi_hdr_digest(conn, &r2t->headbuf, (u8*)dtask->hdrext); - - if (conn->datadgst_en) { - iscsi_data_digest_init(conn->dd_data, tcp_ctask); - dtask->digest = 0; - } - rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); if (rc) { tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA; @@ -1642,6 +1636,11 @@ send_hdr: return rc; } + 
if (conn->datadgst_en) { + iscsi_data_digest_init(conn->dd_data, tcp_ctask); + dtask->digest = 0; + } + iscsi_set_padding(tcp_ctask, r2t->data_count); debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n", r2t->solicit_datasn - 1, ctask->itt, r2t->data_count, @@ -1764,8 +1763,20 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) /* initial operational parameters */ tcp_conn->hdr_size = sizeof(struct iscsi_hdr); + tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c", 0); + if (!tcp_conn->tx_tfm) + goto free_tcp_conn; + + tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c", 0); + if (!tcp_conn->rx_tfm) + goto free_tx_tfm; + return cls_conn; +free_tx_tfm: + crypto_free_tfm(tcp_conn->tx_tfm); +free_tcp_conn: + kfree(tcp_conn); tcp_conn_alloc_fail: iscsi_conn_teardown(cls_conn); return NULL; @@ -1807,10 +1818,6 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) crypto_free_tfm(tcp_conn->tx_tfm); if (tcp_conn->rx_tfm) crypto_free_tfm(tcp_conn->rx_tfm); - if (tcp_conn->data_tx_tfm) - crypto_free_tfm(tcp_conn->data_tx_tfm); - if (tcp_conn->data_rx_tfm) - crypto_free_tfm(tcp_conn->data_rx_tfm); } kfree(tcp_conn); @@ -1968,48 +1975,11 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, case ISCSI_PARAM_HDRDGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); tcp_conn->hdr_size = sizeof(struct iscsi_hdr); - if (conn->hdrdgst_en) { + if (conn->hdrdgst_en) tcp_conn->hdr_size += sizeof(__u32); - if (!tcp_conn->tx_tfm) - tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c", - 0); - if (!tcp_conn->tx_tfm) - return -ENOMEM; - if (!tcp_conn->rx_tfm) - tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c", - 0); - if (!tcp_conn->rx_tfm) { - crypto_free_tfm(tcp_conn->tx_tfm); - return -ENOMEM; - } - } else { - if (tcp_conn->tx_tfm) - crypto_free_tfm(tcp_conn->tx_tfm); - if (tcp_conn->rx_tfm) - crypto_free_tfm(tcp_conn->rx_tfm); - } break; case ISCSI_PARAM_DATADGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); - if (conn->datadgst_en) { - if (!tcp_conn->data_tx_tfm) - tcp_conn->data_tx_tfm = - crypto_alloc_tfm("crc32c", 0); - if (!tcp_conn->data_tx_tfm) - return -ENOMEM; - if (!tcp_conn->data_rx_tfm) - tcp_conn->data_rx_tfm = - crypto_alloc_tfm("crc32c", 0); - if (!tcp_conn->data_rx_tfm) { - crypto_free_tfm(tcp_conn->data_tx_tfm); - return -ENOMEM; - } - } else { - if (tcp_conn->data_tx_tfm) - crypto_free_tfm(tcp_conn->data_tx_tfm); - if (tcp_conn->data_rx_tfm) - crypto_free_tfm(tcp_conn->data_rx_tfm); - } tcp_conn->sendpage = conn->datadgst_en ? 
sock_no_sendpage : tcp_conn->sock->ops->sendpage; break; diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 7e40e94..609f477 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -81,10 +81,6 @@ struct iscsi_tcp_conn { * stop to terminate */ /* iSCSI connection-wide sequencing */ int hdr_size; /* PDU header size */ - - struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */ - struct crypto_tfm *data_rx_tfm; /* CRC32C (Rx) for data */ - /* control data */ struct iscsi_tcp_recv in; /* TCP receive context */ int in_progress; /* connection state machine */ @@ -94,9 +90,9 @@ struct iscsi_tcp_conn { void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); - /* xmit */ + /* data and header digests */ struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */ - struct crypto_tfm *data_tx_tfm; /* CRC32C (Tx) for data */ + struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */ /* MIB custom statistics */ uint32_t sendpage_failures_cnt; -- cgit v0.10.2 From 753e7d3866748799e4a8769cd27ea7202654211b Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:29 -0400 Subject: [SCSI] iscsi_tcp: fix header resend This patch built over the last ones fixes a bug in the partial header resend code, where we add on another 4 bytes to the send length on the resend. We want just the header plus digest. Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 82399f7..541912a 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -109,7 +109,7 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, struct iscsi_tcp_conn *tcp_conn = conn->dd_data; crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc); - buf->sg.length += sizeof(uint32_t); + buf->sg.length = tcp_conn->hdr_size; } static inline int -- cgit v0.10.2 From d5390f5f788f01788e9dfd41ad516a2908901610 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:30 -0400 Subject: [SCSI] iscsi_tcp: update header size during relogin When we relogin to a target, we have not yet negotiated digests so we must reset the hdr_size var. 
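As a rough illustration of the invariant behind this fix (a hedged sketch only; the struct and function names below are invented for the example and are not the driver's API): the per-connection PDU header size is the fixed iSCSI basic header plus an optional 4-byte CRC32C header digest, and it must fall back to the bare size whenever the connection is stopped, because the follow-up login renegotiates digest use from scratch.

/*
 * Illustrative sketch, not kernel code: why the header size must be reset
 * when a connection stops. All identifiers are made up for the example.
 */
#include <stddef.h>

struct sketch_hdr {
	unsigned char bhs[48];		/* stand-in for the fixed iSCSI basic header */
};

struct sketch_conn {
	int hdrdgst_en;			/* HeaderDigest negotiated at login */
	size_t hdr_size;		/* bytes the receive path expects per PDU header */
};

/* applied when the HeaderDigest login parameter is (re)negotiated */
static void sketch_set_hdrdgst(struct sketch_conn *conn, int enabled)
{
	conn->hdrdgst_en = enabled;
	conn->hdr_size = sizeof(struct sketch_hdr);
	if (enabled)
		conn->hdr_size += sizeof(unsigned int);	/* 4-byte CRC32C digest */
}

/* applied on connection stop: the relogin starts digest-free */
static void sketch_conn_stop(struct sketch_conn *conn)
{
	conn->hdrdgst_en = 0;
	conn->hdr_size = sizeof(struct sketch_hdr);
}

The patch below performs the equivalent reset in iscsi_tcp_conn_stop().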
Signed-off-by: Mike Christie Signed-off-by: James Bottomley
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 541912a..5d292d0 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c
@@ -1827,9 +1827,11 @@ static void iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; iscsi_conn_stop(cls_conn, flag); iscsi_tcp_release_conn(conn); + tcp_conn->hdr_size = sizeof(struct iscsi_hdr); } static int
-- cgit v0.10.2
From db98ccde0881b8247acb52dece6d94ed770a7aa5 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:31 -0400 Subject: [SCSI] libiscsi: only check burst lengths when sending unsol data
The first burst length is only relevant if immediate data = Yes or if Initial R2T is No
Signed-off-by: Mike Christie Signed-off-by: James Bottomley
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index fb65311..864c628 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c
@@ -1593,7 +1593,8 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) return -EPERM; } - if (session->first_burst > session->max_burst) { + if ((session->imm_data_en || !session->initial_r2t_en) && + session->first_burst > session->max_burst) { printk("iscsi: invalid burst lengths: " "first_burst %d max_burst %d\n", session->first_burst, session->max_burst);
-- cgit v0.10.2
From ca5186842a6d85e982e3d572ecd407453d0c5116 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:32 -0400 Subject: [SCSI] iscsi_tcp: fix partial digest recv
When a digest is spread across two network buffers, we currently ignore this and try to check the digest with the partial buffer. Of course this fails. This patch uses iscsi_tcp_copy to copy the whole digest before testing it.
Signed-off-by: Mike Christie Signed-off-by: James Bottomley
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 5d292d0..d91e894 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c
@@ -648,10 +648,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask, * byte counters. **/ static inline int -iscsi_tcp_copy(struct iscsi_conn *conn) +iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - int buf_size = tcp_conn->in.datalen; int buf_left = buf_size - tcp_conn->data_copied; int size = min(tcp_conn->in.copy, buf_left); int rc;
@@ -812,7 +811,7 @@ iscsi_data_recv(struct iscsi_conn *conn) * Collect data segment to the connection's data * placeholder */ - if (iscsi_tcp_copy(conn)) { + if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) { rc = -EAGAIN; goto exit; }
@@ -899,10 +898,15 @@ more: debug_tcp("extra data_recv offset %d copy %d\n", tcp_conn->in.offset, tcp_conn->in.copy); - skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, - &recv_digest, 4); - tcp_conn->in.offset += 4; - tcp_conn->in.copy -= 4; + rc = iscsi_tcp_copy(conn, sizeof(uint32_t)); + if (rc) { + if (rc == -EAGAIN) + goto again; + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + return 0; + } + + memcpy(&recv_digest, conn->data, sizeof(uint32_t)); if (recv_digest != tcp_conn->in.datadgst) { debug_tcp("iscsi_tcp: data digest error!" "0x%x != 0x%x\n", recv_digest,
@@ -942,9 +946,10 @@ more: &sg, 1); } crypto_digest_final(tcp_conn->rx_tfm, - (u8 *) & tcp_conn->in.datadgst); + (u8 *) &tcp_conn->in.datadgst); debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; + tcp_conn->data_copied = 0; } else tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; }
-- cgit v0.10.2
From f47f2cf5d4acf929a3aaa6957c3fc4622c358703 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:33 -0400 Subject: [SCSI] libiscsi: check that command ptr is set before accessing it
If the scsi eh sends a TUR and the session is down we could return SCSI_ML_HOST_BUSY. scsi eh will ignore this and ask us to abort the command, and we blindly access the command ptr.
Signed-off-by: Mike Christie Signed-off-by: James Bottomley
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 864c628..12b5c18 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c
@@ -192,6 +192,8 @@ static void iscsi_complete_command(struct iscsi_cmd_task *ctask) ctask->state = ISCSI_TASK_COMPLETED; ctask->sc = NULL; + /* SCSI eh reuses commands to verify us */ + sc->SCp.ptr = NULL; list_del_init(&ctask->running); __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); sc->scsi_done(sc);
@@ -737,6 +739,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->scsi_done = done; sc->result = 0; + sc->SCp.ptr = NULL; host = sc->device->host; session = iscsi_hostdata(host->hostdata);
@@ -801,9 +804,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) list_add_tail(&ctask->running, &conn->xmitqueue); debug_scsi( - "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", + "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d " + "win %d]\n", sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", - conn->id, (long)sc, ctask->itt, sc->request_bufflen, + conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen, session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); spin_unlock(&session->lock);
@@ -1134,11 +1138,24 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, int iscsi_eh_abort(struct scsi_cmnd *sc) { - struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; - struct iscsi_conn *conn = ctask->conn; - struct iscsi_session *session = conn->session; + struct iscsi_cmd_task *ctask; + struct iscsi_conn *conn; + struct iscsi_session *session; int rc; + /* + * if session was ISCSI_STATE_IN_RECOVERY then we may not have + * got the command. + */ + if (!sc->SCp.ptr) { + debug_scsi("sc never reached iscsi layer or it completed.\n"); + return SUCCESS; + } + + ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; + conn = ctask->conn; + session = conn->session; + conn->eh_abort_cnt++; debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
-- cgit v0.10.2
From e648f63c6520d6e572573149c16a64d2c5ad7ec5 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:34 -0400 Subject: [SCSI] libiscsi: don't call into lld to cleanup task
In the normal IO path we should not be calling back into the LLD since the LLD will have cleaned up the task before or after calling complete pdu. For the fail_command path we still need to do this to force the cleanup.
Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 12b5c18..c542d0e 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -213,12 +213,8 @@ static void iscsi_get_ctask(struct iscsi_cmd_task *ctask) static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask) { - struct iscsi_conn *conn = ctask->conn; - - if (atomic_dec_and_test(&ctask->refcount)) { - conn->session->tt->cleanup_cmd_task(conn, ctask); + if (atomic_dec_and_test(&ctask->refcount)) iscsi_complete_command(ctask); - } } static void iscsi_put_ctask(struct iscsi_cmd_task *ctask) @@ -1129,10 +1125,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, sc = ctask->sc; if (!sc) return; + + conn->session->tt->cleanup_cmd_task(conn, ctask); iscsi_ctask_mtask_cleanup(ctask); sc->result = err; sc->resid = sc->request_bufflen; + /* release ref from queuecommand */ __iscsi_put_ctask(ctask); } -- cgit v0.10.2 From 01dfc7fc56f4b7ec0e5344ab44fcf673ebfbf7fa Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 31 Aug 2006 18:09:35 -0400 Subject: [SCSI] iscsi class: update version Signed-off-by: Mike Christie Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 2ecd141..7b0019c 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -34,7 +34,7 @@ #define ISCSI_SESSION_ATTRS 11 #define ISCSI_CONN_ATTRS 11 #define ISCSI_HOST_ATTRS 0 -#define ISCSI_TRANSPORT_VERSION "1.1-646" +#define ISCSI_TRANSPORT_VERSION "2.0-685" struct iscsi_internal { int daemon_pid; -- cgit v0.10.2 From 69bdd88ca2670c321fef774e77059516f836c6f2 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 1 Sep 2006 15:50:23 +0200 Subject: [SCSI] Wrong size information for devices with disabled read access When accessing a device with disabled read access the capacity is set randomly to 1GB. This makes it impossible to userspace tools to detect invalid device capacities. Signed-off-by: Mike Anderson Acked-by: Chris Mason Signed-off-by: Hannes Reinecke Signed-off-by: James Bottomley diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 98bd3aa..638cff4 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1215,7 +1215,7 @@ repeat: /* Either no media are present but the drive didn't tell us, or they are present but the read capacity command fails */ /* sdkp->media_present = 0; -- not always correct */ - sdkp->capacity = 0x200000; /* 1 GB - random */ + sdkp->capacity = 0; /* unknown mapped to zero - as usual */ return; } else if (the_result && longrc) { -- cgit v0.10.2 From 5a25ba1677ab8d63890016a8c1bca68a3e0fbc7d Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Fri, 1 Sep 2006 03:12:19 -0400 Subject: [SCSI] Add Promise SuperTrak driver Add Promise SuperTrak 'stex' driver, supporting SuperTrak EX8350/8300/16350/16300 controllers. The controller's firmware accepts SCSI commands, handing them to the underlying RAID or JBOD disks. The driver consisted of the following cleanups and fixes, beyond its initial submission: Ed Lin: stex: cleanup and minor fixes stex: add new device ids stex: update internal copy code path stex: add hard reset function stex: adjust command timeout in slave_config routine stex: use more efficient method for unload/shutdown flush Jeff Garzik: [SCSI] Add Promise SuperTrak 'shasta' driver. Rename drivers/scsi/shasta.c to stex.c ("SuperTrak EX"). 
[SCSI] stex: update with community comments from 'Promise SuperTrak' thread [SCSI] stex: Fix warning, trim trailing whitespace. [SCSI] stex: remove last remnants of "shasta" project code name [SCSI] stex: removed 6-byte command emulation [SCSI] stex: minor cleanups [SCSI] stex: minor fixes: irq flag, error return value [SCSI] stex: use dma_alloc_coherent() Signed-off-by: Jeff Garzik Signed-off-by: James Bottomley diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 7de5fdf..c8c6065 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1070,6 +1070,13 @@ config 53C700_LE_ON_BE depends on SCSI_LASI700 default y +config SCSI_STEX + tristate "Promise SuperTrak EX Series support" + depends on PCI && SCSI + ---help--- + This driver supports Promise SuperTrak EX8350/8300/16350/16300 + Storage controllers. + config SCSI_SYM53C8XX_2 tristate "SYM53C8XX Version 2 SCSI support" depends on PCI && SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 83da70d..fd9aeb1 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o +obj-$(CONFIG_SCSI_STEX) += stex.o obj-$(CONFIG_ARM) += arm/ diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c new file mode 100644 index 0000000..fd09330 --- /dev/null +++ b/drivers/scsi/stex.c @@ -0,0 +1,1316 @@ +/* + * SuperTrak EX Series Storage Controller driver for Linux + * + * Copyright (C) 2005, 2006 Promise Technology Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * Written By: + * Ed Lin + * + * Version: 2.9.0.13 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "stex" +#define ST_DRIVER_VERSION "2.9.0.13" +#define ST_VER_MAJOR 2 +#define ST_VER_MINOR 9 +#define ST_OEM 0 +#define ST_BUILD_VER 13 + +enum { + /* MU register offset */ + IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */ + IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */ + OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */ + OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */ + IDBL = 0x20, /* MU_INBOUND_DOORBELL */ + IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */ + IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */ + ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */ + OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */ + OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */ + + /* MU register value */ + MU_INBOUND_DOORBELL_HANDSHAKE = 1, + MU_INBOUND_DOORBELL_REQHEADCHANGED = 2, + MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4, + MU_INBOUND_DOORBELL_HMUSTOPPED = 8, + MU_INBOUND_DOORBELL_RESET = 16, + + MU_OUTBOUND_DOORBELL_HANDSHAKE = 1, + MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2, + MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4, + MU_OUTBOUND_DOORBELL_BUSCHANGE = 8, + MU_OUTBOUND_DOORBELL_HASEVENT = 16, + + /* MU status code */ + MU_STATE_STARTING = 1, + MU_STATE_FMU_READY_FOR_HANDSHAKE = 2, + MU_STATE_SEND_HANDSHAKE_FRAME = 3, + MU_STATE_STARTED = 4, + MU_STATE_RESETTING = 5, + + MU_MAX_DELAY_TIME = 240000, + MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, + HMU_PARTNER_TYPE = 2, + + /* firmware returned values */ + SRB_STATUS_SUCCESS = 0x01, + SRB_STATUS_ERROR = 0x04, + SRB_STATUS_BUSY = 0x05, + SRB_STATUS_INVALID_REQUEST = 0x06, + SRB_STATUS_SELECTION_TIMEOUT = 0x0A, + SRB_SEE_SENSE = 0x80, + + /* task attribute */ + TASK_ATTRIBUTE_SIMPLE = 0x0, + TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, + TASK_ATTRIBUTE_ORDERED = 0x2, + TASK_ATTRIBUTE_ACA = 0x4, + + /* request count, etc. 
*/ + MU_MAX_REQUEST = 32, + TAG_BITMAP_LENGTH = MU_MAX_REQUEST, + + /* one message wasted, use MU_MAX_REQUEST+1 + to handle MU_MAX_REQUEST messages */ + MU_REQ_COUNT = (MU_MAX_REQUEST + 1), + MU_STATUS_COUNT = (MU_MAX_REQUEST + 1), + + STEX_CDB_LENGTH = MAX_COMMAND_SIZE, + REQ_VARIABLE_LEN = 1024, + STATUS_VAR_LEN = 128, + ST_CAN_QUEUE = MU_MAX_REQUEST, + ST_CMD_PER_LUN = MU_MAX_REQUEST, + ST_MAX_SG = 32, + + /* sg flags */ + SG_CF_EOT = 0x80, /* end of table */ + SG_CF_64B = 0x40, /* 64 bit item */ + SG_CF_HOST = 0x20, /* sg in host memory */ + + ST_MAX_ARRAY_SUPPORTED = 16, + ST_MAX_TARGET_NUM = (ST_MAX_ARRAY_SUPPORTED+1), + ST_MAX_LUN_PER_TARGET = 16, + + st_shasta = 0, + st_vsc = 1, + + PASSTHRU_REQ_TYPE = 0x00000001, + PASSTHRU_REQ_NO_WAKEUP = 0x00000100, + ST_INTERNAL_TIMEOUT = 30, + + /* vendor specific commands of Promise */ + ARRAY_CMD = 0xe0, + CONTROLLER_CMD = 0xe1, + DEBUGGING_CMD = 0xe2, + PASSTHRU_CMD = 0xe3, + + PASSTHRU_GET_ADAPTER = 0x05, + PASSTHRU_GET_DRVVER = 0x10, + CTLR_POWER_STATE_CHANGE = 0x0e, + CTLR_POWER_SAVING = 0x01, + + PASSTHRU_SIGNATURE = 0x4e415041, + + INQUIRY_EVPD = 0x01, +}; + +struct st_sgitem { + u8 ctrl; /* SG_CF_xxx */ + u8 reserved[3]; + __le32 count; + __le32 addr; + __le32 addr_hi; +}; + +struct st_sgtable { + __le16 sg_count; + __le16 max_sg_count; + __le32 sz_in_byte; + struct st_sgitem table[ST_MAX_SG]; +}; + +struct handshake_frame { + __le32 rb_phy; /* request payload queue physical address */ + __le32 rb_phy_hi; + __le16 req_sz; /* size of each request payload */ + __le16 req_cnt; /* count of reqs the buffer can hold */ + __le16 status_sz; /* size of each status payload */ + __le16 status_cnt; /* count of status the buffer can hold */ + __le32 hosttime; /* seconds from Jan 1, 1970 (GMT) */ + __le32 hosttime_hi; + u8 partner_type; /* who sends this frame */ + u8 reserved0[7]; + __le32 partner_ver_major; + __le32 partner_ver_minor; + __le32 partner_ver_oem; + __le32 partner_ver_build; + u32 reserved1[4]; +}; + +struct req_msg { + __le16 tag; + u8 lun; + u8 target; + u8 task_attr; + u8 task_manage; + u8 prd_entry; + u8 payload_sz; /* payload size in 4-byte */ + u8 cdb[STEX_CDB_LENGTH]; + u8 variable[REQ_VARIABLE_LEN]; +}; + +struct status_msg { + __le16 tag; + u8 lun; + u8 target; + u8 srb_status; + u8 scsi_status; + u8 reserved; + u8 payload_sz; /* payload size in 4-byte */ + u8 variable[STATUS_VAR_LEN]; +}; + +struct ver_info { + u32 major; + u32 minor; + u32 oem; + u32 build; + u32 reserved[2]; +}; + +struct st_frame { + u32 base[6]; + u32 rom_addr; + + struct ver_info drv_ver; + struct ver_info bios_ver; + + u32 bus; + u32 slot; + u32 irq_level; + u32 irq_vec; + u32 id; + u32 subid; + + u32 dimm_size; + u8 dimm_type; + u8 reserved[3]; + + u32 channel; + u32 reserved1; +}; + +struct st_drvver { + u32 major; + u32 minor; + u32 oem; + u32 build; + u32 signature[2]; + u8 console_id; + u8 host_no; + u8 reserved0[2]; + u32 reserved[3]; +}; + +#define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg)) +#define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg)) +#define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE) +#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + sizeof(struct st_frame)) + +struct st_ccb { + struct req_msg *req; + struct scsi_cmnd *cmd; + + void *sense_buffer; + unsigned int sense_bufflen; + int sg_count; + + u32 req_type; + u8 srb_status; + u8 scsi_status; +}; + +struct st_hba { + void __iomem *mmio_base; /* iomapped PCI memory space */ + void *dma_mem; + dma_addr_t dma_handle; + + struct 
Scsi_Host *host; + struct pci_dev *pdev; + + u32 tag; + u32 req_head; + u32 req_tail; + u32 status_head; + u32 status_tail; + + struct status_msg *status_buffer; + void *copy_buffer; /* temp buffer for driver-handled commands */ + struct st_ccb ccb[MU_MAX_REQUEST]; + struct st_ccb *wait_ccb; + wait_queue_head_t waitq; + + unsigned int mu_status; + int out_req_cnt; + + unsigned int cardtype; +}; + +static const char console_inq_page[] = +{ + 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30, + 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */ + 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */ + 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */ + 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */ + 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */ + 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */ + 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20 +}; + +MODULE_AUTHOR("Ed Lin"); +MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ST_DRIVER_VERSION); + +static void stex_gettime(__le32 *time) +{ + struct timeval tv; + do_gettimeofday(&tv); + + *time = cpu_to_le32(tv.tv_sec & 0xffffffff); + *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16); +} + +static u16 __stex_alloc_tag(unsigned long *bitmap) +{ + int i; + i = find_first_zero_bit(bitmap, TAG_BITMAP_LENGTH); + if (i < TAG_BITMAP_LENGTH) + __set_bit(i, bitmap); + return (u16)i; +} + +static u16 stex_alloc_tag(struct st_hba *hba, unsigned long *bitmap) +{ + unsigned long flags; + u16 tag; + + spin_lock_irqsave(hba->host->host_lock, flags); + tag = __stex_alloc_tag(bitmap); + spin_unlock_irqrestore(hba->host->host_lock, flags); + return tag; +} + +static void __stex_free_tag(unsigned long *bitmap, u16 tag) +{ + __clear_bit((int)tag, bitmap); +} + +static void stex_free_tag(struct st_hba *hba, unsigned long *bitmap, u16 tag) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + __stex_free_tag(bitmap, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + +static struct status_msg *stex_get_status(struct st_hba *hba) +{ + struct status_msg *status = + hba->status_buffer + hba->status_tail; + + ++hba->status_tail; + hba->status_tail %= MU_STATUS_COUNT; + + return status; +} + +static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) +{ + cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; /* fixed format, current */ + cmd->sense_buffer[2] = sk; + cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ + cmd->sense_buffer[12] = asc; + cmd->sense_buffer[13] = ascq; +} + +static void stex_invalid_field(struct scsi_cmnd *cmd, + void (*done)(struct scsi_cmnd *)) +{ + /* "Invalid field in cbd" */ + stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); + done(cmd); +} + +static struct req_msg *stex_alloc_req(struct st_hba *hba) +{ + struct req_msg *req = ((struct req_msg *)hba->dma_mem) + + hba->req_head; + + ++hba->req_head; + hba->req_head %= MU_REQ_COUNT; + + return req; +} + +static int stex_map_sg(struct st_hba *hba, + struct req_msg *req, struct st_ccb *ccb) +{ + struct pci_dev *pdev = hba->pdev; + struct scsi_cmnd *cmd; + dma_addr_t dma_handle; + struct scatterlist *src; + struct st_sgtable *dst; + int i; + + cmd = ccb->cmd; + dst = (struct st_sgtable *)req->variable; + dst->max_sg_count = cpu_to_le16(ST_MAX_SG); + dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen); + + if (cmd->use_sg) { + int n_elem; + + src = (struct scatterlist *) cmd->request_buffer; + n_elem = 
pci_map_sg(pdev, src, + cmd->use_sg, cmd->sc_data_direction); + if (n_elem <= 0) + return -EIO; + + ccb->sg_count = n_elem; + dst->sg_count = cpu_to_le16((u16)n_elem); + + for (i = 0; i < n_elem; i++, src++) { + dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src)); + dst->table[i].addr = + cpu_to_le32(sg_dma_address(src) & 0xffffffff); + dst->table[i].addr_hi = + cpu_to_le32((sg_dma_address(src) >> 16) >> 16); + dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST; + } + dst->table[--i].ctrl |= SG_CF_EOT; + return 0; + } + + dma_handle = pci_map_single(pdev, cmd->request_buffer, + cmd->request_bufflen, cmd->sc_data_direction); + cmd->SCp.dma_handle = dma_handle; + + ccb->sg_count = 1; + dst->sg_count = cpu_to_le16(1); + dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff); + dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16); + dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen); + dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST; + + return 0; +} + +static void stex_internal_copy(struct scsi_cmnd *cmd, + const void *src, size_t *count, int sg_count) +{ + size_t lcount; + size_t len; + void *s, *d, *base = NULL; + if (*count > cmd->request_bufflen) + *count = cmd->request_bufflen; + lcount = *count; + while (lcount) { + len = lcount; + s = (void *)src; + if (cmd->use_sg) { + size_t offset = *count - lcount; + s += offset; + base = scsi_kmap_atomic_sg(cmd->request_buffer, + sg_count, &offset, &len); + if (base == NULL) { + *count -= lcount; + return; + } + d = base + offset; + } else + d = cmd->request_buffer; + + memcpy(d, s, len); + + lcount -= len; + if (cmd->use_sg) + scsi_kunmap_atomic_sg(base); + } +} + +static int stex_direct_copy(struct scsi_cmnd *cmd, + const void *src, size_t count) +{ + struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0]; + size_t cp_len = count; + int n_elem = 0; + + if (cmd->use_sg) { + n_elem = pci_map_sg(hba->pdev, cmd->request_buffer, + cmd->use_sg, cmd->sc_data_direction); + if (n_elem <= 0) + return 0; + } + + stex_internal_copy(cmd, src, &cp_len, n_elem); + + if (cmd->use_sg) + pci_unmap_sg(hba->pdev, cmd->request_buffer, + cmd->use_sg, cmd->sc_data_direction); + return cp_len == count; +} + +static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) +{ + struct st_frame *p; + size_t count = sizeof(struct st_frame); + + p = hba->copy_buffer; + memset(p->base, 0, sizeof(u32)*6); + *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); + p->rom_addr = 0; + + p->drv_ver.major = ST_VER_MAJOR; + p->drv_ver.minor = ST_VER_MINOR; + p->drv_ver.oem = ST_OEM; + p->drv_ver.build = ST_BUILD_VER; + + p->bus = hba->pdev->bus->number; + p->slot = hba->pdev->devfn; + p->irq_level = 0; + p->irq_vec = hba->pdev->irq; + p->id = hba->pdev->vendor << 16 | hba->pdev->device; + p->subid = + hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; + + stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count); +} + +static void +stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) +{ + req->tag = cpu_to_le16(tag); + req->task_attr = TASK_ATTRIBUTE_SIMPLE; + req->task_manage = 0; /* not supported yet */ + req->payload_sz = (u8)(sizeof(struct req_msg)/sizeof(u32)); + + hba->ccb[tag].req = req; + hba->out_req_cnt++; + + writel(hba->req_head, hba->mmio_base + IMR0); + writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); + readl(hba->mmio_base + IDBL); /* flush */ +} + +static int +stex_slave_config(struct scsi_device *sdev) +{ + sdev->use_10_for_rw = 1; + sdev->use_10_for_ms = 1; + 
sdev->timeout = 60 * HZ; + return 0; +} + +static void +stex_slave_destroy(struct scsi_device *sdev) +{ + struct st_hba *hba = (struct st_hba *) sdev->host->hostdata; + struct req_msg *req; + unsigned long flags; + unsigned long before; + u16 tag; + + if (sdev->type != TYPE_DISK) + return; + + before = jiffies; + while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag)) + == TAG_BITMAP_LENGTH) { + if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) + return; + msleep(10); + } + + spin_lock_irqsave(hba->host->host_lock, flags); + req = stex_alloc_req(hba); + memset(req->cdb, 0, STEX_CDB_LENGTH); + + req->target = sdev->id; + req->lun = sdev->channel; /* firmware lun issue work around */ + req->cdb[0] = SYNCHRONIZE_CACHE; + + hba->ccb[tag].cmd = NULL; + hba->ccb[tag].sg_count = 0; + hba->ccb[tag].sense_bufflen = 0; + hba->ccb[tag].sense_buffer = NULL; + hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE; + + stex_send_cmd(hba, req, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + wait_event_timeout(hba->waitq, + !(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ); + if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) + return; + + stex_free_tag(hba, (unsigned long *)&hba->tag, tag); +} + +static int +stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) +{ + struct st_hba *hba; + struct Scsi_Host *host; + unsigned int id,lun; + struct req_msg *req; + u16 tag; + host = cmd->device->host; + id = cmd->device->id; + lun = cmd->device->channel; /* firmware lun issue work around */ + hba = (struct st_hba *) &host->hostdata[0]; + + switch (cmd->cmnd[0]) { + case MODE_SENSE_10: + { + static char ms10_caching_page[12] = + { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 }; + unsigned char page; + page = cmd->cmnd[2] & 0x3f; + if (page == 0x8 || page == 0x3f) { + stex_direct_copy(cmd, ms10_caching_page, + sizeof(ms10_caching_page)); + cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + done(cmd); + } else + stex_invalid_field(cmd, done); + return 0; + } + case INQUIRY: + if (id != ST_MAX_ARRAY_SUPPORTED) + break; + if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { + stex_direct_copy(cmd, console_inq_page, + sizeof(console_inq_page)); + cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + done(cmd); + } else + stex_invalid_field(cmd, done); + return 0; + case PASSTHRU_CMD: + if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { + struct st_drvver ver; + ver.major = ST_VER_MAJOR; + ver.minor = ST_VER_MINOR; + ver.oem = ST_OEM; + ver.build = ST_BUILD_VER; + ver.signature[0] = PASSTHRU_SIGNATURE; + ver.console_id = ST_MAX_ARRAY_SUPPORTED; + ver.host_no = hba->host->host_no; + cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ? 
+ DID_OK << 16 | COMMAND_COMPLETE << 8 : + DID_ERROR << 16 | COMMAND_COMPLETE << 8; + done(cmd); + return 0; + } + default: + break; + } + + cmd->scsi_done = done; + + if (unlikely((tag = __stex_alloc_tag((unsigned long *)&hba->tag)) + == TAG_BITMAP_LENGTH)) + return SCSI_MLQUEUE_HOST_BUSY; + + req = stex_alloc_req(hba); + req->lun = lun; + req->target = id; + + /* cdb */ + memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH); + + hba->ccb[tag].cmd = cmd; + hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; + hba->ccb[tag].sense_buffer = cmd->sense_buffer; + hba->ccb[tag].req_type = 0; + + if (cmd->sc_data_direction != DMA_NONE) + stex_map_sg(hba, req, &hba->ccb[tag]); + + stex_send_cmd(hba, req, tag); + return 0; +} + +static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd) +{ + if (cmd->sc_data_direction != DMA_NONE) { + if (cmd->use_sg) + pci_unmap_sg(hba->pdev, cmd->request_buffer, + cmd->use_sg, cmd->sc_data_direction); + else + pci_unmap_single(hba->pdev, cmd->SCp.dma_handle, + cmd->request_bufflen, cmd->sc_data_direction); + } +} + +static void stex_scsi_done(struct st_ccb *ccb) +{ + struct scsi_cmnd *cmd = ccb->cmd; + int result; + + if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) { + result = ccb->scsi_status; + switch (ccb->scsi_status) { + case SAM_STAT_GOOD: + result |= DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + case SAM_STAT_CHECK_CONDITION: + result |= DRIVER_SENSE << 24; + break; + case SAM_STAT_BUSY: + result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; + break; + default: + result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8; + break; + } + } + else if (ccb->srb_status & SRB_SEE_SENSE) + result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION; + else switch (ccb->srb_status) { + case SRB_STATUS_SELECTION_TIMEOUT: + result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; + break; + case SRB_STATUS_BUSY: + result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; + break; + case SRB_STATUS_INVALID_REQUEST: + case SRB_STATUS_ERROR: + default: + result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; + break; + } + + cmd->result = result; + cmd->scsi_done(cmd); +} + +static void stex_copy_data(struct st_ccb *ccb, + struct status_msg *resp, unsigned int variable) +{ + size_t count = variable; + if (resp->scsi_status != SAM_STAT_GOOD) { + if (ccb->sense_buffer != NULL) + memcpy(ccb->sense_buffer, resp->variable, + min(variable, ccb->sense_bufflen)); + return; + } + + if (ccb->cmd == NULL) + return; + stex_internal_copy(ccb->cmd, resp->variable, &count, ccb->sg_count); +} + +static void stex_mu_intr(struct st_hba *hba, u32 doorbell) +{ + void __iomem *base = hba->mmio_base; + struct status_msg *resp; + struct st_ccb *ccb; + unsigned int size; + u16 tag; + + if (!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)) + return; + + /* status payloads */ + hba->status_head = readl(base + OMR1); + if (unlikely(hba->status_head >= MU_STATUS_COUNT)) { + printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n", + pci_name(hba->pdev)); + return; + } + + if (unlikely(hba->mu_status != MU_STATE_STARTED || + hba->out_req_cnt <= 0)) { + hba->status_tail = hba->status_head; + goto update_status; + } + + while (hba->status_tail != hba->status_head) { + resp = stex_get_status(hba); + tag = le16_to_cpu(resp->tag); + if (unlikely(tag >= TAG_BITMAP_LENGTH)) { + printk(KERN_WARNING DRV_NAME + "(%s): invalid tag\n", pci_name(hba->pdev)); + continue; + } + if (unlikely((hba->tag & (1 << tag)) == 0)) { + printk(KERN_WARNING DRV_NAME + "(%s): null tag\n", pci_name(hba->pdev)); + 
continue; + } + + hba->out_req_cnt--; + ccb = &hba->ccb[tag]; + if (hba->wait_ccb == ccb) + hba->wait_ccb = NULL; + if (unlikely(ccb->req == NULL)) { + printk(KERN_WARNING DRV_NAME + "(%s): lagging req\n", pci_name(hba->pdev)); + __stex_free_tag((unsigned long *)&hba->tag, tag); + stex_unmap_sg(hba, ccb->cmd); /* ??? */ + continue; + } + + size = resp->payload_sz * sizeof(u32); /* payload size */ + if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN || + size > sizeof(*resp))) { + printk(KERN_WARNING DRV_NAME "(%s): bad status size\n", + pci_name(hba->pdev)); + } else { + size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */ + if (size) + stex_copy_data(ccb, resp, size); + } + + ccb->srb_status = resp->srb_status; + ccb->scsi_status = resp->scsi_status; + + if (ccb->req_type & PASSTHRU_REQ_TYPE) { + if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) { + ccb->req_type = 0; + continue; + } + ccb->req_type = 0; + if (waitqueue_active(&hba->waitq)) + wake_up(&hba->waitq); + continue; + } + if (ccb->cmd->cmnd[0] == PASSTHRU_CMD && + ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER) + stex_controller_info(hba, ccb); + __stex_free_tag((unsigned long *)&hba->tag, tag); + stex_unmap_sg(hba, ccb->cmd); + stex_scsi_done(ccb); + } + +update_status: + writel(hba->status_head, base + IMR1); + readl(base + IMR1); /* flush */ +} + +static irqreturn_t stex_intr(int irq, void *__hba, struct pt_regs *regs) +{ + struct st_hba *hba = __hba; + void __iomem *base = hba->mmio_base; + u32 data; + unsigned long flags; + int handled = 0; + + spin_lock_irqsave(hba->host->host_lock, flags); + + data = readl(base + ODBL); + + if (data && data != 0xffffffff) { + /* clear the interrupt */ + writel(data, base + ODBL); + readl(base + ODBL); /* flush */ + stex_mu_intr(hba, data); + handled = 1; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return IRQ_RETVAL(handled); +} + +static int stex_handshake(struct st_hba *hba) +{ + void __iomem *base = hba->mmio_base; + struct handshake_frame *h; + dma_addr_t status_phys; + int i; + + if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) { + writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); + readl(base + IDBL); + for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE + && i < MU_MAX_DELAY_TIME; i++) { + rmb(); + msleep(1); + } + + if (i == MU_MAX_DELAY_TIME) { + printk(KERN_ERR DRV_NAME + "(%s): no handshake signature\n", + pci_name(hba->pdev)); + return -1; + } + } + + udelay(10); + + h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE); + h->rb_phy = cpu_to_le32(hba->dma_handle); + h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16); + h->req_sz = cpu_to_le16(sizeof(struct req_msg)); + h->req_cnt = cpu_to_le16(MU_REQ_COUNT); + h->status_sz = cpu_to_le16(sizeof(struct status_msg)); + h->status_cnt = cpu_to_le16(MU_STATUS_COUNT); + stex_gettime(&h->hosttime); + h->partner_type = HMU_PARTNER_TYPE; + + status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE; + writel(status_phys, base + IMR0); + readl(base + IMR0); + writel((status_phys >> 16) >> 16, base + IMR1); + readl(base + IMR1); + + writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */ + readl(base + OMR0); + writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL); + readl(base + IDBL); /* flush */ + + udelay(10); + for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE + && i < MU_MAX_DELAY_TIME; i++) { + rmb(); + msleep(1); + } + + if (i == MU_MAX_DELAY_TIME) { + printk(KERN_ERR DRV_NAME + "(%s): no signature after handshake frame\n", + pci_name(hba->pdev)); + return -1; + } + + writel(0, 
base + IMR0); + readl(base + IMR0); + writel(0, base + OMR0); + readl(base + OMR0); + writel(0, base + IMR1); + readl(base + IMR1); + writel(0, base + OMR1); + readl(base + OMR1); /* flush */ + hba->mu_status = MU_STATE_STARTED; + return 0; +} + +static int stex_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct st_hba *hba = (struct st_hba *)host->hostdata; + u16 tag; + void __iomem *base; + u32 data; + int result = SUCCESS; + unsigned long flags; + base = hba->mmio_base; + spin_lock_irqsave(host->host_lock, flags); + + for (tag = 0; tag < MU_MAX_REQUEST; tag++) + if (hba->ccb[tag].cmd == cmd && (hba->tag & (1 << tag))) { + hba->wait_ccb = &(hba->ccb[tag]); + break; + } + if (tag >= MU_MAX_REQUEST) + goto out; + + data = readl(base + ODBL); + if (data == 0 || data == 0xffffffff) + goto fail_out; + + writel(data, base + ODBL); + readl(base + ODBL); /* flush */ + + stex_mu_intr(hba, data); + + if (hba->wait_ccb == NULL) { + printk(KERN_WARNING DRV_NAME + "(%s): lost interrupt\n", pci_name(hba->pdev)); + goto out; + } + +fail_out: + hba->wait_ccb->req = NULL; /* nullify the req's future return */ + hba->wait_ccb = NULL; + result = FAILED; +out: + spin_unlock_irqrestore(host->host_lock, flags); + return result; +} + +static void stex_hard_reset(struct st_hba *hba) +{ + struct pci_bus *bus; + int i; + u16 pci_cmd; + u8 pci_bctl; + + for (i = 0; i < 16; i++) + pci_read_config_dword(hba->pdev, i * 4, + &hba->pdev->saved_config_space[i]); + + /* Reset secondary bus. Our controller(MU/ATU) is the only device on + secondary bus. Consult Intel 80331/3 developer's manual for detail */ + bus = hba->pdev->bus; + pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl); + pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); + msleep(1); + pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl); + + for (i = 0; i < MU_MAX_DELAY_TIME; i++) { + pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_MASTER) + break; + msleep(1); + } + + ssleep(5); + for (i = 0; i < 16; i++) + pci_write_config_dword(hba->pdev, i * 4, + hba->pdev->saved_config_space[i]); +} + +static int stex_reset(struct scsi_cmnd *cmd) +{ + struct st_hba *hba; + unsigned long flags; + hba = (struct st_hba *) &cmd->device->host->hostdata[0]; + + hba->mu_status = MU_STATE_RESETTING; + + if (hba->cardtype == st_shasta) + stex_hard_reset(hba); + + if (stex_handshake(hba)) { + printk(KERN_WARNING DRV_NAME + "(%s): resetting: handshake failed\n", + pci_name(hba->pdev)); + return FAILED; + } + spin_lock_irqsave(hba->host->host_lock, flags); + hba->tag = 0; + hba->req_head = 0; + hba->req_tail = 0; + hba->status_head = 0; + hba->status_tail = 0; + hba->out_req_cnt = 0; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return SUCCESS; +} + +static int stex_biosparam(struct scsi_device *sdev, + struct block_device *bdev, sector_t capacity, int geom[]) +{ + int heads = 255, sectors = 63, cylinders; + + if (capacity < 0x200000) { + heads = 64; + sectors = 32; + } + + cylinders = sector_div(capacity, heads * sectors); + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} + +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = DRV_NAME, + .proc_name = DRV_NAME, + .bios_param = stex_biosparam, + .queuecommand = stex_queuecommand, + .slave_configure = stex_slave_config, + .slave_destroy = stex_slave_destroy, + 
.eh_abort_handler = stex_abort, + .eh_host_reset_handler = stex_reset, + .can_queue = ST_CAN_QUEUE, + .this_id = -1, + .sg_tablesize = ST_MAX_SG, + .cmd_per_lun = ST_CMD_PER_LUN, +}; + +static int stex_set_dma_mask(struct pci_dev * pdev) +{ + int ret; + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) + && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) + return 0; + ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + return ret; +} + +static int __devinit +stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct st_hba *hba; + struct Scsi_Host *host; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_master(pdev); + + host = scsi_host_alloc(&driver_template, sizeof(struct st_hba)); + + if (!host) { + printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_disable; + } + + hba = (struct st_hba *)host->hostdata; + memset(hba, 0, sizeof(struct st_hba)); + + err = pci_request_regions(pdev, DRV_NAME); + if (err < 0) { + printk(KERN_ERR DRV_NAME "(%s): request regions failed\n", + pci_name(pdev)); + goto out_scsi_host_put; + } + + hba->mmio_base = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if ( !hba->mmio_base) { + printk(KERN_ERR DRV_NAME "(%s): memory map failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_release_regions; + } + + err = stex_set_dma_mask(pdev); + if (err) { + printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n", + pci_name(pdev)); + goto out_iounmap; + } + + hba->dma_mem = dma_alloc_coherent(&pdev->dev, + STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL); + if (!hba->dma_mem) { + err = -ENOMEM; + printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n", + pci_name(pdev)); + goto out_iounmap; + } + + hba->status_buffer = + (struct status_msg *)(hba->dma_mem + MU_REQ_BUFFER_SIZE); + hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE; + hba->mu_status = MU_STATE_STARTING; + + hba->cardtype = (unsigned int) id->driver_data; + + /* firmware uses id/lun pair for a logical drive, but lun would be + always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use + channel to map lun here */ + host->max_channel = ST_MAX_LUN_PER_TARGET - 1; + host->max_id = ST_MAX_TARGET_NUM; + host->max_lun = 1; + host->unique_id = host->host_no; + host->max_cmd_len = STEX_CDB_LENGTH; + + hba->host = host; + hba->pdev = pdev; + init_waitqueue_head(&hba->waitq); + + err = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba); + if (err) { + printk(KERN_ERR DRV_NAME "(%s): request irq failed\n", + pci_name(pdev)); + goto out_pci_free; + } + + err = stex_handshake(hba); + if (err) + goto out_free_irq; + + pci_set_drvdata(pdev, hba); + + err = scsi_add_host(host, &pdev->dev); + if (err) { + printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n", + pci_name(pdev)); + goto out_free_irq; + } + + scsi_scan_host(host); + + return 0; + +out_free_irq: + free_irq(pdev->irq, hba); +out_pci_free: + dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE, + hba->dma_mem, hba->dma_handle); +out_iounmap: + iounmap(hba->mmio_base); +out_release_regions: + pci_release_regions(pdev); +out_scsi_host_put: + scsi_host_put(host); +out_disable: + pci_disable_device(pdev); + + return err; +} + +static void stex_hba_stop(struct st_hba *hba) +{ + struct req_msg *req; + unsigned long flags; + unsigned long before; + u16 tag; + + before = jiffies; + while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag)) + == TAG_BITMAP_LENGTH) { + if 
(time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) + return; + msleep(10); + } + + spin_lock_irqsave(hba->host->host_lock, flags); + req = stex_alloc_req(hba); + memset(req->cdb, 0, STEX_CDB_LENGTH); + + req->cdb[0] = CONTROLLER_CMD; + req->cdb[1] = CTLR_POWER_STATE_CHANGE; + req->cdb[2] = CTLR_POWER_SAVING; + + hba->ccb[tag].cmd = NULL; + hba->ccb[tag].sg_count = 0; + hba->ccb[tag].sense_bufflen = 0; + hba->ccb[tag].sense_buffer = NULL; + hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE; + + stex_send_cmd(hba, req, tag); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + wait_event_timeout(hba->waitq, + !(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ); + if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) + return; + + stex_free_tag(hba, (unsigned long *)&hba->tag, tag); +} + +static void stex_hba_free(struct st_hba *hba) +{ + free_irq(hba->pdev->irq, hba); + + iounmap(hba->mmio_base); + + pci_release_regions(hba->pdev); + + dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE, + hba->dma_mem, hba->dma_handle); +} + +static void stex_remove(struct pci_dev *pdev) +{ + struct st_hba *hba = pci_get_drvdata(pdev); + + scsi_remove_host(hba->host); + + pci_set_drvdata(pdev, NULL); + + stex_hba_stop(hba); + + stex_hba_free(hba); + + scsi_host_put(hba->host); + + pci_disable_device(pdev); +} + +static void stex_shutdown(struct pci_dev *pdev) +{ + struct st_hba *hba = pci_get_drvdata(pdev); + + stex_hba_stop(hba); +} + +static struct pci_device_id stex_pci_tbl[] = { + { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, + { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, + { 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, + { 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, + { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, + { 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, + { 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta }, + { 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, stex_pci_tbl); + +static struct pci_driver stex_pci_driver = { + .name = DRV_NAME, + .id_table = stex_pci_tbl, + .probe = stex_probe, + .remove = __devexit_p(stex_remove), + .shutdown = stex_shutdown, +}; + +static int __init stex_init(void) +{ + printk(KERN_INFO DRV_NAME + ": Promise SuperTrak EX Driver version: %s\n", + ST_DRIVER_VERSION); + + return pci_register_driver(&stex_pci_driver); +} + +static void __exit stex_exit(void) +{ + pci_unregister_driver(&stex_pci_driver); +} + +module_init(stex_init); +module_exit(stex_exit); -- cgit v0.10.2 From cf355883f506051a8ce3ac4539752829320b6c8c Mon Sep 17 00:00:00 2001 From: Ed Lin Date: Fri, 1 Sep 2006 14:31:51 +0800 Subject: [SCSI] stex: add shared tags from block Use block shared tags entirely within the driver. In the case of shutdown, assume that there are no other outstanding commands, so tag 0 is fine. Signed-off-by: Ed Lin Signed-off-by: James Bottomley diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index fd09330..15fb99f 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -34,6 +34,7 @@ #include #include #include +#include #define DRV_NAME "stex" #define ST_DRIVER_VERSION "2.9.0.13" @@ -95,7 +96,6 @@ enum { /* request count, etc. 
*/ MU_MAX_REQUEST = 32, - TAG_BITMAP_LENGTH = MU_MAX_REQUEST, /* one message wasted, use MU_MAX_REQUEST+1 to handle MU_MAX_REQUEST messages */ @@ -265,7 +265,6 @@ struct st_hba { struct Scsi_Host *host; struct pci_dev *pdev; - u32 tag; u32 req_head; u32 req_tail; u32 status_head; @@ -309,40 +308,6 @@ static void stex_gettime(__le32 *time) *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16); } -static u16 __stex_alloc_tag(unsigned long *bitmap) -{ - int i; - i = find_first_zero_bit(bitmap, TAG_BITMAP_LENGTH); - if (i < TAG_BITMAP_LENGTH) - __set_bit(i, bitmap); - return (u16)i; -} - -static u16 stex_alloc_tag(struct st_hba *hba, unsigned long *bitmap) -{ - unsigned long flags; - u16 tag; - - spin_lock_irqsave(hba->host->host_lock, flags); - tag = __stex_alloc_tag(bitmap); - spin_unlock_irqrestore(hba->host->host_lock, flags); - return tag; -} - -static void __stex_free_tag(unsigned long *bitmap, u16 tag) -{ - __clear_bit((int)tag, bitmap); -} - -static void stex_free_tag(struct st_hba *hba, unsigned long *bitmap, u16 tag) -{ - unsigned long flags; - - spin_lock_irqsave(hba->host->host_lock, flags); - __stex_free_tag(bitmap, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); -} - static struct status_msg *stex_get_status(struct st_hba *hba) { struct status_msg *status = @@ -535,57 +500,31 @@ stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) } static int +stex_slave_alloc(struct scsi_device *sdev) +{ + /* Cheat: usually extracted from Inquiry data */ + sdev->tagged_supported = 1; + + scsi_activate_tcq(sdev, sdev->host->can_queue); + + return 0; +} + +static int stex_slave_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; sdev->timeout = 60 * HZ; + sdev->tagged_supported = 1; + return 0; } static void stex_slave_destroy(struct scsi_device *sdev) { - struct st_hba *hba = (struct st_hba *) sdev->host->hostdata; - struct req_msg *req; - unsigned long flags; - unsigned long before; - u16 tag; - - if (sdev->type != TYPE_DISK) - return; - - before = jiffies; - while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag)) - == TAG_BITMAP_LENGTH) { - if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) - return; - msleep(10); - } - - spin_lock_irqsave(hba->host->host_lock, flags); - req = stex_alloc_req(hba); - memset(req->cdb, 0, STEX_CDB_LENGTH); - - req->target = sdev->id; - req->lun = sdev->channel; /* firmware lun issue work around */ - req->cdb[0] = SYNCHRONIZE_CACHE; - - hba->ccb[tag].cmd = NULL; - hba->ccb[tag].sg_count = 0; - hba->ccb[tag].sense_bufflen = 0; - hba->ccb[tag].sense_buffer = NULL; - hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE; - - stex_send_cmd(hba, req, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); - - wait_event_timeout(hba->waitq, - !(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ); - if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) - return; - - stex_free_tag(hba, (unsigned long *)&hba->tag, tag); + scsi_deactivate_tcq(sdev, 1); } static int @@ -650,8 +589,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) cmd->scsi_done = done; - if (unlikely((tag = __stex_alloc_tag((unsigned long *)&hba->tag)) - == TAG_BITMAP_LENGTH)) + tag = cmd->request->tag; + + if (unlikely(tag >= host->can_queue)) return SCSI_MLQUEUE_HOST_BUSY; req = stex_alloc_req(hba); @@ -771,26 +711,18 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell) while (hba->status_tail != hba->status_head) { resp = stex_get_status(hba); tag = le16_to_cpu(resp->tag); - if (unlikely(tag >= 
TAG_BITMAP_LENGTH)) { + if (unlikely(tag >= hba->host->can_queue)) { printk(KERN_WARNING DRV_NAME "(%s): invalid tag\n", pci_name(hba->pdev)); continue; } - if (unlikely((hba->tag & (1 << tag)) == 0)) { - printk(KERN_WARNING DRV_NAME - "(%s): null tag\n", pci_name(hba->pdev)); - continue; - } - hba->out_req_cnt--; ccb = &hba->ccb[tag]; if (hba->wait_ccb == ccb) hba->wait_ccb = NULL; if (unlikely(ccb->req == NULL)) { printk(KERN_WARNING DRV_NAME "(%s): lagging req\n", pci_name(hba->pdev)); - __stex_free_tag((unsigned long *)&hba->tag, tag); - stex_unmap_sg(hba, ccb->cmd); /* ??? */ continue; } @@ -808,7 +740,15 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell) ccb->srb_status = resp->srb_status; ccb->scsi_status = resp->scsi_status; - if (ccb->req_type & PASSTHRU_REQ_TYPE) { + if (likely(ccb->cmd != NULL)) { + if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD && + ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) + stex_controller_info(hba, ccb); + stex_unmap_sg(hba, ccb->cmd); + stex_scsi_done(ccb); + hba->out_req_cnt--; + } else if (ccb->req_type & PASSTHRU_REQ_TYPE) { + hba->out_req_cnt--; if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) { ccb->req_type = 0; continue; @@ -816,14 +756,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell) ccb->req_type = 0; if (waitqueue_active(&hba->waitq)) wake_up(&hba->waitq); - continue; } - if (ccb->cmd->cmnd[0] == PASSTHRU_CMD && - ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER) - stex_controller_info(hba, ccb); - __stex_free_tag((unsigned long *)&hba->tag, tag); - stex_unmap_sg(hba, ccb->cmd); - stex_scsi_done(ccb); } update_status: @@ -933,21 +866,24 @@ static int stex_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct st_hba *hba = (struct st_hba *)host->hostdata; - u16 tag; + u16 tag = cmd->request->tag; void __iomem *base; u32 data; int result = SUCCESS; unsigned long flags; base = hba->mmio_base; spin_lock_irqsave(host->host_lock, flags); - - for (tag = 0; tag < MU_MAX_REQUEST; tag++) - if (hba->ccb[tag].cmd == cmd && (hba->tag & (1 << tag))) { - hba->wait_ccb = &(hba->ccb[tag]); - break; - } - if (tag >= MU_MAX_REQUEST) - goto out; + if (tag < host->can_queue && hba->ccb[tag].cmd == cmd) + hba->wait_ccb = &hba->ccb[tag]; + else { + for (tag = 0; tag < host->can_queue; tag++) + if (hba->ccb[tag].cmd == cmd) { + hba->wait_ccb = &hba->ccb[tag]; + break; + } + if (tag >= host->can_queue) + goto out; + } data = readl(base + ODBL); if (data == 0 || data == 0xffffffff) @@ -965,6 +901,7 @@ static int stex_abort(struct scsi_cmnd *cmd) } fail_out: + stex_unmap_sg(hba, cmd); hba->wait_ccb->req = NULL; /* nullify the req's future return */ hba->wait_ccb = NULL; result = FAILED; @@ -1025,7 +962,6 @@ static int stex_reset(struct scsi_cmnd *cmd) return FAILED; } spin_lock_irqsave(hba->host->host_lock, flags); - hba->tag = 0; hba->req_head = 0; hba->req_tail = 0; hba->status_head = 0; @@ -1061,6 +997,7 @@ static struct scsi_host_template driver_template = { .proc_name = DRV_NAME, .bios_param = stex_biosparam, .queuecommand = stex_queuecommand, + .slave_alloc = stex_slave_alloc, .slave_configure = stex_slave_config, .slave_destroy = stex_slave_destroy, .eh_abort_handler = stex_abort, @@ -1171,6 +1108,14 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto out_free_irq; + scsi_init_shared_tag_map(host, ST_CAN_QUEUE); + if (host->bqt == NULL) { + err = -ENOMEM; + printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n", + pci_name(pdev)); + goto out_free_irq; + } + pci_set_drvdata(pdev, hba); err 
= scsi_add_host(host, &pdev->dev); @@ -1206,15 +1151,7 @@ static void stex_hba_stop(struct st_hba *hba) struct req_msg *req; unsigned long flags; unsigned long before; - u16 tag; - - before = jiffies; - while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag)) - == TAG_BITMAP_LENGTH) { - if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) - return; - msleep(10); - } + u16 tag = 0; spin_lock_irqsave(hba->host->host_lock, flags); req = stex_alloc_req(hba); @@ -1233,12 +1170,12 @@ static void stex_hba_stop(struct st_hba *hba) stex_send_cmd(hba, req, tag); spin_unlock_irqrestore(hba->host->host_lock, flags); - wait_event_timeout(hba->waitq, - !(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ); - if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) - return; - - stex_free_tag(hba, (unsigned long *)&hba->tag, tag); + before = jiffies; + while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { + if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) + return; + msleep(10); + } } static void stex_hba_free(struct st_hba *hba) -- cgit v0.10.2 From deb81d80ba27da8dfabc29ccb5977db8f4942a0a Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Fri, 1 Sep 2006 09:28:48 -0400 Subject: [SCSI] add failure return to scsi_init_shared_tag_map() And use it in the stex driver. Signed-off-by: James Bottomley diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 15fb99f..3cf3106 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -1108,9 +1108,8 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto out_free_irq; - scsi_init_shared_tag_map(host, ST_CAN_QUEUE); - if (host->bqt == NULL) { - err = -ENOMEM; + err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE); + if (err) { printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n", pci_name(pdev)); goto out_free_irq; diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index 4eea254..d04d05a 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -138,9 +138,10 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag) * @shost: the host to share the tag map among all devices * @depth: the total depth of the map */ -static inline void scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth) +static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth) { shost->bqt = blk_init_tags(depth); + return shost->bqt ? 0 : -ENOMEM; } #endif /* _SCSI_SCSI_TCQ_H */ -- cgit v0.10.2 From 84314fd4740ad73550c76dee4a9578979d84af48 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 18 Aug 2006 17:30:09 -0400 Subject: [SCSI] SCSI and FC Transport: add netlink support for posting of transport events This patch formally adds support for the posting of FC events via netlink. It is a followup to the original RFC at: http://marc.theaimsgroup.com/?l=linux-scsi&m=114530667923464&w=2 and the initial posting at: http://marc.theaimsgroup.com/?l=linux-scsi&m=115507374832500&w=2 The patch has been updated to optimize the send path, per the discussions in the initial posting. Per discussions at the Storage Summit and at OLS, we are to use netlink for async events from transports. Also per discussions, to avoid a netlink protocol per transport, I've create a single NETLINK_SCSITRANSPORT protocol, which can then be used by all transports. This patch: - Creates new files scsi_netlink.c and scsi_netlink.h, which contains the single and shared definitions for the SCSI Transport. It is tied into the base SCSI subsystem intialization. 
Contains a single interface routine, scsi_send_transport_event(), for a transport to send an event (via multicast to a protocol specific group). - Creates a new scsi_netlink_fc.h file, which contains the FC netlink event messages - Adds 3 new routines to the fc transport: fc_get_event_number() - to get a FC event # fc_host_post_event() - to send a simple FC event (32 bits of data) fc_host_post_vendor_event() - to send a Vendor unique event, with arbitrary amounts of data. Note: the separation of event number allows for a LLD to send a standard event, followed by vendor-specific data for the event. Note: This patch assumes 2 prior fc transport patches have been installed: http://marc.theaimsgroup.com/?l=linux-scsi&m=115555807316329&w=2 http://marc.theaimsgroup.com/?l=linux-scsi&m=115581614930261&w=2 Sorry - next time I'll do something like making these individual patches of the same posting when I know they'll be posted closely together. Signed-off-by: James Smart Tidy up configuration not to make SCSI always select NET Signed-off-by: James Bottomley diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index c8c6065..4d1998d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -27,6 +27,11 @@ config SCSI However, do not compile this as a module if your root file system (the one containing the directory /) is located on a SCSI device. +config SCSI_NETLINK + tristate + default n + select NET + config SCSI_PROC_FS bool "legacy /proc/scsi/ support" depends on SCSI && PROC_FS @@ -222,6 +227,7 @@ config SCSI_SPI_ATTRS config SCSI_FC_ATTRS tristate "FiberChannel Transport Attributes" depends on SCSI + select SCSI_NETLINK help If you wish to export transport-specific information about each attached FiberChannel device to sysfs, say Y. diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index fd9aeb1..8fc2c59 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -159,6 +159,7 @@ scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \ scsicam.o scsi_error.o scsi_lib.o \ scsi_scan.o scsi_sysfs.o \ scsi_devinfo.o +scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3784392..eedfd05 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1118,6 +1118,8 @@ static int __init init_scsi(void) for_each_possible_cpu(i) INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); + scsi_netlink_init(); + printk(KERN_NOTICE "SCSI subsystem initialized\n"); return 0; @@ -1138,6 +1140,7 @@ cleanup_queue: static void __exit exit_scsi(void) { + scsi_netlink_exit(); scsi_sysfs_unregister(); scsi_exit_sysctl(); scsi_exit_hosts(); diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c new file mode 100644 index 0000000..1b59b27 --- /dev/null +++ b/drivers/scsi/scsi_netlink.c @@ -0,0 +1,199 @@ +/* + * scsi_netlink.c - SCSI Transport Netlink Interface + * + * Copyright (C) 2006 James Smart, Emulex Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include + +#include +#include "scsi_priv.h" + +struct sock *scsi_nl_sock = NULL; +EXPORT_SYMBOL_GPL(scsi_nl_sock); + + +/** + * scsi_nl_rcv_msg - + * Receive message handler. Extracts message from a receive buffer. + * Validates message header and calls appropriate transport message handler + * + * @skb: socket receive buffer + * + **/ +static void +scsi_nl_rcv_msg(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + struct scsi_nl_hdr *hdr; + uint32_t rlen; + int err; + + while (skb->len >= NLMSG_SPACE(0)) { + err = 0; + + nlh = (struct nlmsghdr *) skb->data; + if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || + (skb->len < nlh->nlmsg_len)) { + printk(KERN_WARNING "%s: discarding partial skb\n", + __FUNCTION__); + return; + } + + rlen = NLMSG_ALIGN(nlh->nlmsg_len); + if (rlen > skb->len) + rlen = skb->len; + + if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { + err = -EBADMSG; + goto next_msg; + } + + hdr = NLMSG_DATA(nlh); + if ((hdr->version != SCSI_NL_VERSION) || + (hdr->magic != SCSI_NL_MAGIC)) { + err = -EPROTOTYPE; + goto next_msg; + } + + if (security_netlink_recv(skb, CAP_SYS_ADMIN)) { + err = -EPERM; + goto next_msg; + } + + if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { + printk(KERN_WARNING "%s: discarding partial message\n", + __FUNCTION__); + return; + } + + /* + * We currently don't support anyone sending us a message + */ + +next_msg: + if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) + netlink_ack(skb, nlh, err); + + skb_pull(skb, rlen); + } +} + + +/** + * scsi_nl_rcv_msg - + * Receive handler for a socket. Extracts a received message buffer from + * the socket, and starts message processing. + * + * @sk: socket + * @len: unused + * + **/ +static void +scsi_nl_rcv(struct sock *sk, int len) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + scsi_nl_rcv_msg(skb); + kfree_skb(skb); + } +} + + +/** + * scsi_nl_rcv_event - + * Event handler for a netlink socket. + * + * @this: event notifier block + * @event: event type + * @ptr: event payload + * + **/ +static int +scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct netlink_notify *n = ptr; + + if (n->protocol != NETLINK_SCSITRANSPORT) + return NOTIFY_DONE; + + /* + * Currently, we are not tracking PID's, etc. There is nothing + * to handle. 
+ */ + + return NOTIFY_DONE; +} + +static struct notifier_block scsi_netlink_notifier = { + .notifier_call = scsi_nl_rcv_event, +}; + + +/** + * scsi_netlink_init - + * Called by SCSI subsystem to intialize the SCSI transport netlink + * interface + * + **/ +void +scsi_netlink_init(void) +{ + int error; + + error = netlink_register_notifier(&scsi_netlink_notifier); + if (error) { + printk(KERN_ERR "%s: register of event handler failed - %d\n", + __FUNCTION__, error); + return; + } + + scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, + SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE); + if (!scsi_nl_sock) { + printk(KERN_ERR "%s: register of recieve handler failed\n", + __FUNCTION__); + netlink_unregister_notifier(&scsi_netlink_notifier); + } + + return; +} + + +/** + * scsi_netlink_exit - + * Called by SCSI subsystem to disable the SCSI transport netlink + * interface + * + **/ +void +scsi_netlink_exit(void) +{ + if (scsi_nl_sock) { + sock_release(scsi_nl_sock->sk_socket); + netlink_unregister_notifier(&scsi_netlink_notifier); + } + + return; +} + + diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index ae24c85..5d023d4 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -8,6 +8,7 @@ struct scsi_cmnd; struct scsi_device; struct scsi_host_template; struct Scsi_Host; +struct scsi_nl_hdr; /* @@ -110,6 +111,16 @@ extern void __scsi_remove_device(struct scsi_device *); extern struct bus_type scsi_bus_type; +/* scsi_netlink.c */ +#ifdef CONFIG_SCSI_NETLINK +extern void scsi_netlink_init(void); +extern void scsi_netlink_exit(void); +extern struct sock *scsi_nl_sock; +#else +static inline void scsi_netlink_init(void) {} +static inline void scsi_netlink_exit(void) {} +#endif + /* * internal scsi timeout functions: for use by mid-layer and transport * classes. diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 79d31ca..05989f1 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -32,6 +32,9 @@ #include #include #include +#include +#include +#include #include "scsi_priv.h" static int fc_queue_work(struct Scsi_Host *, struct work_struct *); @@ -93,6 +96,29 @@ fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) #define FC_PORTTYPE_MAX_NAMELEN 50 +/* Convert fc_host_event_code values to ascii string name */ +static const struct { + enum fc_host_event_code value; + char *name; +} fc_host_event_code_names[] = { + { FCH_EVT_LIP, "lip" }, + { FCH_EVT_LINKUP, "link_up" }, + { FCH_EVT_LINKDOWN, "link_down" }, + { FCH_EVT_LIPRESET, "lip_reset" }, + { FCH_EVT_RSCN, "rscn" }, + { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" }, + { FCH_EVT_PORT_UNKNOWN, "port_unknown" }, + { FCH_EVT_PORT_ONLINE, "port_online" }, + { FCH_EVT_PORT_OFFLINE, "port_offline" }, + { FCH_EVT_PORT_FABRIC, "port_fabric" }, + { FCH_EVT_LINK_UNKNOWN, "link_unknown" }, + { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" }, +}; +fc_enum_name_search(host_event_code, fc_host_event_code, + fc_host_event_code_names) +#define FC_HOST_EVENT_CODE_MAX_NAMELEN 30 + + /* Convert fc_port_state values to ascii string name */ static struct { enum fc_port_state value; @@ -377,10 +403,182 @@ MODULE_PARM_DESC(dev_loss_tmo, " exceeded, the scsi target is removed. 
Value should be" " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); +/** + * Netlink Infrastructure + **/ + +static atomic_t fc_event_seq; + +/** + * fc_get_event_number - Obtain the next sequential FC event number + * + * Notes: + * We could have inline'd this, but it would have required fc_event_seq to + * be exposed. For now, live with the subroutine call. + * Atomic used to avoid lock/unlock... + **/ +u32 +fc_get_event_number(void) +{ + return atomic_add_return(1, &fc_event_seq); +} +EXPORT_SYMBOL(fc_get_event_number); + + +/** + * fc_host_post_event - called to post an even on an fc_host. + * + * @shost: host the event occurred on + * @event_number: fc event number obtained from get_fc_event_number() + * @event_code: fc_host event being posted + * @event_data: 32bits of data for the event being posted + * + * Notes: + * This routine assumes no locks are held on entry. + **/ +void +fc_host_post_event(struct Scsi_Host *shost, u32 event_number, + enum fc_host_event_code event_code, u32 event_data) +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + struct fc_nl_event *event; + const char *name; + u32 len, skblen; + int err; + + if (!scsi_nl_sock) { + err = -ENOENT; + goto send_fail; + } + + len = FC_NL_MSGALIGN(sizeof(*event)); + skblen = NLMSG_SPACE(len); + + skb = alloc_skb(skblen, GFP_KERNEL); + if (!skb) { + err = -ENOBUFS; + goto send_fail; + } + + nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, + skblen - sizeof(*nlh), 0); + if (!nlh) { + err = -ENOBUFS; + goto send_fail_skb; + } + event = NLMSG_DATA(nlh); + + INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, + FC_NL_ASYNC_EVENT, len); + event->seconds = get_seconds(); + event->vendor_id = 0; + event->host_no = shost->host_no; + event->event_datalen = sizeof(u32); /* bytes */ + event->event_num = event_number; + event->event_code = event_code; + event->event_data = event_data; + + err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS); + if (err && (err != -ESRCH)) /* filter no recipient errors */ + /* nlmsg_multicast already kfree_skb'd */ + goto send_fail; + + return; + +send_fail_skb: + kfree_skb(skb); +send_fail: + name = get_fc_host_event_code_name(event_code); + printk(KERN_WARNING + "%s: Dropped Event : host %d %s data 0x%08x - err %d\n", + __FUNCTION__, shost->host_no, + (name) ? name : "", event_data, err); + return; +} +EXPORT_SYMBOL(fc_host_post_event); + + +/** + * fc_host_post_vendor_event - called to post a vendor unique event on + * a fc_host + * + * @shost: host the event occurred on + * @event_number: fc event number obtained from get_fc_event_number() + * @data_len: amount, in bytes, of vendor unique data + * @data_buf: pointer to vendor unique data + * + * Notes: + * This routine assumes no locks are held on entry. 
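+ *
+ * Example (illustrative sketch only; "buf", "buf_len" and "pci_vendor_id"
+ * are placeholders for the caller's vendor-unique payload, its length in
+ * bytes, and the 16-bit PCI registered vendor id):
+ *   fc_host_post_vendor_event(shost, fc_get_event_number(),
+ *           buf_len, buf, SCSI_NL_VID_TYPE_PCI | pci_vendor_id);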
+ **/ +void +fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, + u32 data_len, char * data_buf, u32 vendor_id) +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + struct fc_nl_event *event; + u32 len, skblen; + int err; + + if (!scsi_nl_sock) { + err = -ENOENT; + goto send_vendor_fail; + } + + len = FC_NL_MSGALIGN(sizeof(*event) + data_len); + skblen = NLMSG_SPACE(len); + + skb = alloc_skb(skblen, GFP_KERNEL); + if (!skb) { + err = -ENOBUFS; + goto send_vendor_fail; + } + + nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, + skblen - sizeof(*nlh), 0); + if (!nlh) { + err = -ENOBUFS; + goto send_vendor_fail_skb; + } + event = NLMSG_DATA(nlh); + + INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, + FC_NL_ASYNC_EVENT, len); + event->seconds = get_seconds(); + event->vendor_id = vendor_id; + event->host_no = shost->host_no; + event->event_datalen = data_len; /* bytes */ + event->event_num = event_number; + event->event_code = FCH_EVT_VENDOR_UNIQUE; + memcpy(&event->event_data, data_buf, data_len); + + err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS); + if (err && (err != -ESRCH)) /* filter no recipient errors */ + /* nlmsg_multicast already kfree_skb'd */ + goto send_vendor_fail; + + return; + +send_vendor_fail_skb: + kfree_skb(skb); +send_vendor_fail: + printk(KERN_WARNING + "%s: Dropped Event : host %d vendor_unique - err %d\n", + __FUNCTION__, shost->host_no, err); + return; +} +EXPORT_SYMBOL(fc_host_post_vendor_event); + + static __init int fc_transport_init(void) { - int error = transport_class_register(&fc_host_class); + int error; + + atomic_set(&fc_event_seq, 0); + + error = transport_class_register(&fc_host_class); if (error) return error; error = transport_class_register(&fc_rport_class); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 855b446..6641162 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -21,6 +21,8 @@ #define NETLINK_DNRTMSG 14 /* DECnet routing messages */ #define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */ #define NETLINK_GENERIC 16 +/* leave room for NETLINK_DM (DM Events) */ +#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ #define MAX_LINKS 32 diff --git a/include/scsi/scsi_netlink.h b/include/scsi/scsi_netlink.h new file mode 100644 index 0000000..7a3a20e --- /dev/null +++ b/include/scsi/scsi_netlink.h @@ -0,0 +1,86 @@ +/* + * SCSI Transport Netlink Interface + * Used for the posting of outbound SCSI transport events + * + * Copyright (C) 2006 James Smart, Emulex Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#ifndef SCSI_NETLINK_H +#define SCSI_NETLINK_H + +/* + * This file intended to be included by both kernel and user space + */ + +/* Single Netlink Message type to send all SCSI Transport messages */ +#define SCSI_TRANSPORT_MSG NLMSG_MIN_TYPE + 1 + +/* SCSI Transport Broadcast Groups */ + /* leaving groups 0 and 1 unassigned */ +#define SCSI_NL_GRP_FC_EVENTS (1<<2) /* Group 2 */ +#define SCSI_NL_GRP_CNT 3 + + +/* SCSI_TRANSPORT_MSG event message header */ +struct scsi_nl_hdr { + uint8_t version; + uint8_t transport; + uint16_t magic; + uint16_t msgtype; + uint16_t msglen; +} __attribute__((aligned(sizeof(uint64_t)))); + +/* scsi_nl_hdr->version value */ +#define SCSI_NL_VERSION 1 + +/* scsi_nl_hdr->magic value */ +#define SCSI_NL_MAGIC 0xA1B2 + +/* scsi_nl_hdr->transport value */ +#define SCSI_NL_TRANSPORT 0 +#define SCSI_NL_TRANSPORT_FC 1 +#define SCSI_NL_MAX_TRANSPORTS 2 + +/* scsi_nl_hdr->msgtype values are defined in each transport */ + + +/* + * Vendor ID: + * If transports post vendor-unique events, they must pass a well-known + * 32-bit vendor identifier. This identifier consists of 8 bits indicating + * the "type" of identifier contained, and 24 bits of id data. + * + * Identifiers for each type: + * PCI : ID data is the 16 bit PCI Registered Vendor ID + */ +#define SCSI_NL_VID_ID_MASK 0x00FFFFFF +#define SCSI_NL_VID_TYPE_MASK 0xFF000000 +#define SCSI_NL_VID_TYPE_PCI 0x01000000 + + +#define INIT_SCSI_NL_HDR(hdr, t, mtype, mlen) \ + { \ + (hdr)->version = SCSI_NL_VERSION; \ + (hdr)->transport = t; \ + (hdr)->magic = SCSI_NL_MAGIC; \ + (hdr)->msgtype = mtype; \ + (hdr)->msglen = mlen; \ + } + + +#endif /* SCSI_NETLINK_H */ + diff --git a/include/scsi/scsi_netlink_fc.h b/include/scsi/scsi_netlink_fc.h new file mode 100644 index 0000000..b213d29 --- /dev/null +++ b/include/scsi/scsi_netlink_fc.h @@ -0,0 +1,71 @@ +/* + * FC Transport Netlink Interface + * + * Copyright (C) 2006 James Smart, Emulex Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#ifndef SCSI_NETLINK_FC_H +#define SCSI_NETLINK_FC_H + +#include + +/* + * This file intended to be included by both kernel and user space + */ + +/* + * FC Transport Message Types + */ + /* kernel -> user */ +#define FC_NL_ASYNC_EVENT 0x0100 + /* user -> kernel */ +/* none */ + + +/* + * Message Structures : + */ + +/* macro to round up message lengths to 8byte boundary */ +#define FC_NL_MSGALIGN(len) (((len) + 7) & ~7) + + +/* + * FC Transport Broadcast Event Message : + * FC_NL_ASYNC_EVENT + * + * Note: if Vendor Unique message, &event_data will be start of + * vendor unique payload, and the length of the payload is + * per event_datalen + * + * Note: When specifying vendor_id, be sure to read the Vendor Type and ID + * formatting requirements specified in scsi_netlink.h + */ +struct fc_nl_event { + struct scsi_nl_hdr snlh; /* must be 1st element ! */ + uint64_t seconds; + uint32_t vendor_id; + uint16_t host_no; + uint16_t event_datalen; + uint32_t event_num; + uint32_t event_code; + uint32_t event_data; +} __attribute__((aligned(sizeof(uint64_t)))); + + +#endif /* SCSI_NETLINK_FC_H */ + diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index c74be5d..f91c535 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -29,6 +29,7 @@ #include #include +#include struct scsi_transport_template; @@ -284,6 +285,30 @@ struct fc_host_statistics { /* + * FC Event Codes - Polled and Async, following FC HBAAPI v2.0 guidelines + */ + +/* + * fc_host_event_code: If you alter this, you also need to alter + * scsi_transport_fc.c (for the ascii descriptions). + */ +enum fc_host_event_code { + FCH_EVT_LIP = 0x1, + FCH_EVT_LINKUP = 0x2, + FCH_EVT_LINKDOWN = 0x3, + FCH_EVT_LIPRESET = 0x4, + FCH_EVT_RSCN = 0x5, + FCH_EVT_ADAPTER_CHANGE = 0x103, + FCH_EVT_PORT_UNKNOWN = 0x200, + FCH_EVT_PORT_OFFLINE = 0x201, + FCH_EVT_PORT_ONLINE = 0x202, + FCH_EVT_PORT_FABRIC = 0x204, + FCH_EVT_LINK_UNKNOWN = 0x500, + FCH_EVT_VENDOR_UNIQUE = 0xffff, +}; + + +/* * FC Local Port (Host) Attributes * * Attributes are based on HBAAPI V2.0 definitions. @@ -526,5 +551,14 @@ struct fc_rport *fc_remote_port_add(struct Scsi_Host *shost, void fc_remote_port_delete(struct fc_rport *rport); void fc_remote_port_rolechg(struct fc_rport *rport, u32 roles); int scsi_is_fc_rport(const struct device *); +u32 fc_get_event_number(void); +void fc_host_post_event(struct Scsi_Host *shost, u32 event_number, + enum fc_host_event_code event_code, u32 event_data); +void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, + u32 data_len, char * data_buf, u32 vendor_id); + /* Note: when specifying vendor_id to fc_host_post_vendor_event() + * be sure to read the Vendor Type and ID formatting requirements + * specified in scsi_netlink.h + */ #endif /* SCSI_TRANSPORT_FC_H */ -- cgit v0.10.2 From f14e2e29cdd07f80de6dec168dc2bb39de37eec3 Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 22 Aug 2006 09:55:23 -0400 Subject: [SCSI] SCSI & FC transport: extend event vendor id's to 64bits During discussions with Mike Christie, I became convinced that we needed a larger vendor id. This patch extends the id from 32 to 64 bits. This applies on top of the prior patches that add SCSI transport events via netlink. 
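With the wider field, the vendor "type" now occupies the top byte of the id and the registered identifier the remaining bits (see the SCSI_NL_VID_TYPE_SHIFT/MASK changes below). As a rough sketch of a caller after this change (assuming a PCI-based LLD; "shost", "pdev" and "payload" are placeholder names, not part of the patch), composing and posting a vendor-unique event looks like:

	#include <linux/pci.h>
	#include <scsi/scsi_host.h>
	#include <scsi/scsi_netlink.h>
	#include <scsi/scsi_transport_fc.h>

	/* Sketch only: post one u32 of driver-defined vendor-unique data. */
	static void example_post_vendor_event(struct Scsi_Host *shost,
					      struct pci_dev *pdev, u32 payload)
	{
		/* top byte selects the id type, low bits carry the PCI vendor id */
		u64 vid = SCSI_NL_VID_TYPE_PCI | pdev->vendor;

		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(payload), (char *)&payload, vid);
	}

The lpfc patch that follows in this series does exactly this, passing SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX as the vendor id.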
Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 05989f1..293188c 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -513,7 +513,7 @@ EXPORT_SYMBOL(fc_host_post_event); **/ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, - u32 data_len, char * data_buf, u32 vendor_id) + u32 data_len, char * data_buf, u64 vendor_id) { struct sk_buff *skb; struct nlmsghdr *nlh; diff --git a/include/scsi/scsi_netlink.h b/include/scsi/scsi_netlink.h index 7a3a20e..8c1470c 100644 --- a/include/scsi/scsi_netlink.h +++ b/include/scsi/scsi_netlink.h @@ -67,9 +67,10 @@ struct scsi_nl_hdr { * Identifiers for each type: * PCI : ID data is the 16 bit PCI Registered Vendor ID */ -#define SCSI_NL_VID_ID_MASK 0x00FFFFFF -#define SCSI_NL_VID_TYPE_MASK 0xFF000000 -#define SCSI_NL_VID_TYPE_PCI 0x01000000 +#define SCSI_NL_VID_TYPE_SHIFT 56 +#define SCSI_NL_VID_TYPE_MASK ((u64)0xFF << SCSI_NL_VID_TYPE_SHIFT) +#define SCSI_NL_VID_TYPE_PCI ((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) +#define SCSI_NL_VID_ID_MASK (~ SCSI_NL_VID_TYPE_MASK) #define INIT_SCSI_NL_HDR(hdr, t, mtype, mlen) \ diff --git a/include/scsi/scsi_netlink_fc.h b/include/scsi/scsi_netlink_fc.h index b213d29..cbf76e4 100644 --- a/include/scsi/scsi_netlink_fc.h +++ b/include/scsi/scsi_netlink_fc.h @@ -58,7 +58,7 @@ struct fc_nl_event { struct scsi_nl_hdr snlh; /* must be 1st element ! */ uint64_t seconds; - uint32_t vendor_id; + uint64_t vendor_id; uint16_t host_no; uint16_t event_datalen; uint32_t event_num; diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index f91c535..0b11eff 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -555,7 +555,7 @@ u32 fc_get_event_number(void); void fc_host_post_event(struct Scsi_Host *shost, u32 event_number, enum fc_host_event_code event_code, u32 event_data); void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, - u32 data_len, char * data_buf, u32 vendor_id); + u32 data_len, char * data_buf, u64 vendor_id); /* Note: when specifying vendor_id to fc_host_post_vendor_event() * be sure to read the Vendor Type and ID formatting requirements * specified in scsi_netlink.h -- cgit v0.10.2 From d2873e4c1ef293ee6d66456fb84448e258a487fa Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 18 Aug 2006 17:46:43 -0400 Subject: [SCSI] lpfc 8.1.10 : Add support to post events via new FC event interfaces Add support to post events via new FC event interfaces Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index d44f9aa..4a4048d 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -391,3 +391,5 @@ struct rnidrsp { struct list_head list; uint32_t data; }; + +#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 3567de61..71864cdc 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2506,6 +2506,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba, uint32_t *lp; IOCB_t *icmd; uint32_t payload_len, cmd; + int i; icmd = &cmdiocb->iocb; pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; @@ -2524,6 +2525,10 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba, phba->brd_no, phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt); + for (i = 0; i < payload_len/sizeof(uint32_t); i++) + fc_host_post_event(phba->host, 
fc_get_event_number(), + FCH_EVT_RSCN, lp[i]); + /* If we are about to begin discovery, just ACC the RSCN. * Discovery processing will satisfy it. */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index b2f1552..53821e5 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -340,6 +340,9 @@ lpfc_linkdown(struct lpfc_hba * phba) spin_unlock_irq(phba->host->host_lock); } + fc_host_post_event(phba->host, fc_get_event_number(), + FCH_EVT_LINKDOWN, 0); + /* Clean up any firmware default rpi's */ if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { lpfc_unreg_did(phba, 0xffffffff, mb); @@ -427,6 +430,9 @@ lpfc_linkup(struct lpfc_hba * phba) struct list_head *listp, *node_list[7]; int i; + fc_host_post_event(phba->host, fc_get_event_number(), + FCH_EVT_LINKUP, 0); + spin_lock_irq(phba->host->host_lock); phba->hba_state = LPFC_LINK_UP; phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f6948ff..84e7fc5 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -511,6 +511,7 @@ lpfc_handle_eratt(struct lpfc_hba * phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; + uint32_t event_data; if (phba->work_hs & HS_FFER6) { /* Re-establishing Link */ @@ -555,6 +556,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba) phba->brd_no, phba->work_hs, phba->work_status[0], phba->work_status[1]); + event_data = FC_REG_DUMP_EVENT; + fc_host_post_vendor_event(phba->host, fc_get_event_number(), + sizeof(event_data), (char *) &event_data, + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; lpfc_offline(phba); phba->hba_state = LPFC_HBA_ERROR; -- cgit v0.10.2 From ae36764a230ff6a278ed93735acf5fcda08f2786 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 18 Aug 2006 17:46:53 -0400 Subject: [SCSI] lpfc 8.1.10 : Add support to return adapter symbolic name Add support to return adapter symbolic name (now that attribute is dynamic) Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index d384c16..c6d683d 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1204,6 +1204,15 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost) fc_host_fabric_name(shost) = node_name; } +static void +lpfc_get_host_symbolic_name (struct Scsi_Host *shost) +{ + struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; + + spin_lock_irq(shost->host_lock); + lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost)); + spin_unlock_irq(shost->host_lock); +} static struct fc_host_statistics * lpfc_get_stats(struct Scsi_Host *shost) @@ -1486,7 +1495,6 @@ struct fc_function_template lpfc_transport_functions = { .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, - .show_host_symbolic_name = 1, .show_host_supported_speeds = 1, .show_host_maxframe_size = 1, @@ -1509,6 +1517,9 @@ struct fc_function_template lpfc_transport_functions = { .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, + .get_host_symbolic_name = lpfc_get_host_symbolic_name, + .show_host_symbolic_name = 1, + /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. 
-- cgit v0.10.2 From 0f29b966d60e9a4f5ecff9f3832257b38aea4f13 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 18 Aug 2006 17:33:29 -0400 Subject: [SCSI] FC transport: Add dev_loss_tmo callbacks, and new fast_io_fail_tmo w/ callback This patch adds the following functionality to the FC transport: - dev_loss_tmo LLDD callback : Called to essentially confirm the deletion of an rport. Thus, it is called whenever the dev_loss_tmo fires, or when the rport is deleted due to other circumstances (module unload, etc). It is expected that the callback will initiate the termination of any outstanding i/o on the rport. - fast_io_fail_tmo and LLD callback: There are some cases where it may take a long while to truly determine device loss, but the system is in a multipathing configuration that if the i/o was failed quickly (faster than dev_loss_tmo), it could be redirected to a different path and completed sooner. Many thanks to Mike Reed who cleaned up the initial RFC in support of this post. The original RFC is at: http://marc.theaimsgroup.com/?l=linux-scsi&m=115505981027246&w=2 Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 293188c..4ab176e 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -242,6 +242,7 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) static void fc_timeout_deleted_rport(void *data); +static void fc_timeout_fail_rport_io(void *data); static void fc_scsi_scan_rport(void *data); /* @@ -249,7 +250,7 @@ static void fc_scsi_scan_rport(void *data); * Increase these values if you add attributes */ #define FC_STARGET_NUM_ATTRS 3 -#define FC_RPORT_NUM_ATTRS 9 +#define FC_RPORT_NUM_ATTRS 10 #define FC_HOST_NUM_ATTRS 17 struct fc_internal { @@ -622,11 +623,14 @@ store_fc_rport_##field(struct class_device *cdev, const char *buf, \ struct fc_rport *rport = transport_class_to_rport(cdev); \ struct Scsi_Host *shost = rport_to_shost(rport); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ + char *cp; \ if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \ (rport->port_state == FC_PORTSTATE_DELETED) || \ (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \ return -EBUSY; \ - val = simple_strtoul(buf, NULL, 0); \ + val = simple_strtoul(buf, &cp, 0); \ + if (*cp && (*cp != '\n')) \ + return -EINVAL; \ i->f->set_rport_##field(rport, val); \ return count; \ } @@ -708,6 +712,13 @@ static FC_CLASS_DEVICE_ATTR(rport, title, S_IRUGO, \ if (i->f->show_rport_##field) \ count++ +#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \ +{ \ + i->private_rport_attrs[count] = class_device_attr_rport_##field; \ + i->rport_attrs[count] = &i->private_rport_attrs[count]; \ + count++; \ +} + /* The FC Transport Remote Port Attributes: */ @@ -740,12 +751,14 @@ store_fc_rport_dev_loss_tmo(struct class_device *cdev, const char *buf, struct fc_rport *rport = transport_class_to_rport(cdev); struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); + char *cp; if ((rport->port_state == FC_PORTSTATE_BLOCKED) || (rport->port_state == FC_PORTSTATE_DELETED) || (rport->port_state == FC_PORTSTATE_NOTPRESENT)) return -EBUSY; - val = simple_strtoul(buf, NULL, 0); - if ((val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) + val = simple_strtoul(buf, &cp, 0); + if ((*cp && (*cp != '\n')) || + (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) return -EINVAL; i->f->set_rport_dev_loss_tmo(rport, val); return count; @@ 
-795,6 +808,44 @@ static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO, fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); +/* + * fast_io_fail_tmo attribute + */ +static ssize_t +show_fc_rport_fast_io_fail_tmo (struct class_device *cdev, char *buf) +{ + struct fc_rport *rport = transport_class_to_rport(cdev); + + if (rport->fast_io_fail_tmo == -1) + return snprintf(buf, 5, "off\n"); + return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo); +} + +static ssize_t +store_fc_rport_fast_io_fail_tmo(struct class_device *cdev, const char *buf, + size_t count) +{ + int val; + char *cp; + struct fc_rport *rport = transport_class_to_rport(cdev); + + if ((rport->port_state == FC_PORTSTATE_BLOCKED) || + (rport->port_state == FC_PORTSTATE_DELETED) || + (rport->port_state == FC_PORTSTATE_NOTPRESENT)) + return -EBUSY; + if (strncmp(buf, "off", 3) == 0) + rport->fast_io_fail_tmo = -1; + else { + val = simple_strtoul(buf, &cp, 0); + if ((*cp && (*cp != '\n')) || + (val < 0) || (val >= rport->dev_loss_tmo)) + return -EINVAL; + rport->fast_io_fail_tmo = val; + } + return count; +} +static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR, + show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo); /* @@ -880,8 +931,11 @@ store_fc_host_##field(struct class_device *cdev, const char *buf, \ int val; \ struct Scsi_Host *shost = transport_class_to_shost(cdev); \ struct fc_internal *i = to_fc_internal(shost->transportt); \ + char *cp; \ \ - val = simple_strtoul(buf, NULL, 0); \ + val = simple_strtoul(buf, &cp, 0); \ + if (*cp && (*cp != '\n')) \ + return -EINVAL; \ i->f->set_host_##field(shost, val); \ return count; \ } @@ -1481,6 +1535,8 @@ fc_attach_transport(struct fc_function_template *ft) SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); + if (ft->terminate_rport_io) + SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo); BUG_ON(count > FC_RPORT_NUM_ATTRS); @@ -1552,7 +1608,7 @@ fc_flush_work(struct Scsi_Host *shost) * @delay: jiffies to delay the work queuing * * Return value: - * 0 on success / != 0 for error + * 1 on success / 0 already queued / < 0 for error **/ static int fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, @@ -1567,6 +1623,9 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, return -EINVAL; } + if (delay == 0) + return queue_work(fc_host_devloss_work_q(shost), work); + return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); } @@ -1659,10 +1718,23 @@ fc_starget_delete(void *data) struct fc_rport *rport = (struct fc_rport *)data; struct Scsi_Host *shost = rport_to_shost(rport); unsigned long flags; + struct fc_internal *i = to_fc_internal(shost->transportt); + + /* + * Involve the LLDD if possible. All io on the rport is to + * be terminated, either as part of the dev_loss_tmo callback + * processing, or via the terminate_rport_io function. 
+ */ + if (i->f->dev_loss_tmo_callbk) + i->f->dev_loss_tmo_callbk(rport); + else if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); spin_lock_irqsave(shost->host_lock, flags); if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { spin_unlock_irqrestore(shost->host_lock, flags); + if (!cancel_delayed_work(&rport->fail_io_work)) + fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); spin_lock_irqsave(shost->host_lock, flags); @@ -1685,10 +1757,7 @@ fc_rport_final_delete(void *data) struct fc_rport *rport = (struct fc_rport *)data; struct device *dev = &rport->dev; struct Scsi_Host *shost = rport_to_shost(rport); - - /* Delete SCSI target and sdevs */ - if (rport->scsi_target_id != -1) - fc_starget_delete(data); + struct fc_internal *i = to_fc_internal(shost->transportt); /* * if a scan is pending, flush the SCSI Host work_q so that @@ -1697,6 +1766,14 @@ fc_rport_final_delete(void *data) if (rport->flags & FC_RPORT_SCAN_PENDING) scsi_flush_work(shost); + /* Delete SCSI target and sdevs */ + if (rport->scsi_target_id != -1) + fc_starget_delete(data); + else if (i->f->dev_loss_tmo_callbk) + i->f->dev_loss_tmo_callbk(rport); + else if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); + transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); @@ -1748,8 +1825,10 @@ fc_rport_create(struct Scsi_Host *shost, int channel, if (fci->f->dd_fcrport_size) rport->dd_data = &rport[1]; rport->channel = channel; + rport->fast_io_fail_tmo = -1; INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); + INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); @@ -1913,11 +1992,13 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, /* restart the target */ /* - * Stop the target timer first. Take no action + * Stop the target timers first. Take no action * on the del_timer failure as the state * machine state change will validate the * transaction. */ + if (!cancel_delayed_work(&rport->fail_io_work)) + fc_flush_devloss(shost); if (!cancel_delayed_work(work)) fc_flush_devloss(shost); @@ -2061,6 +2142,7 @@ void fc_remote_port_delete(struct fc_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_internal *i = to_fc_internal(shost->transportt); int timeout = rport->dev_loss_tmo; unsigned long flags; @@ -2091,6 +2173,12 @@ fc_remote_port_delete(struct fc_rport *rport) scsi_target_block(&rport->dev); + /* see if we need to kill io faster than waiting for device loss */ + if ((rport->fast_io_fail_tmo != -1) && + (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io)) + fc_queue_devloss_work(shost, &rport->fail_io_work, + rport->fast_io_fail_tmo * HZ); + /* cap the length the devices can be blocked until they are deleted */ fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ); } @@ -2150,6 +2238,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) * machine state change will validate the * transaction. */ + if (!cancel_delayed_work(&rport->fail_io_work)) + fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); @@ -2271,6 +2361,28 @@ fc_timeout_deleted_rport(void *data) } /** + * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a + * disconnected SCSI target. + * + * @data: rport to terminate io on. 
+ * + * Notes: Only requests the failure of the io, not that all are flushed + * prior to returning. + **/ +static void +fc_timeout_fail_rport_io(void *data) +{ + struct fc_rport *rport = (struct fc_rport *)data; + struct Scsi_Host *shost = rport_to_shost(rport); + struct fc_internal *i = to_fc_internal(shost->transportt); + + if (rport->port_state != FC_PORTSTATE_BLOCKED) + return; + + i->f->terminate_rport_io(rport); +} + +/** * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. * * @data: remote port to be scanned. diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 0b11eff..fd35232 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -195,6 +195,7 @@ struct fc_rport { /* aka fc_starget_attrs */ u32 roles; enum fc_port_state port_state; /* Will only be ONLINE or UNKNOWN */ u32 scsi_target_id; + u32 fast_io_fail_tmo; /* exported data */ void *dd_data; /* Used for driver-specific storage */ @@ -207,6 +208,7 @@ struct fc_rport { /* aka fc_starget_attrs */ struct device dev; struct work_struct dev_loss_work; struct work_struct scan_work; + struct work_struct fail_io_work; struct work_struct stgt_delete_work; struct work_struct rport_delete_work; } __attribute__((aligned(sizeof(unsigned long)))); @@ -445,6 +447,9 @@ struct fc_function_template { int (*issue_fc_host_lip)(struct Scsi_Host *); + void (*dev_loss_tmo_callbk)(struct fc_rport *); + void (*terminate_rport_io)(struct fc_rport *); + /* allocation lengths for host-specific data */ u32 dd_fcrport_size; -- cgit v0.10.2 From c01f32087960edd60a302ad62ad6b8b525e4aeec Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 18 Aug 2006 17:47:08 -0400 Subject: [SCSI] lpfc 8.1.10 : Add support for dev_loss_tmo_callbk and fast_io_fail_tmo_callbk Add support for new dev_loss_tmo callback Goodness is that it removes code for a parallel nodev timer that existed in the driver Add support for the new fast_io_fail callback Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 4a4048d..efec44d 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -285,6 +285,7 @@ struct lpfc_hba { uint32_t cfg_log_verbose; uint32_t cfg_lun_queue_depth; uint32_t cfg_nodev_tmo; + uint32_t cfg_devloss_tmo; uint32_t cfg_hba_queue_depth; uint32_t cfg_fcp_class; uint32_t cfg_use_adisc; @@ -303,6 +304,8 @@ struct lpfc_hba { uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; + uint32_t dev_loss_tmo_changed; + lpfc_vpd_t vpd; /* vital product data */ struct Scsi_Host *host; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c6d683d..0de6932 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -39,6 +39,9 @@ #include "lpfc_compat.h" #include "lpfc_crtn.h" +#define LPFC_DEF_DEVLOSS_TMO 30 +#define LPFC_MIN_DEVLOSS_TMO 1 +#define LPFC_MAX_DEVLOSS_TMO 255 static void lpfc_jedec_to_ascii(int incr, char hdw[]) @@ -559,6 +562,123 @@ static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); /* +# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear +# until the timer expires. Value range is [0,255]. Default value is 30. 
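+# Note: if lpfc_devloss_tmo is also set (as a module parameter or later via
+# sysfs), it takes precedence and changes requested through lpfc_nodev_tmo
+# are ignored; see lpfc_nodev_tmo_init() and lpfc_nodev_tmo_set() below.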
+*/ +static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; +static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO; +module_param(lpfc_nodev_tmo, int, 0); +MODULE_PARM_DESC(lpfc_nodev_tmo, + "Seconds driver will hold I/O waiting " + "for a device to come back"); +static ssize_t +lpfc_nodev_tmo_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *host = class_to_shost(cdev); + struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + int val = 0; + val = phba->cfg_devloss_tmo; + return snprintf(buf, PAGE_SIZE, "%d\n", + phba->cfg_devloss_tmo); +} + +static int +lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val) +{ + static int warned; + if (phba->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) { + phba->cfg_nodev_tmo = phba->cfg_devloss_tmo; + if (!warned && val != LPFC_DEF_DEVLOSS_TMO) { + warned = 1; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0402 Ignoring nodev_tmo module " + "parameter because devloss_tmo is" + " set.\n", + phba->brd_no); + } + return 0; + } + + if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { + phba->cfg_nodev_tmo = val; + phba->cfg_devloss_tmo = val; + return 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0400 lpfc_nodev_tmo attribute cannot be set to %d, " + "allowed range is [%d, %d]\n", + phba->brd_no, val, + LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO); + phba->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO; + return -EINVAL; +} + +static int +lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val) +{ + if (phba->dev_loss_tmo_changed || + (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0401 Ignoring change to nodev_tmo " + "because devloss_tmo is set.\n", + phba->brd_no); + return 0; + } + + if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { + phba->cfg_nodev_tmo = val; + phba->cfg_devloss_tmo = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0403 lpfc_nodev_tmo attribute cannot be set to %d, " + "allowed range is [%d, %d]\n", + phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO, + LPFC_MAX_DEVLOSS_TMO); + return -EINVAL; +} + +lpfc_param_store(nodev_tmo) + +static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR, + lpfc_nodev_tmo_show, lpfc_nodev_tmo_store); + +/* +# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that +# disappear until the timer expires. Value range is [0,255]. Default +# value is 30. +*/ +module_param(lpfc_devloss_tmo, int, 0); +MODULE_PARM_DESC(lpfc_devloss_tmo, + "Seconds driver will hold I/O waiting " + "for a device to come back"); +lpfc_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO, + LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO) +lpfc_param_show(devloss_tmo) +static int +lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val) +{ + if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { + phba->cfg_nodev_tmo = val; + phba->cfg_devloss_tmo = val; + phba->dev_loss_tmo_changed = 1; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0404 lpfc_devloss_tmo attribute cannot be set to" + " %d, allowed range is [%d, %d]\n", + phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO, + LPFC_MAX_DEVLOSS_TMO); + return -EINVAL; +} + +lpfc_param_store(devloss_tmo) +static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, + lpfc_devloss_tmo_show, lpfc_devloss_tmo_store); + +/* # lpfc_log_verbose: Only turn this flag on if you are willing to risk being # deluged with LOTS of information. 
# You can set a bit mask to record specific types of verbose messages: @@ -617,14 +737,6 @@ LPFC_ATTR_R(scan_down, 1, 0, 1, "Start scanning for devices from highest ALPA to lowest"); /* -# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear -# until the timer expires. Value range is [0,255]. Default value is 30. -# NOTE: this MUST be less then the SCSI Layer command timeout - 1. -*/ -LPFC_ATTR_RW(nodev_tmo, 30, 0, 255, - "Seconds driver will hold I/O waiting for a device to come back"); - -/* # lpfc_topology: link topology for init link # 0x0 = attempt loop mode then point-to-point # 0x01 = internal loopback mode @@ -737,6 +849,7 @@ struct class_device_attribute *lpfc_host_attrs[] = { &class_device_attr_lpfc_lun_queue_depth, &class_device_attr_lpfc_hba_queue_depth, &class_device_attr_lpfc_nodev_tmo, + &class_device_attr_lpfc_devloss_tmo, &class_device_attr_lpfc_fcp_class, &class_device_attr_lpfc_use_adisc, &class_device_attr_lpfc_ack0, @@ -1450,27 +1563,12 @@ lpfc_get_starget_port_name(struct scsi_target *starget) } static void -lpfc_get_rport_loss_tmo(struct fc_rport *rport) -{ - /* - * Return the driver's global value for device loss timeout plus - * five seconds to allow the driver's nodev timer to run. - */ - rport->dev_loss_tmo = lpfc_nodev_tmo + 5; -} - -static void lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { - /* - * The driver doesn't have a per-target timeout setting. Set - * this value globally. lpfc_nodev_tmo should be greater then 0. - */ if (timeout) - lpfc_nodev_tmo = timeout; + rport->dev_loss_tmo = timeout; else - lpfc_nodev_tmo = 1; - rport->dev_loss_tmo = lpfc_nodev_tmo + 5; + rport->dev_loss_tmo = 1; } @@ -1532,7 +1630,6 @@ struct fc_function_template lpfc_transport_functions = { .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, - .get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo, .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, @@ -1546,6 +1643,8 @@ struct fc_function_template lpfc_transport_functions = { .show_starget_port_name = 1, .issue_fc_host_lip = lpfc_issue_lip, + .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, + .terminate_rport_io = lpfc_terminate_rport_io, }; void @@ -1561,13 +1660,13 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_ack0_init(phba, lpfc_ack0); lpfc_topology_init(phba, lpfc_topology); lpfc_scan_down_init(phba, lpfc_scan_down); - lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_fdmi_on_init(phba, lpfc_fdmi_on); lpfc_discovery_threads_init(phba, lpfc_discovery_threads); lpfc_max_luns_init(phba, lpfc_max_luns); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); - + lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); + lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); phba->cfg_poll = lpfc_poll; /* diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 2a17646..3d68449 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -18,6 +18,7 @@ * included with this package. 
* *******************************************************************/ +struct fc_rport; void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, @@ -200,6 +201,8 @@ extern struct scsi_host_template lpfc_template; extern struct fc_function_template lpfc_transport_functions; void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp); +void lpfc_terminate_rport_io(struct fc_rport *); +void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index bbb7310..ae41064 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -324,7 +324,6 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size) struct lpfc_sli_ct_request *Response = (struct lpfc_sli_ct_request *) mp->virt; struct lpfc_nodelist *ndlp = NULL; - struct lpfc_nodelist *next_ndlp; struct lpfc_dmabuf *mlast, *next_mp; uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; uint32_t Did; @@ -399,30 +398,6 @@ nsout1: * current driver state. */ if (phba->hba_state == LPFC_HBA_READY) { - - /* - * Switch ports that connect a loop of multiple targets need - * special consideration. The driver wants to unregister the - * rpi only on the target that was pulled from the loop. On - * RSCN, the driver wants to rediscover an NPort only if the - * driver flagged it as NLP_NPR_2B_DISC. Provided adisc is - * not enabled and the NPort is not capable of retransmissions - * (FC Tape) prevent timing races with the scsi error handler by - * unregistering the Nport's RPI. This action causes all - * outstanding IO to flush back to the midlayer. 
- */ - list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list, - nlp_listp) { - if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) { - if ((phba->cfg_use_adisc == 0) && - !(ndlp->nlp_fcp_info & - NLP_FCP_2_DEVICE)) { - lpfc_unreg_rpi(phba, ndlp); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - } - } - } lpfc_els_flush_rscn(phba); spin_lock_irq(phba->host->host_lock); phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */ diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 41cf5d3..9766f90 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -30,7 +30,6 @@ /* worker thread events */ enum lpfc_work_type { - LPFC_EVT_NODEV_TMO, LPFC_EVT_ONLINE, LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, @@ -74,11 +73,9 @@ struct lpfc_nodelist { #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ - struct timer_list nlp_tmofunc; /* Used for nodev tmo */ struct fc_rport *rport; /* Corresponding FC transport port structure */ struct lpfc_hba *nlp_phba; - struct lpfc_work_evt nodev_timeout_evt; struct lpfc_work_evt els_retry_evt; unsigned long last_ramp_up_time; /* jiffy of last ramp up */ unsigned long last_q_full_time; /* jiffy of last queue full */ @@ -102,7 +99,6 @@ struct lpfc_nodelist { #define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ #define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ #define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ -#define NLP_NODEV_TMO 0x10000 /* nodev timeout is running for node */ #define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ #define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ #define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ @@ -169,7 +165,7 @@ struct lpfc_nodelist { */ /* * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped - * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers + * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers * expire, all effected nodes will receive a DEVICE_RM event. */ /* diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 53821e5..97973af 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -56,28 +56,63 @@ static uint8_t lpfcAlpaArray[] = { static void lpfc_disc_timeout_handler(struct lpfc_hba *); -static void -lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +void +lpfc_terminate_rport_io(struct fc_rport *rport) { - uint8_t *name = (uint8_t *)&ndlp->nlp_portname; - int warn_on = 0; + struct lpfc_rport_data *rdata; + struct lpfc_nodelist * ndlp; + struct lpfc_hba *phba; - spin_lock_irq(phba->host->host_lock); - if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) { - spin_unlock_irq(phba->host->host_lock); + rdata = rport->dd_data; + ndlp = rdata->pnode; + + if (!ndlp) { + if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) + printk(KERN_ERR "Cannot find remote node" + " to terminate I/O Data x%x\n", + rport->port_id); return; } - /* - * If a discovery event readded nodev_timer after timer - * firing and before processing the timer, cancel the - * nlp_tmofunc. 
- */ - spin_unlock_irq(phba->host->host_lock); - del_timer_sync(&ndlp->nlp_tmofunc); + phba = ndlp->nlp_phba; + spin_lock_irq(phba->host->host_lock); + if (ndlp->nlp_sid != NLP_NO_SID) { + lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], + ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); + } + spin_unlock_irq(phba->host->host_lock); + + return; +} + +/* + * This function will be called when dev_loss_tmo fire. + */ +void +lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) +{ + struct lpfc_rport_data *rdata; + struct lpfc_nodelist * ndlp; + uint8_t *name; + int warn_on = 0; + struct lpfc_hba *phba; + + rdata = rport->dd_data; + ndlp = rdata->pnode; - ndlp->nlp_flag &= ~NLP_NODEV_TMO; + if (!ndlp) { + if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) + printk(KERN_ERR "Cannot find remote node" + " for rport in dev_loss_tmo_callbk x%x\n", + rport->port_id); + return; + } + + name = (uint8_t *)&ndlp->nlp_portname; + phba = ndlp->nlp_phba; + + spin_lock_irq(phba->host->host_lock); if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; @@ -85,11 +120,14 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); } + if (phba->fc_flag & FC_UNLOADING) + warn_on = 0; + spin_unlock_irq(phba->host->host_lock); if (warn_on) { lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "%d:0203 Nodev timeout on " + "%d:0203 Devloss timeout on " "WWPN %x:%x:%x:%x:%x:%x:%x:%x " "NPort x%x Data: x%x x%x x%x\n", phba->brd_no, @@ -99,7 +137,7 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) ndlp->nlp_state, ndlp->nlp_rpi); } else { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, - "%d:0204 Nodev timeout on " + "%d:0204 Devloss timeout on " "WWPN %x:%x:%x:%x:%x:%x:%x:%x " "NPort x%x Data: x%x x%x x%x\n", phba->brd_no, @@ -109,7 +147,12 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) ndlp->nlp_state, ndlp->nlp_rpi); } - lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); + ndlp->rport = NULL; + rdata->pnode = NULL; + + if (!(phba->fc_flag & FC_UNLOADING)) + lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); + return; } @@ -127,11 +170,6 @@ lpfc_work_list_done(struct lpfc_hba * phba) spin_unlock_irq(phba->host->host_lock); free_evt = 1; switch (evtp->evt) { - case LPFC_EVT_NODEV_TMO: - ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); - lpfc_process_nodev_timeout(phba, ndlp); - free_evt = 0; - break; case LPFC_EVT_ELS_RETRY: ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); lpfc_els_retry_delay_handler(ndlp); @@ -377,16 +415,6 @@ lpfc_linkdown(struct lpfc_hba * phba) rc = lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); - /* Check config parameter use-adisc or FCP-2 */ - if ((rc != NLP_STE_FREED_NODE) && - (phba->cfg_use_adisc == 0) && - !(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) { - /* We know we will have to relogin, so - * unreglogin the rpi right now to fail - * any outstanding I/Os quickly. 
- */ - lpfc_unreg_rpi(phba, ndlp); - } } } @@ -1104,8 +1132,11 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba, struct fc_rport *rport = ndlp->rport; struct lpfc_rport_data *rdata = rport->dd_data; - ndlp->rport = NULL; - rdata->pnode = NULL; + if (rport->scsi_target_id == -1) { + ndlp->rport = NULL; + rdata->pnode = NULL; + } + fc_remote_port_delete(rport); return; @@ -1233,17 +1264,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list); phba->fc_unmap_cnt++; phba->nport_event_cnt++; - /* stop nodev tmo if running */ - if (nlp->nlp_flag & NLP_NODEV_TMO) { - nlp->nlp_flag &= ~NLP_NODEV_TMO; - spin_unlock_irq(phba->host->host_lock); - del_timer_sync(&nlp->nlp_tmofunc); - spin_lock_irq(phba->host->host_lock); - if (!list_empty(&nlp->nodev_timeout_evt.evt_listp)) - list_del_init(&nlp->nodev_timeout_evt. - evt_listp); - - } nlp->nlp_flag &= ~NLP_NODEV_REMOVE; nlp->nlp_type |= NLP_FC_NODE; break; @@ -1254,17 +1274,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list); phba->fc_map_cnt++; phba->nport_event_cnt++; - /* stop nodev tmo if running */ - if (nlp->nlp_flag & NLP_NODEV_TMO) { - nlp->nlp_flag &= ~NLP_NODEV_TMO; - spin_unlock_irq(phba->host->host_lock); - del_timer_sync(&nlp->nlp_tmofunc); - spin_lock_irq(phba->host->host_lock); - if (!list_empty(&nlp->nodev_timeout_evt.evt_listp)) - list_del_init(&nlp->nodev_timeout_evt. - evt_listp); - - } nlp->nlp_flag &= ~NLP_NODEV_REMOVE; break; case NLP_NPR_LIST: @@ -1273,11 +1282,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list); phba->fc_npr_cnt++; - if (!(nlp->nlp_flag & NLP_NODEV_TMO)) - mod_timer(&nlp->nlp_tmofunc, - jiffies + HZ * phba->cfg_nodev_tmo); - - nlp->nlp_flag |= NLP_NODEV_TMO; nlp->nlp_flag &= ~NLP_RCV_PLOGI; break; case NLP_JUST_DQ: @@ -1307,7 +1311,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) * already. 
If we have, and it's a scsi entity, be * sure to unblock any attached scsi devices */ - if (!nlp->rport) + if ((!nlp->rport) || (nlp->rport->port_state == + FC_PORTSTATE_BLOCKED)) lpfc_register_remote_port(phba, nlp); /* @@ -1581,15 +1586,12 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) lpfc_els_abort(phba,ndlp,0); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO); + ndlp->nlp_flag &= ~NLP_DELAY_TMO; spin_unlock_irq(phba->host->host_lock); - del_timer_sync(&ndlp->nlp_tmofunc); ndlp->nlp_last_elscmd = 0; del_timer_sync(&ndlp->nlp_delayfunc); - if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp)) - list_del_init(&ndlp->nodev_timeout_evt.evt_listp); if (!list_empty(&ndlp->els_retry_evt.evt_listp)) list_del_init(&ndlp->els_retry_evt.evt_listp); @@ -1606,16 +1608,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) int lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) { - if (ndlp->nlp_flag & NLP_NODEV_TMO) { - spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~NLP_NODEV_TMO; - spin_unlock_irq(phba->host->host_lock); - del_timer_sync(&ndlp->nlp_tmofunc); - if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp)) - list_del_init(&ndlp->nodev_timeout_evt.evt_listp); - - } - if (ndlp->nlp_flag & NLP_DELAY_TMO) { lpfc_cancel_retry_delay_tmo(phba, ndlp); @@ -2430,34 +2422,6 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) return; } -static void -lpfc_nodev_timeout(unsigned long ptr) -{ - struct lpfc_hba *phba; - struct lpfc_nodelist *ndlp; - unsigned long iflag; - struct lpfc_work_evt *evtp; - - ndlp = (struct lpfc_nodelist *)ptr; - phba = ndlp->nlp_phba; - evtp = &ndlp->nodev_timeout_evt; - spin_lock_irqsave(phba->host->host_lock, iflag); - - if (!list_empty(&evtp->evt_listp)) { - spin_unlock_irqrestore(phba->host->host_lock, iflag); - return; - } - evtp->evt_arg1 = ndlp; - evtp->evt = LPFC_EVT_NODEV_TMO; - list_add_tail(&evtp->evt_listp, &phba->work_list); - if (phba->work_wait) - wake_up(phba->work_wait); - - spin_unlock_irqrestore(phba->host->host_lock, iflag); - return; -} - - /* * This routine handles processing a NameServer REG_LOGIN mailbox * command upon completion. It is setup in the LPFC_MBOXQ @@ -2581,11 +2545,7 @@ lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did) { memset(ndlp, 0, sizeof (struct lpfc_nodelist)); - INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp); INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); - init_timer(&ndlp->nlp_tmofunc); - ndlp->nlp_tmofunc.function = lpfc_nodev_timeout; - ndlp->nlp_tmofunc.data = (unsigned long)ndlp; init_timer(&ndlp->nlp_delayfunc); ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; ndlp->nlp_delayfunc.data = (unsigned long)ndlp; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 20449a8..d5f4150 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1813,7 +1813,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba, */ /* * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped - * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers + * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers * expire, all effected nodes will receive a DEVICE_RM event. 
*/ /* diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a8816a8..97ae98d 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -935,7 +935,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ); spin_lock_irq(phba->host->host_lock); if (++loop_count - > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT) + > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT) break; } @@ -978,7 +978,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) spin_lock_irq(shost->host_lock); /* * If target is not in a MAPPED state, delay the reset until - * target is rediscovered or nodev timeout expires. + * target is rediscovered or devloss timeout expires. */ while ( 1 ) { if (!pnode) @@ -1050,7 +1050,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) spin_lock_irq(phba->host->host_lock); if (++loopcnt - > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) + > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) break; cnt = lpfc_sli_sum_iocb(phba, @@ -1151,7 +1151,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) spin_lock_irq(phba->host->host_lock); if (++loopcnt - > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) + > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) break; cnt = lpfc_sli_sum_iocb(phba, @@ -1249,7 +1249,7 @@ lpfc_slave_configure(struct scsi_device *sdev) * target pointer is stored in the starget_data for the * driver's sysfs entry point functions. */ - rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5; + rport->dev_loss_tmo = phba->cfg_devloss_tmo; if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_poll_fcp_ring(phba); -- cgit v0.10.2 From c3f28afa61343e3e010e3014aa0d6eba271c1558 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 18 Aug 2006 17:47:18 -0400 Subject: [SCSI] lpfc 8.1.10 : Add support for new lpfc soft_wwpn attribute Add support for a new lpfc soft_wwpn sysfs attribute Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index efec44d..3f7f5f8 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -303,6 +303,7 @@ struct lpfc_hba { uint32_t cfg_poll_tmo; uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; + uint64_t cfg_soft_wwpn; uint32_t dev_loss_tmo_changed; @@ -354,6 +355,8 @@ struct lpfc_hba { #define VPD_PORT 0x8 /* valid vpd port data */ #define VPD_MASK 0xf /* mask for any vpd data */ + uint8_t soft_wwpn_enable; + struct timer_list fcp_poll_timer; struct timer_list els_tmofunc; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 0de6932..9496e87 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -551,6 +551,119 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); + +static char *lpfc_soft_wwpn_key = "C99G71SL8032A"; + +static ssize_t +lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf, + size_t count) +{ + struct Scsi_Host *host = class_to_shost(cdev); + struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + unsigned int cnt = count; + + /* + * We're doing a simple sanity check for soft_wwpn setting. + * We require that the user write a specific key to enable + * the soft_wwpn attribute to be settable. Once the attribute + * is written, the enable key resets. If further updates are + * desired, the key must be written again to re-enable the + * attribute. 
+ * + * The "key" is not secret - it is a hardcoded string shown + * here. The intent is to protect against the random user or + * application that is just writing attributes. + */ + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + if ((cnt != strlen(lpfc_soft_wwpn_key)) || + (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0)) + return -EINVAL; + + phba->soft_wwpn_enable = 1; + return count; +} +static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL, + lpfc_soft_wwpn_enable_store); + +static ssize_t +lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *host = class_to_shost(cdev); + struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + return snprintf(buf, PAGE_SIZE, "0x%llx\n", phba->cfg_soft_wwpn); +} + + +static ssize_t +lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) +{ + struct Scsi_Host *host = class_to_shost(cdev); + struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; + struct completion online_compl; + int stat1=0, stat2=0; + unsigned int i, j, cnt=count; + u8 wwpn[8]; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) || + ((cnt == 17) && (*buf++ != 'x')) || + ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) + return -EINVAL; + + phba->soft_wwpn_enable = 0; + + memset(wwpn, 0, sizeof(wwpn)); + + /* Validate and store the new name */ + for (i=0, j=0; i < 16; i++) { + if ((*buf >= 'a') && (*buf <= 'f')) + j = ((j << 4) | ((*buf++ -'a') + 10)); + else if ((*buf >= 'A') && (*buf <= 'F')) + j = ((j << 4) | ((*buf++ -'A') + 10)); + else if ((*buf >= '0') && (*buf <= '9')) + j = ((j << 4) | (*buf++ -'0')); + else + return -EINVAL; + if (i % 2) { + wwpn[i/2] = j & 0xff; + j = 0; + } + } + phba->cfg_soft_wwpn = wwn_to_u64(wwpn); + fc_host_port_name(host) = phba->cfg_soft_wwpn; + + dev_printk(KERN_NOTICE, &phba->pcidev->dev, + "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); + + init_completion(&online_compl); + lpfc_workq_post_event(phba, &stat1, &online_compl, LPFC_EVT_OFFLINE); + wait_for_completion(&online_compl); + if (stat1) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0463 lpfc_soft_wwpn attribute set failed to reinit " + "adapter - %d\n", phba->brd_no, stat1); + + init_completion(&online_compl); + lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE); + wait_for_completion(&online_compl); + if (stat2) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "%d:0464 lpfc_soft_wwpn attribute set failed to reinit " + "adapter - %d\n", phba->brd_no, stat2); + + return (stat1 || stat2) ? 
-EIO : count; +} +static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ + lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); + + static int lpfc_poll = 0; module_param(lpfc_poll, int, 0); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" @@ -832,6 +945,7 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535, LPFC_ATTR_RW(poll_tmo, 10, 1, 255, "Milliseconds driver will wait between polling FCP ring"); + struct class_device_attribute *lpfc_host_attrs[] = { &class_device_attr_info, &class_device_attr_serialnum, @@ -867,6 +981,8 @@ struct class_device_attribute *lpfc_host_attrs[] = { &class_device_attr_issue_reset, &class_device_attr_lpfc_poll, &class_device_attr_lpfc_poll_tmo, + &class_device_attr_lpfc_soft_wwpn, + &class_device_attr_lpfc_soft_wwpn_enable, NULL, }; @@ -1668,6 +1784,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); phba->cfg_poll = lpfc_poll; + phba->cfg_soft_wwpn = 0L; /* * The total number of segments is the configuration value plus 2 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 97973af..d586c3d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -672,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, sizeof (struct serv_parm)); + if (phba->cfg_soft_wwpn) + u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); memcpy((uint8_t *) & phba->fc_nodename, (uint8_t *) & phba->fc_sparam.nodeName, sizeof (struct lpfc_name)); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 84e7fc5..4cdf346 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba) kfree(mp); pmb->context1 = NULL; + if (phba->cfg_soft_wwpn) + u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, sizeof (struct lpfc_name)); memcpy(&phba->fc_portname, &phba->fc_sparam.portName, -- cgit v0.10.2 From 26dacd0c9b2dc1dc987c376aeee4e80691a7dd0b Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 18 Aug 2006 17:47:24 -0400 Subject: [SCSI] lpfc 8.1.10 : Change version number to 8.1.10 Change version number to 8.1.10 Signed-off-by: James Smart Signed-off-by: James Bottomley diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c7091ea..ac41790 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.9" +#define LPFC_DRIVER_VERSION "8.1.10" #define LPFC_DRIVER_NAME "lpfc" -- cgit v0.10.2 From 884d25cc4fda20908fd4ef93dbb41d817984b68b Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 5 Sep 2006 16:26:41 -0500 Subject: [SCSI] Fix refcount breakage with 'echo "1" > scan' when target already present Spotted by: Dan Aloni The problem is there's inconsistent locking semantic usage of scsi_alloc_target(). Two callers assume the target comes back with reference unincremented and the third assumes its incremented. Fix by always making the reference incremented on return. Also fix path in target alloc that could consistently increment the parent lock. Finally document scsi_alloc_target() so its callers know what the expectations are. 
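The kerneldoc added below spells out the new contract: scsi_alloc_target() hands the target back with an incremented reference, and the caller is responsible for both reaping and the last put. A minimal caller-side sketch of that contract (parent, channel and id are placeholders and the LUN probing step is elided; this is an illustration, not code from the patch):

        struct scsi_target *starget;

        starget = scsi_alloc_target(parent, channel, id);
        if (!starget)
                return ERR_PTR(-ENOMEM);

        /* ... probe LUNs under shost->scan_mutex ... */

        scsi_target_reap(starget);      /* caller does the reaping ... */
        put_device(&starget->dev);      /* ... and the last put */

This is the pattern the hunks below establish in __scsi_add_device(), __scsi_scan_target() and scsi_get_host_dev() by dropping their extra get_device() calls and, in the last case, reaping on the failure path.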
Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 31d05ab..fd9e281 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -266,6 +266,18 @@ static struct scsi_target *__scsi_find_target(struct device *parent, return found_starget; } +/** + * scsi_alloc_target - allocate a new or find an existing target + * @parent: parent of the target (need not be a scsi host) + * @channel: target channel number (zero if no channels) + * @id: target id number + * + * Return an existing target if one exists, provided it hasn't already + * gone into STARGET_DEL state, otherwise allocate a new target. + * + * The target is returned with an incremented reference, so the caller + * is responsible for both reaping and doing a last put + */ static struct scsi_target *scsi_alloc_target(struct device *parent, int channel, uint id) { @@ -331,14 +343,15 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, return NULL; } } + get_device(dev); return starget; found: found_target->reap_ref++; spin_unlock_irqrestore(shost->host_lock, flags); - put_device(parent); if (found_target->state != STARGET_DEL) { + put_device(parent); kfree(starget); return found_target; } @@ -1341,7 +1354,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, if (!starget) return ERR_PTR(-ENOMEM); - get_device(&starget->dev); mutex_lock(&shost->scan_mutex); if (scsi_host_scan_allowed(shost)) scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); @@ -1400,7 +1412,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, if (!starget) return; - get_device(&starget->dev); if (lun != SCAN_WILD_CARD) { /* * Scan for a specific host/chan/id/lun. @@ -1582,7 +1593,8 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) if (sdev) { sdev->sdev_gendev.parent = get_device(&starget->dev); sdev->borken = 0; - } + } else + scsi_target_reap(starget); put_device(&starget->dev); out: mutex_unlock(&shost->scan_mutex); -- cgit v0.10.2 From f479ab87936563a286b8aa0e39003c40fa31c6da Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 6 Sep 2006 09:00:29 -0500 Subject: [SCSI] fix up non-modular SCSI The recent change to the way scsi_device_get()/put() work broke the non modular build (we do a module_refcount on a NULL). Fix this by checking for non-null before checking module_refcount(). 
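The root cause is that a built-in LLDD has no module object at all. A minimal illustration (the "example" driver is hypothetical, and the comment reflects the standard THIS_MODULE definition rather than anything in this patch):

        #include <linux/module.h>
        #include <scsi/scsi_host.h>

        /* When the driver is built in (CONFIG_EXAMPLE=y), MODULE is not
         * defined and THIS_MODULE expands to (struct module *)0, so
         * shost->hostt->module is NULL, which is exactly the pointer the
         * old scsi_device_put() passed straight to module_refcount(). */
        static struct scsi_host_template example_template = {
                .module = THIS_MODULE,  /* NULL in the built-in case */
                .name   = "example",
        };

Hence the guard below checks the pointer before consulting module_refcount(); a later patch in this series additionally wraps that check in CONFIG_MODULE_UNLOAD, since module_refcount() is not built when module unloading is disabled.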
Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index eedfd05..c35f5fc 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -873,10 +873,12 @@ EXPORT_SYMBOL(scsi_device_get); */ void scsi_device_put(struct scsi_device *sdev) { + struct module *module = sdev->host->hostt->module; + /* The module refcount will be zero if scsi_device_get() * was called from a module removal routine */ - if (likely(module_refcount(sdev->host->hostt->module) != 0)) - module_put(sdev->host->hostt->module); + if (module && module_refcount(module) != 0) + module_put(module); put_device(&sdev->sdev_gendev); } EXPORT_SYMBOL(scsi_device_put); -- cgit v0.10.2 From b4620233d6a3510564c561a5a2a365a1d8a34b68 Mon Sep 17 00:00:00 2001 From: Henrik Kretzschmar Date: Wed, 6 Sep 2006 10:49:48 +0200 Subject: [SCSI] scsi-driver ultrastore replace Scsi_Cmnd with struct scsi_cmnd Signed-off-by: Henrik Kretzschmar Signed-off-by: James Bottomley diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c index e681681..0372aa9 100644 --- a/drivers/scsi/ultrastor.c +++ b/drivers/scsi/ultrastor.c @@ -196,8 +196,8 @@ struct mscp { u32 sense_data PACKED; /* The following fields are for software only. They are included in the MSCP structure because they are associated with SCSI requests. */ - void (*done)(Scsi_Cmnd *); - Scsi_Cmnd *SCint; + void (*done) (struct scsi_cmnd *); + struct scsi_cmnd *SCint; ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */ }; @@ -289,7 +289,7 @@ static const unsigned short ultrastor_ports_14f[] = { static void ultrastor_interrupt(int, void *, struct pt_regs *); static irqreturn_t do_ultrastor_interrupt(int, void *, struct pt_regs *); -static inline void build_sg_list(struct mscp *, Scsi_Cmnd *SCpnt); +static inline void build_sg_list(struct mscp *, struct scsi_cmnd *SCpnt); /* Always called with host lock held */ @@ -673,7 +673,7 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt) return buf; } -static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt) +static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt) { struct scatterlist *sl; long transfer_length = 0; @@ -694,7 +694,8 @@ static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt) mscp->transfer_data_length = transfer_length; } -static int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) +static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt, + void (*done) (struct scsi_cmnd *)) { struct mscp *my_mscp; #if ULTRASTOR_MAX_CMDS > 1 @@ -833,7 +834,7 @@ retry: */ -static int ultrastor_abort(Scsi_Cmnd *SCpnt) +static int ultrastor_abort(struct scsi_cmnd *SCpnt) { #if ULTRASTOR_DEBUG & UD_ABORT char out[108]; @@ -843,7 +844,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt) unsigned int mscp_index; unsigned char old_aborted; unsigned long flags; - void (*done)(Scsi_Cmnd *); + void (*done)(struct scsi_cmnd *); struct Scsi_Host *host = SCpnt->device->host; if(config.slot) @@ -960,7 +961,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt) return SUCCESS; } -static int ultrastor_host_reset(Scsi_Cmnd * SCpnt) +static int ultrastor_host_reset(struct scsi_cmnd * SCpnt) { unsigned long flags; int i; @@ -1045,8 +1046,8 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs) unsigned int mscp_index; #endif struct mscp *mscp; - void (*done)(Scsi_Cmnd *); - Scsi_Cmnd *SCtmp; + void (*done) (struct scsi_cmnd *); + struct scsi_cmnd *SCtmp; #if ULTRASTOR_MAX_CMDS == 1 mscp = &config.mscp[0]; @@ 
-1079,7 +1080,7 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs) return; } if (icm_status == 3) { - void (*done)(Scsi_Cmnd *) = mscp->done; + void (*done)(struct scsi_cmnd *) = mscp->done; if (done) { mscp->done = NULL; mscp->SCint->result = DID_ABORT << 16; diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h index da759a1..a692905 100644 --- a/drivers/scsi/ultrastor.h +++ b/drivers/scsi/ultrastor.h @@ -14,11 +14,13 @@ #define _ULTRASTOR_H static int ultrastor_detect(struct scsi_host_template *); -static const char *ultrastor_info(struct Scsi_Host * shpnt); -static int ultrastor_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); -static int ultrastor_abort(Scsi_Cmnd *); -static int ultrastor_host_reset(Scsi_Cmnd *); -static int ultrastor_biosparam(struct scsi_device *, struct block_device *, sector_t, int *); +static const char *ultrastor_info(struct Scsi_Host *shpnt); +static int ultrastor_queuecommand(struct scsi_cmnd *, + void (*done)(struct scsi_cmnd *)); +static int ultrastor_abort(struct scsi_cmnd *); +static int ultrastor_host_reset(struct scsi_cmnd *); +static int ultrastor_biosparam(struct scsi_device *, struct block_device *, + sector_t, int *); #define ULTRASTOR_14F_MAX_SG 16 -- cgit v0.10.2 From 88edf74610bd894b93438f389688bc8b4a2d3414 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 6 Sep 2006 17:36:13 -0500 Subject: [SCSI] SAS: consolidate linkspeed definitions At the moment we have two separate linkspeed enumerations covering roughly the same values. This patch consolidates on a single one enum sas_linkspeed in scsi_transport_sas.h and uses it everywhere in the aic94xx driver. Eventually I'll get around to removing the duplicated fields in asd_sas_phy and sas_phy ... Signed-off-by: James Bottomley diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 075cea8..a242013 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -96,7 +96,7 @@ static int asd_init_phy(struct asd_phy *phy) sas_phy->type = PHY_TYPE_PHYSICAL; sas_phy->role = PHY_ROLE_INITIATOR; sas_phy->oob_mode = OOB_NOT_CONNECTED; - sas_phy->linkrate = PHY_LINKRATE_NONE; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; phy->id_frm_tok = asd_alloc_coherent(asd_ha, sizeof(*phy->identify_frame), diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 69aa708..302b54f 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -240,10 +240,14 @@ static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha) /* All phys are enabled, by default. 
*/ asd_ha->hw_prof.enabled_phys = 0xFF; for (i = 0; i < ASD_MAX_PHYS; i++) { - asd_ha->hw_prof.phy_desc[i].max_sas_lrate = PHY_LINKRATE_3; - asd_ha->hw_prof.phy_desc[i].min_sas_lrate = PHY_LINKRATE_1_5; - asd_ha->hw_prof.phy_desc[i].max_sata_lrate= PHY_LINKRATE_1_5; - asd_ha->hw_prof.phy_desc[i].min_sata_lrate= PHY_LINKRATE_1_5; + asd_ha->hw_prof.phy_desc[i].max_sas_lrate = + SAS_LINK_RATE_3_0_GBPS; + asd_ha->hw_prof.phy_desc[i].min_sas_lrate = + SAS_LINK_RATE_1_5_GBPS; + asd_ha->hw_prof.phy_desc[i].max_sata_lrate = + SAS_LINK_RATE_1_5_GBPS; + asd_ha->hw_prof.phy_desc[i].min_sata_lrate = + SAS_LINK_RATE_1_5_GBPS; } return 0; diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index fc1b743..ef8ca08 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -55,15 +55,15 @@ static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) switch (oob_mode & 7) { case PHY_SPEED_60: /* FIXME: sas transport class doesn't have this */ - phy->sas_phy.linkrate = PHY_LINKRATE_6; + phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; break; case PHY_SPEED_30: - phy->sas_phy.linkrate = PHY_LINKRATE_3; + phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS; phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; break; case PHY_SPEED_15: - phy->sas_phy.linkrate = PHY_LINKRATE_1_5; + phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS; phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; break; } @@ -540,39 +540,39 @@ static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) | SATA_SPEED_30_DIS | SATA_SPEED_15_DIS; switch (pd->max_sas_lrate) { - case PHY_LINKRATE_6: + case SAS_LINK_RATE_6_0_GBPS: *speed_mask &= ~SAS_SPEED_60_DIS; default: - case PHY_LINKRATE_3: + case SAS_LINK_RATE_3_0_GBPS: *speed_mask &= ~SAS_SPEED_30_DIS; - case PHY_LINKRATE_1_5: + case SAS_LINK_RATE_1_5_GBPS: *speed_mask &= ~SAS_SPEED_15_DIS; } switch (pd->min_sas_lrate) { - case PHY_LINKRATE_6: + case SAS_LINK_RATE_6_0_GBPS: *speed_mask |= SAS_SPEED_30_DIS; - case PHY_LINKRATE_3: + case SAS_LINK_RATE_3_0_GBPS: *speed_mask |= SAS_SPEED_15_DIS; default: - case PHY_LINKRATE_1_5: + case SAS_LINK_RATE_1_5_GBPS: /* nothing to do */ ; } switch (pd->max_sata_lrate) { - case PHY_LINKRATE_3: + case SAS_LINK_RATE_3_0_GBPS: *speed_mask &= ~SATA_SPEED_30_DIS; default: - case PHY_LINKRATE_1_5: + case SAS_LINK_RATE_1_5_GBPS: *speed_mask &= ~SATA_SPEED_15_DIS; } switch (pd->min_sata_lrate) { - case PHY_LINKRATE_3: + case SAS_LINK_RATE_3_0_GBPS: *speed_mask |= SATA_SPEED_15_DIS; default: - case PHY_LINKRATE_1_5: + case SAS_LINK_RATE_1_5_GBPS: /* nothing to do */ ; } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index b653a26..02e796e 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -191,20 +191,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; phy->phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; phy->phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS; - switch (phy->linkrate) { - case PHY_LINKRATE_1_5: - phy->phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; - break; - case PHY_LINKRATE_3: - phy->phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; - break; - case PHY_LINKRATE_6: - phy->phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; - break; - default: - phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; - break; - } + 
phy->phy->negotiated_linkrate = phy->linkrate; if (!rediscover) sas_phy_add(phy->phy); @@ -450,7 +437,7 @@ static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) struct ex_phy *phy = &ex->ex_phy[phy_id]; sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE); - phy->linkrate = PHY_DISABLED; + phy->linkrate = SAS_PHY_DISABLED; } static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) @@ -743,7 +730,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) int res = 0; /* Phy state */ - if (ex_phy->linkrate == PHY_SPINUP_HOLD) { + if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET)) res = sas_ex_phy_discover(dev, phy_id); if (res) @@ -773,7 +760,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) sas_configure_routing(dev, ex_phy->attached_sas_addr); } return 0; - } else if (ex_phy->linkrate == PHY_LINKRATE_UNKNOWN) + } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; if (ex_phy->attached_dev_type != SAS_END_DEV && @@ -922,9 +909,9 @@ static int sas_ex_discover_devices(struct domain_device *dev, int single) continue; switch (ex_phy->linkrate) { - case PHY_DISABLED: - case PHY_RESET_PROBLEM: - case PHY_PORT_SELECTOR: + case SAS_PHY_DISABLED: + case SAS_PHY_RESET_PROBLEM: + case SAS_SATA_PORT_SELECTOR: continue; default: res = sas_ex_discover_dev(dev, i); diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 89c3976..0d69ede 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -43,7 +43,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost); int sas_show_class(enum sas_class class, char *buf); int sas_show_proto(enum sas_proto proto, char *buf); -int sas_show_linkrate(enum sas_phy_linkrate linkrate, char *buf); +int sas_show_linkrate(enum sas_linkrate linkrate, char *buf); int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf); int sas_register_phys(struct sas_ha_struct *sas_ha); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 72acdab..8d91313 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -114,7 +114,7 @@ struct ex_phy { enum ex_phy_state phy_state; enum sas_dev_type attached_dev_type; - enum sas_phy_linkrate linkrate; + enum sas_linkrate linkrate; u8 attached_sata_host:1; u8 attached_sata_dev:1; @@ -170,9 +170,9 @@ struct sata_device { struct domain_device { enum sas_dev_type dev_type; - enum sas_phy_linkrate linkrate; - enum sas_phy_linkrate min_linkrate; - enum sas_phy_linkrate max_linkrate; + enum sas_linkrate linkrate; + enum sas_linkrate min_linkrate; + enum sas_linkrate max_linkrate; int pathways; @@ -220,7 +220,7 @@ struct asd_sas_port { struct domain_device *port_dev; spinlock_t dev_list_lock; struct list_head dev_list; - enum sas_phy_linkrate linkrate; + enum sas_linkrate linkrate; struct sas_phy *phy; struct work_struct work; @@ -276,7 +276,7 @@ struct asd_sas_phy { enum sas_phy_type type; enum sas_phy_role role; enum sas_oob_mode oob_mode; - enum sas_phy_linkrate linkrate; + enum sas_linkrate linkrate; u8 *sas_addr; /* must be set */ u8 attached_sas_addr[SAS_ADDR_SIZE]; /* class:RO, driver: R/W */ @@ -368,7 +368,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr); static inline void sas_phy_disconnected(struct asd_sas_phy *phy) { phy->oob_mode = OOB_NOT_CONNECTED; - phy->linkrate = PHY_LINKRATE_NONE; + phy->linkrate = SAS_LINK_RATE_UNKNOWN; } /* ---------- Tasks ---------- */ diff --git a/include/scsi/sas.h 
b/include/scsi/sas.h index 752853a..9c8a5b9 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -102,20 +102,6 @@ enum sas_dev_type { SATA_PM_PORT= 8, }; -enum sas_phy_linkrate { - PHY_LINKRATE_NONE = 0, - PHY_LINKRATE_UNKNOWN = 0, - PHY_DISABLED, - PHY_RESET_PROBLEM, - PHY_SPINUP_HOLD, - PHY_PORT_SELECTOR, - PHY_LINKRATE_1_5 = 0x08, - PHY_LINKRATE_G1 = PHY_LINKRATE_1_5, - PHY_LINKRATE_3 = 0x09, - PHY_LINKRATE_G2 = PHY_LINKRATE_3, - PHY_LINKRATE_6 = 0x0A, -}; - /* Partly from IDENTIFY address frame. */ enum sas_proto { SATA_PROTO = 1, diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index eeb2200..87de518 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -24,15 +24,23 @@ enum sas_protocol { }; enum sas_linkrate { - SAS_LINK_RATE_UNKNOWN, - SAS_PHY_DISABLED, - SAS_LINK_RATE_FAILED, - SAS_SATA_SPINUP_HOLD, - SAS_SATA_PORT_SELECTOR, - SAS_LINK_RATE_1_5_GBPS, - SAS_LINK_RATE_3_0_GBPS, - SAS_LINK_RATE_6_0_GBPS, - SAS_LINK_VIRTUAL, + /* These Values are defined in the SAS standard */ + SAS_LINK_RATE_UNKNOWN = 0, + SAS_PHY_DISABLED = 1, + SAS_PHY_RESET_PROBLEM = 2, + SAS_SATA_SPINUP_HOLD = 3, + SAS_SATA_PORT_SELECTOR = 4, + SAS_PHY_RESET_IN_PROGRESS = 5, + SAS_LINK_RATE_1_5_GBPS = 8, + SAS_LINK_RATE_G1 = SAS_LINK_RATE_1_5_GBPS, + SAS_LINK_RATE_3_0_GBPS = 9, + SAS_LINK_RATE_G2 = SAS_LINK_RATE_3_0_GBPS, + SAS_LINK_RATE_6_0_GBPS = 10, + /* These are virtual to the transport class and may never + * be signalled normally since the standard defined field + * is only 4 bits */ + SAS_LINK_RATE_FAILED = 0x10, + SAS_PHY_VIRTUAL = 0x11, }; struct sas_identify { -- cgit v0.10.2 From d24e1eeb3a16e4944288c2f3bf082e1513f4b425 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 6 Sep 2006 19:25:22 -0500 Subject: [SCSI] scsi_transport_sas: make minimum and maximum linkrate settable quantities According to SPEC, the minimum_linkrate and maximum_linkrate should be settable by the user. This patch introduces a callback that allows the sas class to pass these settings on to the driver. 
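To make the shape of the new hook concrete, here is a minimal, hypothetical LLDD-side sketch (the "exsas" driver is illustrative only; treating a zero rate as "not specified" matches how the sysfs store path below leaves the untouched bound at zero):

        #include <scsi/scsi_transport_sas.h>

        static int exsas_set_phy_speed(struct sas_phy *phy,
                                       struct sas_phy_linkrates *rates)
        {
                /* A zero field means the user only set the other bound;
                 * clamp requests to what the phy hardware reports. */
                if (rates->minimum_linkrate &&
                    rates->minimum_linkrate < phy->minimum_linkrate_hw)
                        rates->minimum_linkrate = phy->minimum_linkrate_hw;
                if (rates->maximum_linkrate &&
                    rates->maximum_linkrate > phy->maximum_linkrate_hw)
                        rates->maximum_linkrate = phy->maximum_linkrate_hw;

                /* A real driver would reprogram the phy and trigger a link
                 * reset here; the sketch only records the new bounds. */
                if (rates->minimum_linkrate)
                        phy->minimum_linkrate = rates->minimum_linkrate;
                if (rates->maximum_linkrate)
                        phy->maximum_linkrate = rates->maximum_linkrate;
                return 0;
        }

        static struct sas_function_template exsas_sas_ft = {
                .set_phy_speed  = exsas_set_phy_speed,
        };

The transport class side added below parses the string written to minimum_linkrate or maximum_linkrate, fills a struct sas_phy_linkrates with the other field left at zero, and calls the driver's set_phy_speed(); the attributes remain read-only when a driver does not supply the hook. The follow-on libsas/aic94xx patch wires the same request through lldd_control_phy() using the new PHY_FUNC_SET_LINK_RATE function code.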
Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index d518c12..b5b0c2c 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -77,6 +77,24 @@ get_sas_##title##_names(u32 table_key, char *buf) \ return len; \ } +#define sas_bitfield_name_set(title, table) \ +static ssize_t \ +set_sas_##title##_names(u32 *table_key, const char *buf) \ +{ \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + len = strlen(table[i].name); \ + if (strncmp(buf, table[i].name, len) == 0 && \ + (buf[len] == '\n' || buf[len] == '\0')) { \ + *table_key = table[i].value; \ + return 0; \ + } \ + } \ + return -EINVAL; \ +} + #define sas_bitfield_name_search(title, table) \ static ssize_t \ get_sas_##title##_names(u32 table_key, char *buf) \ @@ -131,7 +149,7 @@ static struct { { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, }; sas_bitfield_name_search(linkspeed, sas_linkspeed_names) - +sas_bitfield_name_set(linkspeed, sas_linkspeed_names) /* * SAS host attributes @@ -253,10 +271,39 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \ return get_sas_linkspeed_names(phy->field, buf); \ } +/* Fudge to tell if we're minimum or maximum */ +#define sas_phy_store_linkspeed(field) \ +static ssize_t \ +store_sas_phy_##field(struct class_device *cdev, const char *buf, \ + size_t count) \ +{ \ + struct sas_phy *phy = transport_class_to_phy(cdev); \ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \ + struct sas_internal *i = to_sas_internal(shost->transportt); \ + u32 value; \ + struct sas_phy_linkrates rates = {0}; \ + int error; \ + \ + error = set_sas_linkspeed_names(&value, buf); \ + if (error) \ + return error; \ + rates.field = value; \ + error = i->f->set_phy_speed(phy, &rates); \ + \ + return error ? 
error : count; \ +} + +#define sas_phy_linkspeed_rw_attr(field) \ + sas_phy_show_linkspeed(field) \ + sas_phy_store_linkspeed(field) \ +static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \ + store_sas_phy_##field) + #define sas_phy_linkspeed_attr(field) \ sas_phy_show_linkspeed(field) \ static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) + #define sas_phy_show_linkerror(field) \ static ssize_t \ show_sas_phy_##field(struct class_device *cdev, char *buf) \ @@ -326,9 +373,9 @@ sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); //sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int); sas_phy_linkspeed_attr(negotiated_linkrate); sas_phy_linkspeed_attr(minimum_linkrate_hw); -sas_phy_linkspeed_attr(minimum_linkrate); +sas_phy_linkspeed_rw_attr(minimum_linkrate); sas_phy_linkspeed_attr(maximum_linkrate_hw); -sas_phy_linkspeed_attr(maximum_linkrate); +sas_phy_linkspeed_rw_attr(maximum_linkrate); sas_phy_linkerror_attr(invalid_dword_count); sas_phy_linkerror_attr(running_disparity_error_count); sas_phy_linkerror_attr(loss_of_dword_sync_count); @@ -1310,13 +1357,23 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, * Setup / Teardown code */ -#define SETUP_TEMPLATE(attrb, field, perm, test) \ +#define SETUP_TEMPLATE(attrb, field, perm, test) \ i->private_##attrb[count] = class_device_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \ i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ count++ +#define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \ + i->private_##attrb[count] = class_device_attr_##field; \ + i->private_##attrb[count].attr.mode = perm; \ + if (ro_test) { \ + i->private_##attrb[count].attr.mode = ro_perm; \ + i->private_##attrb[count].store = NULL; \ + } \ + i->attrb[count] = &i->private_##attrb[count]; \ + if (test) \ + count++ #define SETUP_RPORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1) @@ -1327,6 +1384,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, #define SETUP_PHY_ATTRIBUTE(field) \ SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) +#define SETUP_PHY_ATTRIBUTE_RW(field) \ + SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ + !i->f->set_phy_speed, S_IRUGO) + #define SETUP_PORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) @@ -1407,9 +1468,9 @@ sas_attach_transport(struct sas_function_template *ft) //SETUP_PHY_ATTRIBUTE(port_identifier); SETUP_PHY_ATTRIBUTE(negotiated_linkrate); SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw); - SETUP_PHY_ATTRIBUTE(minimum_linkrate); + SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate); SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw); - SETUP_PHY_ATTRIBUTE(maximum_linkrate); + SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate); SETUP_PHY_ATTRIBUTE(invalid_dword_count); SETUP_PHY_ATTRIBUTE(running_disparity_error_count); diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 87de518..5302437 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -150,12 +150,18 @@ struct sas_port { #define transport_class_to_sas_port(cdev) \ dev_to_sas_port((cdev)->dev) +struct sas_phy_linkrates { + enum sas_linkrate maximum_linkrate; + enum sas_linkrate minimum_linkrate; +}; + /* The functions by which the transport class and the driver communicate */ struct sas_function_template { int (*get_linkerrors)(struct sas_phy *); int (*get_enclosure_identifier)(struct sas_rphy *, u64 *); int (*get_bay_identifier)(struct sas_rphy *); int 
(*phy_reset)(struct sas_phy *, int); + int (*set_phy_speed)(struct sas_phy *, struct sas_phy_linkrates *); }; -- cgit v0.10.2 From a01e70e570a72b8a8c9a58062e4f5bdcd3986222 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 6 Sep 2006 19:28:07 -0500 Subject: [SCSI] aci94xx: implement link rate setting This patch implements the ability to set the minimum and maximum linkrates for both libsas (for expanders) and aic94xx (for the host phys). It also tidies up the setting of the hardware min and max to make sure they're updated when the expander emits a change broadcast. Signed-off-by: James Bottomley diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h index cb7caf1..1bd5b4e 100644 --- a/drivers/scsi/aic94xx/aic94xx.h +++ b/drivers/scsi/aic94xx/aic94xx.h @@ -109,6 +109,6 @@ int asd_clear_nexus_port(struct asd_sas_port *port); int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha); /* ---------- Phy Management ---------- */ -int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func); +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg); #endif diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index ef8ca08..7ee49b5 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -52,6 +52,8 @@ static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) { + struct sas_phy *sas_phy = phy->sas_phy.phy; + switch (oob_mode & 7) { case PHY_SPEED_60: /* FIXME: sas transport class doesn't have this */ @@ -67,6 +69,12 @@ static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; break; } + sas_phy->negotiated_linkrate = phy->sas_phy.linkrate; + sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sas_phy->maximum_linkrate = phy->phy_desc->max_sas_lrate; + sas_phy->minimum_linkrate = phy->phy_desc->min_sas_lrate; + if (oob_mode & SAS_MODE) phy->sas_phy.oob_mode = SAS_OOB_MODE; else if (oob_mode & SATA_MODE) @@ -710,14 +718,32 @@ static const int phy_func_table[] = { [PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD, }; -int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func) +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg) { struct asd_ha_struct *asd_ha = phy->ha->lldd_ha; + struct asd_phy_desc *pd = asd_ha->phys[phy->id].phy_desc; struct asd_ascb *ascb; + struct sas_phy_linkrates *rates; int res = 1; - if (func == PHY_FUNC_CLEAR_ERROR_LOG) + switch (func) { + case PHY_FUNC_CLEAR_ERROR_LOG: return -ENOSYS; + case PHY_FUNC_SET_LINK_RATE: + rates = arg; + if (rates->minimum_linkrate) { + pd->min_sas_lrate = rates->minimum_linkrate; + pd->min_sata_lrate = rates->minimum_linkrate; + } + if (rates->maximum_linkrate) { + pd->max_sas_lrate = rates->maximum_linkrate; + pd->max_sata_lrate = rates->maximum_linkrate; + } + func = PHY_FUNC_LINK_RESET; + break; + default: + break; + } ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); if (!ascb) diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 02e796e..30b8014 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -187,10 +187,10 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; phy->phy->identify.phy_identifier = phy_id; - 
phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; - phy->phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; - phy->phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS; + phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; + phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; + phy->phy->minimum_linkrate = dr->pmin_linkrate; + phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; if (!rediscover) @@ -404,7 +404,8 @@ out: #define PC_RESP_SIZE 8 int sas_smp_phy_control(struct domain_device *dev, int phy_id, - enum phy_func phy_func) + enum phy_func phy_func, + struct sas_phy_linkrates *rates) { u8 *pc_req; u8 *pc_resp; @@ -423,6 +424,10 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id, pc_req[1] = SMP_PHY_CONTROL; pc_req[9] = phy_id; pc_req[10]= phy_func; + if (rates) { + pc_req[32] = rates->minimum_linkrate << 4; + pc_req[33] = rates->maximum_linkrate << 4; + } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); @@ -436,7 +441,7 @@ static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; - sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE); + sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL); phy->linkrate = SAS_PHY_DISABLED; } @@ -731,7 +736,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) /* Phy state */ if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) { - if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET)) + if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL)) res = sas_ex_phy_discover(dev, phy_id); if (res) return res; @@ -1706,6 +1711,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id) SAS_ADDR(phy->attached_sas_addr)) { SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", SAS_ADDR(dev->sas_addr), phy_id); + sas_ex_phy_discover(dev, phy_id); } else res = sas_discover_new(dev, phy_id); out: diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index b961664..c836a23 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -159,17 +159,57 @@ static int sas_phy_reset(struct sas_phy *phy, int hard_reset) struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt); - ret = i->dft->lldd_control_phy(asd_phy, reset_type); + ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL); } else { struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); struct domain_device *ddev = sas_find_dev_by_rphy(rphy); - ret = sas_smp_phy_control(ddev, phy->number, reset_type); + ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL); } return ret; } +static int sas_set_phy_speed(struct sas_phy *phy, + struct sas_phy_linkrates *rates) +{ + int ret; + + if ((rates->minimum_linkrate && + rates->minimum_linkrate > phy->maximum_linkrate) || + (rates->maximum_linkrate && + rates->maximum_linkrate < phy->minimum_linkrate)) + return -EINVAL; + + if (rates->minimum_linkrate && + rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + + if (rates->maximum_linkrate && + rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct 
sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE, + rates); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + ret = sas_smp_phy_control(ddev, phy->number, + PHY_FUNC_LINK_RESET, rates); + + } + + return ret; +} + static struct sas_function_template sft = { .phy_reset = sas_phy_reset, + .set_phy_speed = sas_set_phy_speed, .get_linkerrors = sas_get_linkerrors, }; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 0d69ede..bffcee4 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -70,7 +70,7 @@ int sas_notify_lldd_dev_found(struct domain_device *); void sas_notify_lldd_dev_gone(struct domain_device *); int sas_smp_phy_control(struct domain_device *dev, int phy_id, - enum phy_func phy_func); + enum phy_func phy_func, struct sas_phy_linkrates *); int sas_smp_get_phy_events(struct sas_phy *phy); struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index 024ab00..9340cdb 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c @@ -67,13 +67,14 @@ static void sas_phye_oob_error(void *data) switch (phy->error) { case 1: case 2: - i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET); + i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET, + NULL); break; case 3: default: phy->error = 0; phy->enabled = 0; - i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE); + i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL); break; } } @@ -90,7 +91,7 @@ static void sas_phye_spinup_hold(void *data) &phy->phy_events_pending); phy->error = 0; - i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD); + i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL); } /* ---------- Phy class registration ---------- */ @@ -144,10 +145,10 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) phy->phy->identify.target_port_protocols = phy->tproto; phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr); phy->phy->identify.phy_identifier = i; - phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; - phy->phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; - phy->phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS; + phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; + phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; + phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; sas_phy_add(phy->phy); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 8d91313..8e39982 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -586,7 +586,7 @@ struct sas_domain_function_template { int (*lldd_clear_nexus_ha)(struct sas_ha_struct *); /* Phy management */ - int (*lldd_control_phy)(struct asd_sas_phy *, enum phy_func); + int (*lldd_control_phy)(struct asd_sas_phy *, enum phy_func, void *); }; extern int sas_register_ha(struct sas_ha_struct *); diff --git a/include/scsi/sas.h b/include/scsi/sas.h index 9c8a5b9..2f4b6af 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -121,6 +121,7 @@ enum phy_func { PHY_FUNC_CLEAR_AFFIL, PHY_FUNC_TX_SATA_PS_SIGNAL, PHY_FUNC_RELEASE_SPINUP_HOLD = 0x10, /* LOCAL PORT ONLY! 
*/ + PHY_FUNC_SET_LINK_RATE, }; /* SAS LLDD would need to report only _very_few_ of those, like BROADCAST. -- cgit v0.10.2 From 2b7cbe20174695bca1afe2a8f755e1eb299f4768 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 7 Sep 2006 15:14:46 -0500 Subject: [SCSI] fix up SCSI netlink build CONFIG_SCSI_NETLINK can become a bool since the item its selecting (CONFIG_NET) cannot be a module. Signed-off-by: James Bottomley diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4d1998d..a6f920d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -28,7 +28,7 @@ config SCSI (the one containing the directory /) is located on a SCSI device. config SCSI_NETLINK - tristate + bool default n select NET -- cgit v0.10.2 From a506b44bb5000b2652490a906c3e58beb2a8f6bb Mon Sep 17 00:00:00 2001 From: Daniel Walker Date: Sat, 9 Sep 2006 09:31:03 -0700 Subject: [SCSI] fix compile error on module_refcount LD .tmp_vmlinux1 drivers/built-in.o(.text+0x8e1f9): In function `scsi_device_put': drivers/scsi/scsi.c:887: undefined reference to `module_refcount' make: *** [.tmp_vmlinux1] Error 1 There are only two users of module_refcount() outside of kernel/module.c and the other one uses ifdef's similar to this. Signed-Off-By: Daniel Walker Signed-off-by: James Bottomley diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index c35f5fc..c51b576 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -875,10 +875,12 @@ void scsi_device_put(struct scsi_device *sdev) { struct module *module = sdev->host->hostt->module; +#ifdef CONFIG_MODULE_UNLOAD /* The module refcount will be zero if scsi_device_get() * was called from a module removal routine */ if (module && module_refcount(module) != 0) module_put(module); +#endif put_device(&sdev->sdev_gendev); } EXPORT_SYMBOL(scsi_device_put); -- cgit v0.10.2 From 65396410af63db90d6428c678ff84aa652c3c1ec Mon Sep 17 00:00:00 2001 From: Henrik Kretzschmar Date: Tue, 12 Sep 2006 23:49:33 +0200 Subject: [SCSI] wd33c93: Scsi_Cmnd convertion Changes obsolete typedef'd Scsi_Cmnd to struct scsi_cmnd. 
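The change is mechanical, but for completeness here is a minimal sketch of what a converted entry point and its template hookup look like against this 2.6.18-era midlayer ("exdrv" is a hypothetical driver that completes every command immediately, not one of the drivers touched here):

        #include <scsi/scsi.h>
        #include <scsi/scsi_cmnd.h>
        #include <scsi/scsi_host.h>

        static int exdrv_queuecommand(struct scsi_cmnd *cmd,
                                      void (*done)(struct scsi_cmnd *))
        {
                cmd->result = DID_OK << 16;     /* pretend the command worked */
                done(cmd);                      /* complete it straight away */
                return 0;
        }

        static struct scsi_host_template exdrv_template = {
                .name           = "exdrv",
                .queuecommand   = exdrv_queuecommand,
                .this_id        = 7,
        };

The duplicated wd33c93_* prototypes in the board-specific headers below are dropped as part of the same cleanup.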
Signed-off-by: Henrik Kretzschmar Signed-off-by: James Bottomley diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index fddfa2e..0854069 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c @@ -40,7 +40,7 @@ static irqreturn_t a2091_intr (int irq, void *_instance, struct pt_regs *fp) return IRQ_HANDLED; } -static int dma_setup (Scsi_Cmnd *cmd, int dir_in) +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -115,7 +115,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in) return 0; } -static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { /* disable SCSI interrupts */ @@ -217,7 +217,7 @@ int __init a2091_detect(struct scsi_host_template *tpnt) return num_a2091; } -static int a2091_bus_reset(Scsi_Cmnd *cmd) +static int a2091_bus_reset(struct scsi_cmnd *cmd) { /* FIXME perform bus-specific reset */ diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h index 22d6a13..fe809bc 100644 --- a/drivers/scsi/a2091.h +++ b/drivers/scsi/a2091.h @@ -13,10 +13,6 @@ int a2091_detect(struct scsi_host_template *); int a2091_release(struct Scsi_Host *); -const char *wd33c93_info(void); -int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); -int wd33c93_abort(Scsi_Cmnd *); -int wd33c93_reset(Scsi_Cmnd *, unsigned int); #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index ae9ab4b..7bf46d4 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -44,7 +44,7 @@ static irqreturn_t a3000_intr (int irq, void *dummy, struct pt_regs *fp) return IRQ_NONE; } -static int dma_setup (Scsi_Cmnd *cmd, int dir_in) +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -110,8 +110,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in) return 0; } -static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, - int status) +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, + int status) { /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; @@ -205,7 +205,7 @@ fail_register: return 0; } -static int a3000_bus_reset(Scsi_Cmnd *cmd) +static int a3000_bus_reset(struct scsi_cmnd *cmd) { /* FIXME perform bus-specific reset */ diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h index 5535a65..44a4ec7 100644 --- a/drivers/scsi/a3000.h +++ b/drivers/scsi/a3000.h @@ -13,10 +13,6 @@ int a3000_detect(struct scsi_host_template *); int a3000_release(struct Scsi_Host *); -const char *wd33c93_info(void); -int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); -int wd33c93_abort(Scsi_Cmnd *); -int wd33c93_reset(Scsi_Cmnd *, unsigned int); #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index a0d831b..18dbe5c 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c @@ -47,7 +47,7 @@ void gvp11_setup (char *str, int *ints) gvp11_xfer_mask = ints[1]; } -static int dma_setup (Scsi_Cmnd *cmd, int dir_in) +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { unsigned short cntr = GVP11_DMAC_INT_ENABLE; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -142,8 +142,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in) return 0; } -static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, - int status) +static void dma_stop(struct 
Scsi_Host *instance, struct scsi_cmnd *SCpnt, + int status) { /* stop DMA */ DMA(instance)->SP_DMA = 1; @@ -341,7 +341,7 @@ release: return num_gvp11; } -static int gvp11_bus_reset(Scsi_Cmnd *cmd) +static int gvp11_bus_reset(struct scsi_cmnd *cmd) { /* FIXME perform bus-specific reset */ diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h index 575d219d..bf22859 100644 --- a/drivers/scsi/gvp11.h +++ b/drivers/scsi/gvp11.h @@ -13,10 +13,6 @@ int gvp11_detect(struct scsi_host_template *); int gvp11_release(struct Scsi_Host *); -const char *wd33c93_info(void); -int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); -int wd33c93_abort(Scsi_Cmnd *); -int wd33c93_reset(Scsi_Cmnd *, unsigned int); #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c index cb367c2..9b991b7 100644 --- a/drivers/scsi/mvme147.c +++ b/drivers/scsi/mvme147.c @@ -29,7 +29,7 @@ static irqreturn_t mvme147_intr (int irq, void *dummy, struct pt_regs *fp) return IRQ_HANDLED; } -static int dma_setup (Scsi_Cmnd *cmd, int dir_in) +static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { unsigned char flags = 0x01; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -57,7 +57,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in) return 0; } -static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { m147_pcc->dma_cntrl = 0; @@ -112,7 +112,7 @@ int mvme147_detect(struct scsi_host_template *tpnt) return 0; } -static int mvme147_bus_reset(Scsi_Cmnd *cmd) +static int mvme147_bus_reset(struct scsi_cmnd *cmd) { /* FIXME perform bus-specific reset */ diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h index 2f56d69..32aee854 100644 --- a/drivers/scsi/mvme147.h +++ b/drivers/scsi/mvme147.h @@ -12,10 +12,6 @@ int mvme147_detect(struct scsi_host_template *); int mvme147_release(struct Scsi_Host *); -const char *wd33c93_info(void); -int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); -int wd33c93_abort(Scsi_Cmnd *); -int wd33c93_reset(Scsi_Cmnd *, unsigned int); #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index 7cd366f..4f1db6f 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c @@ -97,7 +97,7 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id, struct pt_regs *regs) } static inline -void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp) +void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp) { unsigned long len = cmd->SCp.this_residual; void *addr = cmd->SCp.ptr; @@ -129,7 +129,7 @@ void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp) hcp->desc.cntinfo = HPCDMA_EOX; } -static int dma_setup(Scsi_Cmnd *cmd, int datainp) +static int dma_setup(struct scsi_cmnd *cmd, int datainp) { struct ip22_hostdata *hdata = HDATA(cmd->device->host); struct hpc3_scsiregs *hregs = @@ -163,7 +163,7 @@ static int dma_setup(Scsi_Cmnd *cmd, int datainp) return 0; } -static void dma_stop(struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, +static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { struct ip22_hostdata *hdata = HDATA(instance); @@ -305,7 +305,7 @@ static int sgiwd93_release(struct Scsi_Host *instance) return 1; } -static int sgiwd93_bus_reset(Scsi_Cmnd *cmd) +static int sgiwd93_bus_reset(struct scsi_cmnd *cmd) { /* FIXME perform bus-specific reset */ -- cgit v0.10.2 From 
f2d719c65ad8f10afa7bec11315faa7badf4ecb9 Mon Sep 17 00:00:00 2001 From: Alexis Bruemmer Date: Thu, 7 Sep 2006 14:32:16 -0700 Subject: [SCSI] aic94xx: Removes Reliance on FLASH Manufacture IDs This patch removes the reliance on FLASH Manufacture IDs for validation. Signed-off-by: Alexis Bruemmer Signed-off-by: James Bottomley diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c index eec1e0d..83574b5 100644 --- a/drivers/scsi/aic94xx/aic94xx_sds.c +++ b/drivers/scsi/aic94xx/aic94xx_sds.c @@ -376,7 +376,6 @@ out: /* ---------- FLASH stuff ---------- */ #define FLASH_RESET 0xF0 -#define FLASH_MANUF_AMD 1 #define FLASH_SIZE 0x200000 #define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** " @@ -627,7 +626,7 @@ static int asd_find_flash_dir(struct asd_ha_struct *asd_ha, static int asd_flash_getid(struct asd_ha_struct *asd_ha) { int err = 0; - u32 reg, inc; + u32 reg; reg = asd_read_reg_dword(asd_ha, EXSICNFGR); @@ -648,53 +647,7 @@ static int asd_flash_getid(struct asd_ha_struct *asd_ha) ASD_DPRINTK("couldn't reset flash(%d)\n", err); return err; } - /* Get flash info. This would most likely be AMD Am29LV family flash. - * First try the sequence for word mode. It is the same as for - * 008B (byte mode only), 160B (word mode) and 800D (word mode). - */ - reg = asd_ha->hw_prof.flash.bar; - inc = asd_ha->hw_prof.flash.wide ? 2 : 1; - asd_write_reg_byte(asd_ha, reg + 0x555, 0xAA); - asd_write_reg_byte(asd_ha, reg + 0x2AA, 0x55); - asd_write_reg_byte(asd_ha, reg + 0x555, 0x90); - asd_ha->hw_prof.flash.manuf = asd_read_reg_byte(asd_ha, reg); - asd_ha->hw_prof.flash.dev_id= asd_read_reg_byte(asd_ha,reg+inc); - asd_ha->hw_prof.flash.sec_prot = asd_read_reg_byte(asd_ha,reg+inc+inc); - /* Get out of autoselect mode. */ - err = asd_reset_flash(asd_ha); - - if (asd_ha->hw_prof.flash.manuf == FLASH_MANUF_AMD) { - ASD_DPRINTK("0Found FLASH(%d) manuf:%d, dev_id:0x%x, " - "sec_prot:%d\n", - asd_ha->hw_prof.flash.wide ? 16 : 8, - asd_ha->hw_prof.flash.manuf, - asd_ha->hw_prof.flash.dev_id, - asd_ha->hw_prof.flash.sec_prot); - return 0; - } - - /* Ok, try the sequence for byte mode of 160B and 800D. - * We may actually never need this. - */ - asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA); - asd_write_reg_byte(asd_ha, reg + 0x555, 0x55); - asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90); - asd_ha->hw_prof.flash.manuf = asd_read_reg_byte(asd_ha, reg); - asd_ha->hw_prof.flash.dev_id = asd_read_reg_byte(asd_ha, reg + 2); - asd_ha->hw_prof.flash.sec_prot = asd_read_reg_byte(asd_ha, reg + 4); - err = asd_reset_flash(asd_ha); - - if (asd_ha->hw_prof.flash.manuf == FLASH_MANUF_AMD) { - ASD_DPRINTK("1Found FLASH(%d) manuf:%d, dev_id:0x%x, " - "sec_prot:%d\n", - asd_ha->hw_prof.flash.wide ? 
-			    asd_ha->hw_prof.flash.manuf,
-			    asd_ha->hw_prof.flash.dev_id,
-			    asd_ha->hw_prof.flash.sec_prot);
-		return 0;
-	}
-
-	return -ENOENT;
+	return 0;
 }

 static u16 asd_calc_flash_chksum(u16 *p, int size)
-- cgit v0.10.2

From 10d19ae5e1715c27db7009df6d59179774e7b8a1 Mon Sep 17 00:00:00 2001
From: "malahal@us.ibm.com"
Date: Thu, 7 Sep 2006 15:12:42 -0700
Subject: [SCSI] aic94xx: Fix for a typo in aic94xx_init()

Signed-off-by: Malahal Naineni
Signed-off-by: James Bottomley

diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 302b54f..ee2ccad 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -828,7 +828,7 @@ static int __init aic94xx_init(void)
 	aic94xx_transport_template =
 		sas_domain_attach_transport(&aic94xx_transport_functions);
-	if (err)
+	if (!aic94xx_transport_template)
 		goto out_destroy_caches;

 	err = pci_register_driver(&aic94xx_pci_driver);
-- cgit v0.10.2

From 5fcda4224529c4e550c917668d5e96c1d3e7039b Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 14 Sep 2006 17:04:58 -0500
Subject: [SCSI] aha152x: remove static host array

Fix this driver not to use a static two element host array; instead use a
list. This should fix the panic on multiple eject/reinsert of the pcmcia
version of this device.

Signed-off-by: James Bottomley

diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index f974869..fb6a476 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -253,6 +253,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -262,6 +263,8 @@
 #include
 #include "aha152x.h"

+static LIST_HEAD(aha152x_host_list);
+
 /* DEFINES */
@@ -423,8 +426,6 @@ MODULE_DEVICE_TABLE(isapnp, id_table);
 #endif /* !PCMCIA */

-static int registered_count=0;
-static struct Scsi_Host *aha152x_host[2];
 static struct scsi_host_template aha152x_driver_template;

 /*
@@ -541,6 +542,7 @@ struct aha152x_hostdata {
 #ifdef __ISAPNP__
 	struct pnp_dev *pnpdev;
 #endif
+	struct list_head host_list;
 };

@@ -755,20 +757,9 @@ static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
 	return ptr;
 }

-static inline struct Scsi_Host *lookup_irq(int irqno)
-{
-	int i;
-
-	for(i=0; iirq==irqno)
-		return aha152x_host[i];
-
-	return NULL;
-}
-
 static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs)
 {
-	struct Scsi_Host *shpnt = lookup_irq(irqno);
+	struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;

 	if (!shpnt) {
 		printk(KERN_ERR "aha152x: catched software interrupt %d for unknown controller.\n", irqno);
@@ -791,10 +782,11 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
 		return NULL;
 	}

-	/* need to have host registered before triggering any interrupt */
-	aha152x_host[registered_count] = shpnt;
-
 	memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt));
+	INIT_LIST_HEAD(&HOSTDATA(shpnt)->host_list);
+
+	/* need to have host registered before triggering any interrupt */
+	list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);

 	shpnt->io_port = setup->io_port;
 	shpnt->n_io_port = IO_RANGE;
@@ -907,12 +899,10 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
 	scsi_scan_host(shpnt);

-	registered_count++;
-
 	return shpnt;

 out_host_put:
-	aha152x_host[registered_count]=NULL;
+	list_del(&HOSTDATA(shpnt)->host_list);
 	scsi_host_put(shpnt);

 	return NULL;
@@ -937,6 +927,7 @@ void aha152x_release(struct Scsi_Host *shpnt)
 #endif

 	scsi_remove_host(shpnt);
+	list_del(&HOSTDATA(shpnt)->host_list);
 	scsi_host_put(shpnt);
 }

@@ -1459,9 +1450,12 @@ static struct work_struct aha152x_tq;
  */
 static void run(void)
 {
-	int i;
-	for (i = 0; i0;
+	return 1;
 }

 static void __exit aha152x_exit(void)
 {
-	int i;
+	struct aha152x_hostdata *hd;
+
+	list_for_each_entry(hd, &aha152x_host_list, host_list) {
+		struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
-	for(i=0; i
-- cgit v0.10.2

From: Jesper Juhl
Date: Fri, 15 Sep 2006 14:43:11 +0200
Subject: [SCSI] megaraid: Make megaraid_ioctl() check copy_to_user() return value

Check copy_to_user() return value in drivers/scsi/megaraid.c::megadev_ioctl()

This gets rid of this little warning:

drivers/scsi/megaraid.c:3661: warning: ignoring return value of 'copy_to_user', declared with attribute warn_unused_result

Signed-off-by: Jesper Juhl
Acked-by: "Ju, Seokmann"
Signed-off-by: James Bottomley

diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ccb0055..b87bef6 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -3656,8 +3656,9 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
 			 * Send the request sense data also, irrespective of
 			 * whether the user has asked for it or not.
 			 */
-			copy_to_user(upthru->reqsensearea,
-					pthru->reqsensearea, 14);
+			if (copy_to_user(upthru->reqsensearea,
+					pthru->reqsensearea, 14))
+				rval = -EFAULT;

 freemem_and_return:
 			if( pthru->dataxferlen ) {
-- cgit v0.10.2
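The megaraid fix above is an instance of a general kernel rule: copy_to_user() returns the number of bytes it could not copy, so its result has to be checked and folded into the ioctl's return value rather than ignored. The fragment below is a minimal, self-contained sketch of that pattern, not code from megaraid.c; the struct, field, and function names (example_passthru, example_copy_sense) are invented for illustration.

/*
 * Illustrative sketch of the checked copy_to_user() pattern applied by
 * the patch above.  Names here are made up; only the copy_to_user()
 * call and the -EFAULT handling reflect real kernel interfaces.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>	/* <asm/uaccess.h> on kernels of this vintage */

struct example_passthru {
	unsigned char reqsensearea[14];
};

static int example_copy_sense(struct example_passthru __user *upthru,
			      const struct example_passthru *pthru)
{
	int rval = 0;

	/*
	 * copy_to_user() returns the number of bytes that could NOT be
	 * copied; any non-zero result means the user buffer was bad.
	 */
	if (copy_to_user(upthru->reqsensearea, pthru->reqsensearea,
			 sizeof(pthru->reqsensearea)))
		rval = -EFAULT;

	return rval;
}

In megadev_ioctl() the same check feeds the function's existing rval variable, so a faulting user buffer is reported to the caller as -EFAULT instead of being silently dropped.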