Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/qlge.h      |  61
-rw-r--r--  drivers/net/qlge/qlge_dbg.c  |  62
-rw-r--r--  drivers/net/qlge/qlge_main.c |  80
3 files changed, 82 insertions, 121 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 459663a..c1dadad 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -28,11 +28,11 @@
 	} while (0)
 
 #define QLGE_VENDOR_ID 0x1077
-#define QLGE_DEVICE_ID1 0x8012
-#define QLGE_DEVICE_ID 0x8000
+#define QLGE_DEVICE_ID 0x8012
 
-#define MAX_RX_RINGS 128
-#define MAX_TX_RINGS 128
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
 
 #define NUM_TX_RING_ENTRIES 256
 #define NUM_RX_RING_ENTRIES 256
@@ -45,6 +45,7 @@
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
 
+#define MAX_CQ 128
 #define DFLT_COALESCE_WAIT 100	/* 100 usec wait for coalescing */
 #define MAX_INTER_FRAME_WAIT 10	/* 10 usec max interframe-wait for coalescing */
 #define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
@@ -961,8 +962,7 @@ struct ib_mac_iocb_rsp {
 #define IB_MAC_IOCB_RSP_DS	0x40	/* data is in small buffer */
 #define IB_MAC_IOCB_RSP_DL	0x80	/* data is in large buffer */
 	__le32 data_len;	/* */
-	__le32 data_addr_lo;	/* */
-	__le32 data_addr_hi;	/* */
+	__le64 data_addr;	/* */
 	__le32 rss;		/* */
 	__le16 vlan_id;		/* 12 bits */
 #define IB_MAC_IOCB_RSP_C	0x1000	/* VLAN CFI bit */
@@ -976,8 +976,7 @@ struct ib_mac_iocb_rsp {
 #define IB_MAC_IOCB_RSP_HS	0x40
 #define IB_MAC_IOCB_RSP_HL	0x80
 	__le32 hdr_len;		/* */
-	__le32 hdr_addr_lo;	/* */
-	__le32 hdr_addr_hi;	/* */
+	__le64 hdr_addr;	/* */
 } __attribute((packed));
 
 struct ib_ae_iocb_rsp {
@@ -1042,10 +1041,8 @@ struct wqicb {
 	__le16 cq_id_rss;
 #define Q_CQ_ID_RSS_RV 0x8000
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 cnsmr_idx_addr_lo;
-	__le32 cnsmr_idx_addr_hi;
+	__le64 addr;
+	__le64 cnsmr_idx_addr;
 } __attribute((packed));
 
 /*
@@ -1070,18 +1067,14 @@ struct cqicb {
 #define LEN_CPP_64	0x0002
 #define LEN_CPP_128	0x0003
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 prod_idx_addr_lo;
-	__le32 prod_idx_addr_hi;
+	__le64 addr;
+	__le64 prod_idx_addr;
 	__le16 pkt_delay;
 	__le16 irq_delay;
-	__le32 lbq_addr_lo;
-	__le32 lbq_addr_hi;
+	__le64 lbq_addr;
 	__le16 lbq_buf_size;
 	__le16 lbq_len;		/* entry count */
-	__le32 sbq_addr_lo;
-	__le32 sbq_addr_hi;
+	__le64 sbq_addr;
 	__le16 sbq_buf_size;
 	__le16 sbq_len;		/* entry count */
 } __attribute((packed));
@@ -1145,7 +1138,7 @@ struct tx_ring {
 	struct wqicb wqicb;	/* structure used to inform chip of new queue */
 	void *wq_base;		/* pci_alloc:virtual addr for tx */
 	dma_addr_t wq_base_dma;	/* pci_alloc:dma addr for tx */
-	u32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
+	__le32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
 	dma_addr_t cnsmr_idx_sh_reg_dma;	/* dma-shadow copy of consumer */
 	u32 wq_size;		/* size in bytes of queue area */
 	u32 wq_len;		/* number of entries in queue */
@@ -1181,7 +1174,7 @@ struct rx_ring {
 	u32 cq_size;
 	u32 cq_len;
 	u16 cq_id;
-	volatile __le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
+	__le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
 	dma_addr_t prod_idx_sh_reg_dma;
 	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
 	u32 cnsmr_idx;	/* current sw idx */
@@ -1402,9 +1395,11 @@ struct ql_adapter {
 	int rx_ring_count;
 	int ring_mem_size;
 	void *ring_mem;
-	struct rx_ring *rx_ring;
+
+	struct rx_ring rx_ring[MAX_RX_RINGS];
+	struct tx_ring tx_ring[MAX_TX_RINGS];
+
 	int rx_csum;
-	struct tx_ring *tx_ring;
 	u32 default_rx_queue;
 
 	u16 rx_coalesce_usecs;	/* cqicb->int_delay */
@@ -1459,6 +1454,24 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
 	mmiowb();
 }
 
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32 *addr)
+{
+	u32 reg;
+	reg = le32_to_cpu(*addr);
+	rmb();
+	return reg;
+}
+
 extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 3f5e02d..379b895 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -435,14 +435,10 @@ void ql_dump_wqicb(struct wqicb *wqicb)
 	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
 	       le16_to_cpu(wqicb->cq_id_rss));
 	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
-	printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->addr_lo));
-	printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->addr_hi));
-	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
-	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
+	printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(wqicb->addr));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
 }
 
 void ql_dump_tx_ring(struct tx_ring *tx_ring)
@@ -455,10 +451,11 @@
 	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
 	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
 	       (unsigned long long) tx_ring->wq_base_dma);
-	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
-	       tx_ring->cnsmr_idx_sh_reg);
-	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
-	       (unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma);
+	printk(KERN_ERR PFX
+	       "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
+	       tx_ring->cnsmr_idx_sh_reg,
+	       tx_ring->cnsmr_idx_sh_reg
+			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
 	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
 	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
 	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
@@ -510,30 +507,22 @@ void ql_dump_cqicb(struct cqicb *cqicb)
 	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
 	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
 	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
-	printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->addr_lo));
-	printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->addr_hi));
-	printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->prod_idx_addr_lo));
-	printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->prod_idx_addr_hi));
+	printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->addr));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
 	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->pkt_delay));
 	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->irq_delay));
-	printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->lbq_addr_lo));
-	printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->lbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
 	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->lbq_buf_size));
 	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->lbq_len));
-	printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->sbq_addr_lo));
-	printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->sbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
 	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->sbq_buf_size));
 	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
@@ -558,9 +547,10 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
 	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
 	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
 	printk(KERN_ERR PFX
-	       "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
+	       "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
 	       rx_ring->prod_idx_sh_reg,
-	       rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
+	       rx_ring->prod_idx_sh_reg
+			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
 	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
 	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
 	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
@@ -809,10 +799,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
 
 	printk(KERN_ERR PFX "data_len = %d\n",
 	       le32_to_cpu(ib_mac_rsp->data_len));
-	printk(KERN_ERR PFX "data_addr_hi = 0x%x\n",
-	       le32_to_cpu(ib_mac_rsp->data_addr_hi));
-	printk(KERN_ERR PFX "data_addr_lo = 0x%x\n",
-	       le32_to_cpu(ib_mac_rsp->data_addr_lo));
+	printk(KERN_ERR PFX "data_addr = 0x%llx\n",
+	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
 		printk(KERN_ERR PFX "rss = %x\n",
 		       le32_to_cpu(ib_mac_rsp->rss));
@@ -828,10 +816,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
 		printk(KERN_ERR PFX "hdr length = %d.\n",
 		       le32_to_cpu(ib_mac_rsp->hdr_len));
-		printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n",
-		       le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
-		printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n",
-		       le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
+		printk(KERN_ERR PFX "hdr addr = 0x%llx.\n",
+		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
 	}
 }
 #endif
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index f4c0160..45421c8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -76,7 +76,6 @@ MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
 
 static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
-	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
 	/* required last entry */
 	{0,}
 };
@@ -127,12 +126,12 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 
 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 {
-	unsigned int seconds = 3;
+	unsigned int wait_count = 30;
 	do {
 		if (!ql_sem_trylock(qdev, sem_mask))
 			return 0;
-		ssleep(1);
-	} while (--seconds);
+		udelay(100);
+	} while (--wait_count);
 	return -ETIMEDOUT;
 }
 
@@ -1545,7 +1544,7 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 {
 	struct ql_adapter *qdev = rx_ring->qdev;
-	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 	struct ob_mac_iocb_rsp *net_rsp = NULL;
 	int count = 0;
 
@@ -1571,7 +1570,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 		}
 		count++;
 		ql_update_cq(rx_ring);
-		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 	}
 	ql_write_cq_idx(rx_ring);
 	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
@@ -1591,7 +1590,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
 {
 	struct ql_adapter *qdev = rx_ring->qdev;
-	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 	struct ql_net_rsp_iocb *net_rsp;
 	int count = 0;
 
@@ -1624,7 +1623,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
 		}
 		count++;
 		ql_update_cq(rx_ring);
-		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 		if (count == budget)
 			break;
 	}
@@ -1787,7 +1786,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	 * Check the default queue and wake handler if active.
 	 */
 	rx_ring = &qdev->rx_ring[0];
-	if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
+	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
 		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
 		ql_disable_completion_interrupt(qdev, intr_context->intr);
 		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
@@ -1801,7 +1800,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 		 */
 		for (i = 1; i < qdev->rx_ring_count; i++) {
 			rx_ring = &qdev->rx_ring[i];
-			if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) !=
+			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
 			    rx_ring->cnsmr_idx) {
 				QPRINTK(qdev, INTR, INFO,
 					"Waking handler for rx_ring[%d].\n", i);
@@ -2356,28 +2355,6 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
 	}
 }
 
-static void ql_free_ring_cb(struct ql_adapter *qdev)
-{
-	kfree(qdev->ring_mem);
-}
-
-static int ql_alloc_ring_cb(struct ql_adapter *qdev)
-{
-	/* Allocate space for tx/rx ring control blocks. */
-	qdev->ring_mem_size =
-	    (qdev->tx_ring_count * sizeof(struct tx_ring)) +
-	    (qdev->rx_ring_count * sizeof(struct rx_ring));
-	qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
-	if (qdev->ring_mem == NULL) {
-		return -ENOMEM;
-	} else {
-		qdev->rx_ring = qdev->ring_mem;
-		qdev->tx_ring = qdev->ring_mem +
-		    (qdev->rx_ring_count * sizeof(struct rx_ring));
-	}
-	return 0;
-}
-
 static void ql_free_mem_resources(struct ql_adapter *qdev)
 {
 	int i;
@@ -2467,12 +2444,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 
 	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
 	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
-	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
-	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
+	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
 
-	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
-	cqicb->prod_idx_addr_hi =
-	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
+	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
 
 	/*
 	 * Set up the control block load flags.
@@ -2483,10 +2457,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	if (rx_ring->lbq_len) {
 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
 		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
-		cqicb->lbq_addr_lo =
-		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
-		cqicb->lbq_addr_hi =
-		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
+		cqicb->lbq_addr =
+		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
 		    (u16) rx_ring->lbq_buf_size;
 		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
@@ -2501,10 +2473,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	if (rx_ring->sbq_len) {
 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
 		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
-		cqicb->sbq_addr_lo =
-		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
-		cqicb->sbq_addr_hi =
-		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
+		cqicb->sbq_addr =
+		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
 		cqicb->sbq_buf_size =
 		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
@@ -2611,12 +2581,9 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
 	wqicb->rid = 0;
-	wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
-	wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
+	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
 
-	wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
-	wqicb->cnsmr_idx_addr_hi =
-	    cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
+	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
 
 	ql_init_tx_ring(qdev, tx_ring);
 
@@ -2746,14 +2713,14 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
 			 * Outbound queue is for outbound completions only.
 			 */
 			intr_context->handler = qlge_msix_tx_isr;
-			sprintf(intr_context->name, "%s-txq-%d",
+			sprintf(intr_context->name, "%s-tx-%d",
 				qdev->ndev->name, i);
 		} else {
 			/*
 			 * Inbound queues handle unicast frames only.
 			 */
 			intr_context->handler = qlge_msix_rx_isr;
-			sprintf(intr_context->name, "%s-rxq-%d",
+			sprintf(intr_context->name, "%s-rx-%d",
 				qdev->ndev->name, i);
 		}
 	}
@@ -3247,7 +3214,6 @@ static int qlge_close(struct net_device *ndev)
 		msleep(1);
 	ql_adapter_down(qdev);
 	ql_release_adapter_resources(qdev);
-	ql_free_ring_cb(qdev);
 	return 0;
 }
 
@@ -3273,8 +3239,8 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 	 * This limitation can be removed when requested.
 	 */
 
-	if (cpu_cnt > 8)
-		cpu_cnt = 8;
+	if (cpu_cnt > MAX_CPUS)
+		cpu_cnt = MAX_CPUS;
 
 	/*
 	 * rx_ring[0] is always the default queue.
@@ -3294,9 +3260,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 	 */
 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
 
-	if (ql_alloc_ring_cb(qdev))
-		return -ENOMEM;
-
 	for (i = 0; i < qdev->tx_ring_count; i++) {
 		tx_ring = &qdev->tx_ring[i];
 		memset((void *)tx_ring, 0, sizeof(tx_ring));
@@ -3393,7 +3356,6 @@ static int qlge_open(struct net_device *ndev)
 
 error_up:
 	ql_release_adapter_resources(qdev);
-	ql_free_ring_cb(qdev);
 	return err;
 }
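
A note on the control-block conversion above: in the wqicb/cqicb layouts the 32-bit low half sits immediately before the high half, which is exactly the byte layout of a little-endian 64-bit value, so a single cpu_to_le64() store is byte-for-byte equivalent to the old cpu_to_le32() lo/hi pair. The user-space sketch below illustrates that equivalence; the struct names are hypothetical and <endian.h>'s htole32()/htole64() stand in for the kernel helpers -- it is not driver code.

/* Illustrative only: writing one little-endian 64-bit field produces the
 * same bytes as writing a little-endian lo/hi pair, which is why the
 * wqicb/cqicb address fields could be collapsed into a single __le64. */
#include <assert.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct split_addr {             /* old style: two 32-bit halves, lo first */
        uint32_t addr_lo;
        uint32_t addr_hi;
};

struct packed_addr {            /* new style: one 64-bit field */
        uint64_t addr;
};

int main(void)
{
        uint64_t dma = 0x123456789abcdef0ULL;   /* example DMA address */
        struct split_addr s;
        struct packed_addr p;

        s.addr_lo = htole32((uint32_t)dma);          /* cpu_to_le32(dma) */
        s.addr_hi = htole32((uint32_t)(dma >> 32));  /* cpu_to_le32(dma >> 32) */
        p.addr    = htole64(dma);                    /* cpu_to_le64(dma) */

        /* Both encodings hand the chip the same 8 bytes. */
        assert(memcmp(&s, &p, sizeof(p)) == 0);
        printf("lo/hi pair and single 64-bit field encode identical bytes\n");
        return 0;
}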
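The new ql_read_sh_reg() helper likewise centralizes the shadow-register access (le32_to_cpu() plus rmb()) that the completion-processing paths above now call instead of dereferencing a volatile pointer. Below is a minimal user-space mock of that polling pattern, offered only as an illustration: the plain array standing in for the completion queue, the variable names, and __sync_synchronize() standing in for rmb() are all assumptions, not driver code.

/* User-space mock of the shadow-register polling pattern: the "chip"
 * publishes a producer index in host memory and the "driver" drains
 * completions until its consumer index catches up. */
#include <stdint.h>
#include <stdio.h>

#define Q_LEN 16

static uint32_t shadow_prod_idx;        /* stands in for *prod_idx_sh_reg */
static int cq[Q_LEN];                   /* stands in for the completion queue */

static uint32_t read_sh_reg(const volatile uint32_t *addr)
{
        uint32_t reg = *addr;
        __sync_synchronize();           /* stands in for the kernel's rmb() */
        return reg;
}

int main(void)
{
        uint32_t cnsmr_idx = 0;

        /* "Chip" side: post four completions, then update the shadow. */
        for (int i = 0; i < 4; i++)
                cq[i] = 100 + i;
        shadow_prod_idx = 4;

        /* "Driver" side: same loop shape as ql_clean_*_rx_ring(). */
        uint32_t prod = read_sh_reg(&shadow_prod_idx);
        while (prod != cnsmr_idx) {
                printf("handling completion %d\n", cq[cnsmr_idx % Q_LEN]);
                cnsmr_idx++;
                prod = read_sh_reg(&shadow_prod_idx);
        }
        return 0;
}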