Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hvcs.c                 2
-rw-r--r--  drivers/net/ibmveth.c              30
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c  10
3 files changed, 21 insertions, 21 deletions
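
This commit is a mechanical rename of the PAPR hypervisor return-code constants from mixed case (H_Success, H_Busy, H_isLongBusy, ...) to the upper-case spellings defined in arch/powerpc/include/asm/hvcall.h. A sketch of what the new names resolve to (numeric values quoted from memory, so treat them as assumptions; H_DROPPED, H_RESOURCE and H_IN_PROGRESS are defined alongside them):

    /* Sketch of the renamed return codes, per arch/powerpc/include/asm/hvcall.h. */
    #define H_SUCCESS                 0     /* hcall completed successfully */
    #define H_BUSY                    1     /* hardware busy, retry later */
    #define H_LONG_BUSY_START_RANGE   9900  /* long-busy hint range starts here */
    #define H_LONG_BUSY_END_RANGE     9905  /* ...and ends here */

    /* A long-busy return encodes a suggested retry delay within the range. */
    #define H_IS_LONG_BUSY(x) \
            (((x) >= H_LONG_BUSY_START_RANGE) && ((x) <= H_LONG_BUSY_END_RANGE))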
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 327b00c..8d97b39 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -904,7 +904,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
* It is possible the vty-server was removed after the irq was
* requested but before we have time to enable interrupts.
*/
- if (vio_enable_interrupts(vdev) == H_Success)
+ if (vio_enable_interrupts(vdev) == H_SUCCESS)
return 0;
else {
printk(KERN_ERR "HVCS: int enable failed for"
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ceb98fd..52d0102 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -235,7 +235,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
pool->free_map[free_index] = index;
pool->skbuff[index] = NULL;
pool->consumer_index--;
@@ -373,7 +373,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
}
@@ -511,7 +511,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->filter_list_dma,
mac_address);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
adapter->buffer_list_dma,
@@ -527,7 +527,7 @@ static int ibmveth_open(struct net_device *netdev)
ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
do {
rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while (H_isLongBusy(rc) || (rc == H_Busy));
+ } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
ibmveth_cleanup(adapter);
return rc;
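
The h_free_logical_lan() loops in this and the following hunk are the stock pSeries teardown idiom: retry while the hypervisor answers plain-busy or long-busy. In isolation the idiom looks like the sketch below; the msleep() back-off (from <linux/delay.h>) is an illustrative addition, since the driver as shown simply spins:

    /* Sketch: drain busy returns from a resource-freeing hcall. */
    long rc;

    do {
            rc = h_free_logical_lan(adapter->vdev->unit_address);
            if (H_IS_LONG_BUSY(rc))
                    msleep(1);      /* illustrative back-off; not in this diff */
    } while ((rc == H_BUSY) || H_IS_LONG_BUSY(rc));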
@@ -556,9 +556,9 @@ static int ibmveth_close(struct net_device *netdev)
do {
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
- if(lpar_rc != H_Success)
+ if(lpar_rc != H_SUCCESS)
{
ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
lpar_rc);
@@ -693,9 +693,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc[4].desc,
desc[5].desc,
correlator);
- } while ((lpar_rc == H_Busy) && (retry_count--));
+ } while ((lpar_rc == H_BUSY) && (retry_count--));
- if(lpar_rc != H_Success && lpar_rc != H_Dropped) {
+ if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
int i;
ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
for(i = 0; i < 6; i++) {
@@ -786,14 +786,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
/* we think we are done - reenable interrupts, then check once more to make sure we are done */
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
- ibmveth_assert(lpar_rc == H_Success);
+ ibmveth_assert(lpar_rc == H_SUCCESS);
netif_rx_complete(netdev);
if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
{
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
- ibmveth_assert(lpar_rc == H_Success);
+ ibmveth_assert(lpar_rc == H_SUCCESS);
more_work = 1;
goto restart_poll;
}
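
This poll hunk is the standard NAPI completion race check: the device interrupt is re-enabled before the final emptiness test, so a frame arriving in that window is caught by the recheck and polling resumes with the interrupt masked again. Distilled to its shape (a sketch, not the literal driver code):

    /* Sketch: close the window between "queue looks empty" and irq enable. */
    h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
    netif_rx_complete(netdev);

    if (ibmveth_rxq_pending_buffer(adapter) &&
        netif_rx_reschedule(netdev, frames_processed)) {
            /* a frame sneaked in; mask again and go back to polling */
            h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
            goto restart_poll;
    }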
@@ -813,7 +813,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs
if(netif_rx_schedule_prep(netdev)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
- ibmveth_assert(lpar_rc == H_Success);
+ ibmveth_assert(lpar_rc == H_SUCCESS);
__netif_rx_schedule(netdev);
}
return IRQ_HANDLED;
@@ -835,7 +835,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
0);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
@@ -847,7 +847,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastDisableFiltering |
IbmVethMcastClearFilterTable,
0);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
@@ -858,7 +858,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
}
}
@@ -867,7 +867,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableFiltering,
0);
- if(lpar_rc != H_Success) {
+ if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
}
}
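
The four ibmveth_set_multicast_list() hunks above check the same hcall at each step. The non-promiscuous path runs a fixed sequence: disable filtering and clear the hypervisor's filter table, add each multicast address, then re-enable filtering. A condensed sketch of that sequence (the mc_list walk and the address packing are assumptions typical of drivers of this era, not lines from this diff):

    /* Sketch of the filtered path; H_SUCCESS checks omitted for brevity. */
    struct dev_mc_list *mclist;
    u64 mcast_addr;

    h_multicast_ctrl(adapter->vdev->unit_address,
                     IbmVethMcastDisableFiltering | IbmVethMcastClearFilterTable,
                     0);

    for (mclist = netdev->mc_list; mclist; mclist = mclist->next) {
            mcast_addr = 0;
            /* pack the 6-byte MAC into the low bytes of the hcall parameter */
            memcpy(((char *)&mcast_addr) + 2, mclist->dmi_addr, 6);
            h_multicast_ctrl(adapter->vdev->unit_address,
                             IbmVethMcastAddFilter, mcast_addr);
    }

    h_multicast_ctrl(adapter->vdev->unit_address,
                     IbmVethMcastEnableFiltering, 0);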
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index f47dd87..892e8ed 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -80,7 +80,7 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
tasklet_kill(&hostdata->srp_task);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
dma_unmap_single(hostdata->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
@@ -230,7 +230,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
- if (rc == H_Resource)
+ if (rc == H_RESOURCE)
/* maybe kexecing and resource is busy. try a reset */
rc = ibmvscsi_reset_crq_queue(queue,
hostdata);
@@ -269,7 +269,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
req_irq_failed:
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
dma_unmap_single(hostdata->dev,
queue->msg_token,
@@ -295,7 +295,7 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
/* Re-enable the CRQ */
do {
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
- } while ((rc == H_InProgress) || (rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
if (rc)
printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);
@@ -317,7 +317,7 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
/* Close the CRQ */
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
- } while ((rc == H_Busy) || (H_isLongBusy(rc)));
+ } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
/* Clean out the queue */
memset(queue->msgs, 0x00, PAGE_SIZE);
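
The reset hunk that closes this diff pairs the H_FREE_CRQ retry loop with a wipe of the queue page; what follows in the function, beyond the lines shown, is presumably the re-registration that the H_RESOURCE fallback in ibmvscsi_init_crq_queue() relies on. A hedged sketch of that assumed remainder, inferred from the init path above:

    /* Sketch of the assumed tail of ibmvscsi_reset_crq_queue(). */
    queue->cur = 0;         /* assumed: rewind the consumer cursor */

    /* Re-register the same DMA-mapped page, as in ibmvscsi_init_crq_queue(). */
    rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
                            queue->msg_token, PAGE_SIZE);
    if (rc)
            printk(KERN_WARNING "ibmvscsi: couldn't reregister crq, rc %d\n", rc);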