author     Jens Axboe <jens.axboe@oracle.com>  2007-10-22 18:02:46 (GMT)
committer  Jens Axboe <jens.axboe@oracle.com>  2007-10-22 19:19:59 (GMT)
commit     58b053e4ce9d2fc3023645c1b96e537c72aa8d9a (patch)
tree       35fbd72eb62a37375bc06c01e356afd7da0c9693 /arch/sparc/kernel/ioport.c
parent     f9527f121bd42c5d300815fbf12216bc1a63f60f (diff)
download   linux-58b053e4ce9d2fc3023645c1b96e537c72aa8d9a.tar.xz
Update arch/ to use sg helpers
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch/sparc/kernel/ioport.c')
-rw-r--r--  arch/sparc/kernel/ioport.c  17
1 file changed, 8 insertions(+), 9 deletions(-)
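
Context for the diff below: the patch relies on the scatterlist accessor helpers introduced alongside sg chaining, where sg_virt(sg) evaluates to page_address(sg_page(sg)) + sg->offset. The following sketch is illustrative only (example_sg_phys() is a hypothetical name, not part of this patch); it shows why the open-coded two-line expression in pci_map_sg() collapses into a single call.

/* Illustrative sketch, not code from this patch: compute the physical
 * address of one sg entry the way the converted pci_map_sg() does.
 * sg_virt(sg) is page_address(sg_page(sg)) + sg->offset, so this matches
 * the pre-patch open-coded expression. */
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <asm/io.h>

static unsigned long example_sg_phys(struct scatterlist *sg)
{
	/* Highmem pages have no permanent kernel mapping, hence the check. */
	BUG_ON(page_address(sg_page(sg)) == NULL);
	return virt_to_phys(sg_virt(sg));
}
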
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 9c3ed88..97aa50d 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -727,9 +727,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
- sg->dvma_address =
- virt_to_phys(page_address(sg->page)) + sg->offset;
+ BUG_ON(page_address(sg_page(sg)) == NULL);
+ sg->dvma_address = virt_to_phys(sg_virt(sg));
sg->dvma_length = sg->length;
}
return nents;
@@ -748,9 +747,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
@@ -798,9 +797,9 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
@@ -814,9 +813,9 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
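
Caller-side usage, as a hedged sketch (example_map_one() is a hypothetical name, not from this patch): sg_init_one() fills a one-entry scatterlist so that the sg_page()/sg_virt() calls in the hunks above see a valid page and offset, and sg_dma_address()/sg_dma_length() then read back what this sparc32 pci_map_sg() stored in dvma_address/dvma_length.

/* Hypothetical caller, for illustration only: map a single lowmem kernel
 * buffer through the converted pci_map_sg() and unmap it afterwards. */
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int example_map_one(struct pci_dev *pdev, void *buf, size_t len)
{
	struct scatterlist sg;

	/* Zero the entry, set page/offset/length and the end marker. */
	sg_init_one(&sg, buf, len);

	if (pci_map_sg(pdev, &sg, 1, PCI_DMA_TODEVICE) == 0)
		return -EIO;

	/* ... program the device with sg_dma_address(&sg) / sg_dma_length(&sg) ... */

	pci_unmap_sg(pdev, &sg, 1, PCI_DMA_TODEVICE);
	return 0;
}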