author      Simon Arlott <simon@fire.lp0.eu>   2007-05-11 21:55:43 (GMT)
committer   Tony Luck <tony.luck@intel.com>    2007-05-11 21:55:43 (GMT)
commit      72fdbdce3d52282f8ea95f512e871791256754e6 (patch)
tree        b7d544875c5d89e10859f3e5dc97e2e064a00e54 /arch/ia64/sn
parent      0a3fd051c7036ef71b58863f8e5da7c3dabd9d3f (diff)
download    linux-72fdbdce3d52282f8ea95f512e871791256754e6.tar.xz
[IA64] spelling fixes: arch/ia64/
Spelling and apostrophe fixes in arch/ia64/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/sn')
-rw-r--r--  arch/ia64/sn/kernel/bte.c           | 12
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c     |  4
-rw-r--r--  arch/ia64/sn/kernel/io_common.c     |  2
-rw-r--r--  arch/ia64/sn/kernel/setup.c         |  2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c   |  2
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c   |  8
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c         |  2
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c          |  8
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c  |  6
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c  |  2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c   |  6
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c   | 16
12 files changed, 35 insertions, 35 deletions
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index ff1c556..b362d6d 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -63,7 +63,7 @@ static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode)
* Use the block transfer engine to move kernel memory from src to dest
* using the assigned mode.
*
- * Paramaters:
+ * Parameters:
* src - physical address of the transfer source.
* dest - physical address of the transfer destination.
* len - number of bytes to transfer from source to dest.
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(bte_copy);
* use the block transfer engine to move kernel
* memory from src to dest using the assigned mode.
*
- * Paramaters:
+ * Parameters:
* src - physical address of the transfer source.
* dest - physical address of the transfer destination.
* len - number of bytes to transfer from source to dest.
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(bte_copy);
* for IBCT0/1 in the SGI documentation.
*
* NOTE: If the source, dest, and len are all cache line aligned,
- * then it would be _FAR_ preferrable to use bte_copy instead.
+ * then it would be _FAR_ preferable to use bte_copy instead.
*/
bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
@@ -300,7 +300,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
* a standard bte copy.
*
* One nasty exception to the above rule is when the
- * source and destination are not symetrically
+ * source and destination are not symmetrically
* mis-aligned. If the source offset from the first
* cache line is different from the destination offset,
* we make the first section be the entire transfer
@@ -337,7 +337,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
/*
- * We have two contigous bcopy
+ * We have two contiguous bcopy
* blocks. Merge them.
*/
headBcopyLen += footBcopyLen;
@@ -375,7 +375,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
} else {
/*
- * The transfer is not symetric, we will
+ * The transfer is not symmetric, we will
* allocate a buffer large enough for all the
* data, bte_copy into that buffer and then
* bcopy to the destination.
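
Aside: the comment blocks above document the bte_unaligned_copy() interface these hunks touch, with its signature shown in the second hunk. A minimal caller might look like the sketch below; it is illustrative only — BTE_NORMAL, BTE_SUCCESS and ia64_tpa() are assumed from the surrounding SN2 headers, not taken from this patch.

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <asm/sn/bte.h>	/* bte_result_t, bte_unaligned_copy() -- assumed header */

	/* Sketch: copy a kernel buffer whose endpoints are not cache-line aligned. */
	static int copy_unaligned(void *src, void *dst, u64 len)
	{
		bte_result_t rc;

		/* The BTE operates on physical addresses (see "Parameters:" above);
		 * ia64_tpa() and the BTE_NORMAL mode flag are assumptions here. */
		rc = bte_unaligned_copy(ia64_tpa((u64) src), ia64_tpa((u64) dst),
					len, BTE_NORMAL);

		return (rc == BTE_SUCCESS) ? 0 : -EIO;
	}

Per the note above, callers with cache-line-aligned source, destination and length would use bte_copy instead.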
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index b6fcf81..27c5936 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -105,7 +105,7 @@ int shub1_bte_error_handler(unsigned long _nodepda)
}
BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
- /* Reenable both bte interfaces */
+ /* Re-enable both bte interfaces */
imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
@@ -243,7 +243,7 @@ bte_crb_error_handler(cnodeid_t cnode, int btenum,
/*
* The caller has already figured out the error type, we save that
- * in the bte handle structure for the thread excercising the
+ * in the bte handle structure for the thread exercising the
* interface to consume.
*/
bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 7ed72d3..787ed64 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -479,7 +479,7 @@ sn_io_early_init(void)
}
/*
- * prime sn_pci_provider[]. Individial provider init routines will
+ * prime sn_pci_provider[]. Individual provider init routines will
* override their respective default entries.
*/
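
Aside: the comment above describes a common "prime with defaults, let each provider override" pattern. A schematic of that pattern, not the actual io_common.c code — sn_pci_default_provider and the init-routine names mentioned in the comments are assumptions:

	/* Schematic: fill the provider table with a generic default first. */
	struct sn_pcibus_provider;				/* per-ASIC DMA/IRQ ops */

	extern struct sn_pcibus_provider *sn_pci_provider[];	/* indexed by ASIC type */
	extern struct sn_pcibus_provider sn_pci_default_provider; /* assumed name */

	static void prime_provider_table(int nr_asic_types)
	{
		int i;

		/* Every ASIC type starts out pointing at the generic provider... */
		for (i = 0; i < nr_asic_types; i++)
			sn_pci_provider[i] = &sn_pci_default_provider;

		/*
		 * ...and each provider init routine (pcibr, tioca, tioce) later
		 * overwrites its own slot with its real operations.
		 */
	}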
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index a9bed5c..a574fcd 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -167,7 +167,7 @@ void __init early_sn_setup(void)
* IO on SN2 is done via SAL calls, early_printk won't work without this.
*
* This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
- * Any changes to those file may have to be made hereas well.
+ * Any changes to those file may have to be made here as well.
*/
efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
config_tables = __va(efi_systab->tables);
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 5d318b5..033c8a9 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -104,7 +104,7 @@ static inline unsigned long wait_piowc(void)
*
* SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
* Context switching user threads which have memory-mapped MMIO may cause
- * PIOs to issue from seperate CPUs, thus the PIO writes must be drained
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
* from the previous CPU's Shub before execution resumes on the new CPU.
*/
void sn_migrate(struct task_struct *task)
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index c08db9c..44ccc0d 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -293,7 +293,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
/*
- * Pull the remote per partititon specific variables from the specified
+ * Pull the remote per partition specific variables from the specified
* partition.
*/
enum xpc_retval
@@ -461,7 +461,7 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
// >>> iterations of the for-loop, bail if set?
- // >>> should we impose a minumum #of entries? like 4 or 8?
+ // >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->local_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size;
@@ -514,7 +514,7 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
// >>> iterations of the for-loop, bail if set?
- // >>> should we impose a minumum #of entries? like 4 or 8?
+ // >>> should we impose a minimum #of entries? like 4 or 8?
for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->msg_size;
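
Aside: both hunks above sit in the same allocation strategy — start from the requested entry count and retry with one entry fewer until the queue fits in memory (the ">>> minimum #of entries" question is about bounding that loop). A stripped-down sketch of the idea, not the actual xpc_channel.c code:

	#include <linux/slab.h>
	#include <linux/types.h>

	/* Sketch: shrink the message queue until an allocation succeeds. */
	static void *alloc_msgqueue(size_t msg_size, int wanted, int minimum,
				    int *nentries_out)
	{
		int nentries;
		void *queue;

		for (nentries = wanted; nentries >= minimum; nentries--) {
			queue = kzalloc(nentries * msg_size, GFP_KERNEL);
			if (queue) {
				*nentries_out = nentries; /* tell the caller what we got */
				return queue;
			}
		}
		return NULL;	/* even `minimum` entries could not be allocated */
	}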
@@ -1478,7 +1478,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
/*
- * Before proceding with the teardown we have to wait until all
+ * Before proceeding with the teardown we have to wait until all
* existing references cease.
*/
wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index da72135..e58fcad 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -531,7 +531,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
/*
- * If we wanted to allow promiscous mode to work like an
+ * If we wanted to allow promiscuous mode to work like an
* unswitched network, this would be a good point to OR in a
* mask of partitions which should be receiving all packets.
*/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 7a291a2..d79ddac 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -333,7 +333,7 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
- * 4.10 don't implment this.
+ * 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
@@ -348,7 +348,7 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work round PCI chipset
- * bugs). This code is retained for compatability with old
+ * bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
@@ -379,7 +379,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
- * 4.10 don't implment this.
+ * 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
@@ -394,7 +394,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work round PCI chipset
- * bugs). This code is retained for compatability with old
+ * bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
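
Aside: the read and write paths above share the same two-step protocol — try the prom's SN_SAL_IOIF_PCI_SAFE service first (implemented from prom 4.10 on, able to work around PCI-bus hardware issues), and only fall back to the plain SAL_PROBE path for older proms. Schematically; the two helper functions are hypothetical wrappers, not kernel APIs:

	#include <linux/errno.h>
	#include <linux/types.h>

	/* Hypothetical wrappers around the two SAL services described above. */
	int sal_pci_safe_read(u16 port, u32 *val, u8 size);	/* prom >= 4.10 path  */
	int sal_probe_read(u16 port, u32 *val, u8 size);	/* works on all proms */

	static int legacy_pci_read(u16 port, u32 *val, u8 size)
	{
		/* Preferred path: the prom can work around PCI-bus level hw issues. */
		if (sal_pci_safe_read(port, val, size) == 0)
			return size;

		/* Fallback for pre-4.10 proms; cannot work around chipset bugs. */
		if (sal_probe_read(port, val, size) == 0)
			return size;

		return -EINVAL;
	}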
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 935029f..239b3ce 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -30,7 +30,7 @@ static void mark_ate(struct ate_resource *ate_resource, int start, int number,
/*
* find_free_ate: Find the first free ate index starting from the given
- * index for the desired consequtive count.
+ * index for the desired consecutive count.
*/
static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
@@ -88,7 +88,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
return -1;
/*
- * Find the required number of free consequtive ates.
+ * Find the required number of free consecutive ates.
*/
start_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index,
@@ -105,7 +105,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
- * Indices in rm map range from 1..num_entries. Indicies returned
+ * Indices in rm map range from 1..num_entries. Indices returned
* to caller range from 0..num_entries-1.
*
* Return the start index on success, -1 on failure.
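
Aside: the comments above describe two things worth keeping straight — a first-fit scan for `count` consecutive free ATEs, and the index convention (1..num_entries inside the resource map, 0..num_entries-1 to the caller). A simplified illustration over a plain byte map, not the kernel's rm implementation; `start` is taken as a 1-based map index:

	/* Simplified first-fit search for `count` consecutive free entries.
	 * map[] is 1-based (index 0 unused) like the rm map described above;
	 * the value returned to the caller is 0-based, or -1 on failure. */
	static int find_free_run(const unsigned char *map, int num_entries,
				 int start, int count)
	{
		int i, run = 0;

		for (i = start; i <= num_entries; i++) {
			if (map[i] == 0) {		/* entry i is free          */
				if (++run == count)
					return i - count; /* 1-based start, minus 1 */
			} else {
				run = 0;		/* run broken, start again  */
			}
		}
		return -1;				/* no suitable run found    */
	}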
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 95af40c..e626e50 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -201,7 +201,7 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
}
/*
- * Wrapper routine for free'ing DMA maps
+ * Wrapper routine for freeing DMA maps
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 8a2cb4e..b9bedbd 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -223,7 +223,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
/*
* Scan all vga controllers on this bus making sure they all
- * suport FW. If not, return.
+ * support FW. If not, return.
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
@@ -364,7 +364,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
* @req_size: len (bytes) to map
*
* Map @paddr into CA address space using the GART mechanism. The mapped
- * dma_addr_t is guarenteed to be contiguous in CA bus space.
+ * dma_addr_t is guaranteed to be contiguous in CA bus space.
*/
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
@@ -526,7 +526,7 @@ tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
return 0;
/*
- * If card is 64 or 48 bit addresable, use a direct mapping. 32
+ * If card is 64 or 48 bit addressable, use a direct mapping. 32
* bit direct is so restrictive w.r.t. where the memory resides that
* we don't use it even though CA has some support.
*/
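
Aside: the last hunk above captures the CA mapping policy — widest direct mapping first, GART-backed mapping as the catch-all, and no 32-bit direct mapping at all. A schematic of that dispatch on the device's DMA mask; tioca_dma_d48() and tioca_dma_mapped() appear in the hunk headers above, while tioca_dma_d64(), the prototypes and the exact mask cutoffs are assumptions:

	#include <linux/pci.h>
	#include <linux/types.h>

	/* Illustrative prototypes for this file's own mapping helpers. */
	u64 tioca_dma_d64(u64 paddr);
	u64 tioca_dma_d48(struct pci_dev *pdev, u64 paddr);
	u64 tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size);

	/* Schematic mapping policy for the CA provider. */
	static u64 ca_pick_mapping(struct pci_dev *pdev, u64 paddr, size_t len)
	{
		u64 mask = pdev->dma_mask;

		if (mask == ~0UL)			/* fully 64-bit addressable    */
			return tioca_dma_d64(paddr);	/* direct, no GART entry used  */

		if (mask >= 0xffffffffffffUL)		/* at least 48-bit addressable */
			return tioca_dma_d48(pdev, paddr);

		/* 32-bit direct is too restrictive (see comment above): use the GART. */
		return tioca_dma_mapped(pdev, paddr, len);
	}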
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 35f854f..f4c0b96 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -256,9 +256,9 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce __iomem **base,
* @ct_addr: the coretalk address to map
* @len: number of bytes to map
*
- * Given the addressing type, set up various paramaters that define the
+ * Given the addressing type, set up various parameters that define the
* ATE pool to use. Search for a contiguous block of entries to cover the
- * length, and if enough resources exist, fill in the ATE's and construct a
+ * length, and if enough resources exist, fill in the ATEs and construct a
* tioce_dmamap struct to track the mapping.
*/
static u64
@@ -581,8 +581,8 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
*/
if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
/*
- * We have two options for 40-bit mappings: 16GB "super" ATE's
- * and 64MB "regular" ATE's. We'll try both if needed for a
+ * We have two options for 40-bit mappings: 16GB "super" ATEs
+ * and 64MB "regular" ATEs. We'll try both if needed for a
* given mapping but which one we try first depends on the
* size. For requests >64MB, prefer to use a super page with
* regular as the fallback. Otherwise, try in the reverse order.
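
Aside: the ordering heuristic in the comment above is easy to state directly — requests larger than 64MB try a 16GB "super" ATE first and fall back to regular ATEs, while smaller requests try the reverse order. A sketch; the two try_* helpers are hypothetical stand-ins for the file's ATE allocator, not real functions:

	#include <linux/types.h>

	/* Hypothetical stand-ins for the file's 40-bit ATE allocation paths. */
	u64 try_super_ate(u64 ct_addr, u64 len);	/* 16GB "super" ATEs   */
	u64 try_regular_ate(u64 ct_addr, u64 len);	/* 64MB "regular" ATEs */

	#define MB(x)	((u64)(x) << 20)

	/* Ordering heuristic from the comment above: >64MB prefers a super page. */
	static u64 map_40bit(u64 ct_addr, u64 len)
	{
		u64 addr;

		if (len > MB(64)) {
			addr = try_super_ate(ct_addr, len);	/* super first...      */
			if (!addr)
				addr = try_regular_ate(ct_addr, len); /* ...then regular */
		} else {
			addr = try_regular_ate(ct_addr, len);	/* regular first...    */
			if (!addr)
				addr = try_super_ate(ct_addr, len);   /* ...then super   */
		}

		return addr;	/* zero means neither ATE pool had resources */
	}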
@@ -687,8 +687,8 @@ tioce_error_intr_handler(int irq, void *arg)
}
/**
- * tioce_reserve_m32 - reserve M32 ate's for the indicated address range
- * @tioce_kernel: TIOCE context to reserve ate's for
+ * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range
+ * @tioce_kernel: TIOCE context to reserve ATEs for
* @base: starting bus address to reserve
* @limit: last bus address to reserve
*
@@ -763,7 +763,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
/*
* Set PMU pagesize to the largest size available, and zero out
- * the ate's.
+ * the ATEs.
*/
tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
@@ -784,7 +784,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
}
/*
- * Reserve ATE's corresponding to reserved address ranges. These
+ * Reserve ATEs corresponding to reserved address ranges. These
* include:
*
* Memory space covered by each PPB mem base/limit register