Diffstat (limited to 'arch/sparc')

-rw-r--r--  arch/sparc/include/asm/irq.h   |  4 ++--
-rw-r--r--  arch/sparc/include/asm/psr.h   |  6 +++---
-rw-r--r--  arch/sparc/include/asm/srmmu.h | 30 +++++++++++++++---------------

3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/sparc/include/asm/irq.h b/arch/sparc/include/asm/irq.h
index bbe0204..2faf7a0 100644
--- a/arch/sparc/include/asm/irq.h
+++ b/arch/sparc/include/asm/irq.h
@@ -12,7 +12,7 @@
 #include <asm/psr.h>
 
 /* Set SPARC Processor Interrupt Level */
-extern inline void set_pil(unsigned int level)
+static inline void set_pil(unsigned int level)
 {
 	unsigned int psr = get_psr();
 
@@ -20,7 +20,7 @@ extern inline void set_pil(unsigned int level)
 }
 
 /* Get SPARC Processor Interrupt Level */
-extern inline unsigned int get_pil(void)
+static inline unsigned int get_pil(void)
 {
 	unsigned int psr = get_psr();
 	return (psr & PSR_PIL) >> PSR_PIL_OFS;
diff --git a/arch/sparc/include/asm/psr.h b/arch/sparc/include/asm/psr.h
index 70af8e0..a91bdc9 100644
--- a/arch/sparc/include/asm/psr.h
+++ b/arch/sparc/include/asm/psr.h
@@ -42,7 +42,7 @@
 #ifndef __ASSEMBLY__
 
 /* Get the %psr register. */
-extern __inline__ unsigned int get_psr(void)
+static __inline__ unsigned int get_psr(void)
 {
 	unsigned int psr;
 	__asm__ __volatile__("rd %%psr, %0\n\t"
@@ -53,7 +53,7 @@ extern __inline__ unsigned int get_psr(void)
 	return psr;
 }
 
-extern __inline__ void put_psr(unsigned int new_psr)
+static __inline__ void put_psr(unsigned int new_psr)
 {
 	__asm__ __volatile__("wr %0, 0x0, %%psr\n\t" "nop\n\t" "nop\n\t" "nop\n\t":	/* no outputs */
 			     :"r"(new_psr)
@@ -67,7 +67,7 @@ extern __inline__ void put_psr(unsigned int new_psr)
 
 extern unsigned int fsr_storage;
 
-extern __inline__ unsigned int get_fsr(void)
+static __inline__ unsigned int get_fsr(void)
 {
 	unsigned int fsr = 0;
 
diff --git a/arch/sparc/include/asm/srmmu.h b/arch/sparc/include/asm/srmmu.h
index 74b1554..8da2f67 100644
--- a/arch/sparc/include/asm/srmmu.h
+++ b/arch/sparc/include/asm/srmmu.h
@@ -148,7 +148,7 @@ extern void *srmmu_nocache_pool;
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 
 /* Accessing the MMU control register. */
-extern __inline__ unsigned int srmmu_get_mmureg(void)
+static __inline__ unsigned int srmmu_get_mmureg(void)
 {
 	unsigned int retval;
 	__asm__ __volatile__("lda [%%g0] %1, %0\n\t":
@@ -156,14 +156,14 @@ extern __inline__ unsigned int srmmu_get_mmureg(void)
 	return retval;
 }
 
-extern __inline__ void srmmu_set_mmureg(unsigned long regval)
+static __inline__ void srmmu_set_mmureg(unsigned long regval)
 {
 	__asm__ __volatile__("sta %0, [%%g0] %1\n\t"::"r"(regval),
 			     "i"(ASI_M_MMUREGS):"memory");
 }
 
 
-extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
+static __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
 {
 	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
 	__asm__ __volatile__("sta %0, [%1] %2\n\t"::"r"(paddr),
@@ -171,7 +171,7 @@ extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
 			     "i"(ASI_M_MMUREGS):"memory");
 }
 
-extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
+static __inline__ unsigned long srmmu_get_ctable_ptr(void)
 {
 	unsigned int retval;
 
@@ -181,13 +181,13 @@ extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
 	return (retval & SRMMU_CTX_PMASK) << 4;
 }
 
-extern __inline__ void srmmu_set_context(int context)
+static __inline__ void srmmu_set_context(int context)
 {
 	__asm__ __volatile__("sta %0, [%1] %2\n\t"::"r"(context),
 			     "r"(SRMMU_CTX_REG), "i"(ASI_M_MMUREGS):"memory");
 }
 
-extern __inline__ int srmmu_get_context(void)
+static __inline__ int srmmu_get_context(void)
 {
 	register int retval;
 	__asm__ __volatile__("lda [%1] %2, %0\n\t":
@@ -196,7 +196,7 @@ extern __inline__ int srmmu_get_context(void)
 	return retval;
 }
 
-extern __inline__ unsigned int srmmu_get_fstatus(void)
+static __inline__ unsigned int srmmu_get_fstatus(void)
 {
 	unsigned int retval;
 
@@ -206,7 +206,7 @@ extern __inline__ unsigned int srmmu_get_fstatus(void)
 	return retval;
 }
 
-extern __inline__ unsigned int srmmu_get_faddr(void)
+static __inline__ unsigned int srmmu_get_faddr(void)
 {
 	unsigned int retval;
 
@@ -217,7 +217,7 @@ extern __inline__ unsigned int srmmu_get_faddr(void)
 }
 
 /* This is guaranteed on all SRMMU's. */
-extern __inline__ void srmmu_flush_whole_tlb(void)
+static __inline__ void srmmu_flush_whole_tlb(void)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(0x400),	/* Flush entire TLB!! */
 			     "i"(ASI_M_FLUSH_PROBE):"memory");
@@ -225,14 +225,14 @@ extern __inline__ void srmmu_flush_whole_tlb(void)
 }
 
 /* These flush types are not available on all chips... */
-extern __inline__ void srmmu_flush_tlb_ctx(void)
+static __inline__ void srmmu_flush_tlb_ctx(void)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(0x300),	/* Flush TLB ctx.. */
 			     "i"(ASI_M_FLUSH_PROBE):"memory");
 }
 
 
-extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
+static __inline__ void srmmu_flush_tlb_region(unsigned long addr)
 {
 	addr &= SRMMU_PGDIR_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(addr | 0x200),	/* Flush TLB region.. */
@@ -240,7 +240,7 @@ extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
 }
 
 
-extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
+static __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
 {
 	addr &= SRMMU_REAL_PMD_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(addr | 0x100),	/* Flush TLB segment.. */
@@ -248,7 +248,7 @@ extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
 }
 
 
-extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
+static __inline__ void srmmu_flush_tlb_page(unsigned long page)
 {
 	page &= PAGE_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t"::"r"(page),	/* Flush TLB page.. */
@@ -256,7 +256,7 @@ extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
 }
 
 
-extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
+static __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
 {
 	unsigned long retval;
 
@@ -268,7 +268,7 @@ extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
 	return retval;
 }
 
-extern __inline__ int srmmu_get_pte(unsigned long addr)
+static __inline__ int srmmu_get_pte(unsigned long addr)
 {
 	register unsigned long entry;
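
Note on the change (background, not part of the diff): every hunk above is the same mechanical conversion of `extern inline`/`extern __inline__` to `static inline`/`static __inline__` in header-defined functions. The likely motivation is the inline-semantics switch in newer gcc: under the old GNU89 dialect, `extern inline` in a header means "inline-only, emit no symbol", but under C99/GNU11 semantics (the gcc 5 default) it means the opposite: the translation unit provides the one external definition, so every .c file that includes the header defines the same global symbol and the link fails. `static inline` behaves identically under both dialects by giving each translation unit its own internal-linkage copy. A minimal sketch of the failure mode, using hypothetical names (demo.h, add_one, add_one_fixed):

	/* demo.h -- hypothetical header, for illustration only */
	#ifndef DEMO_H
	#define DEMO_H

	/* -std=gnu89: inline-only definition, no symbol emitted.
	 * -std=gnu11 (gcc >= 5 default): this is THE external definition,
	 * so two .c files including this header both define the global
	 * symbol `add_one' and linking fails with a multiple-definition
	 * error.
	 */
	extern inline unsigned int add_one(unsigned int x)
	{
		return x + 1;
	}

	/* The form the patch converts to: internal linkage, so each
	 * translation unit gets a private copy and multiple inclusion
	 * is harmless under either dialect.
	 */
	static inline unsigned int add_one_fixed(unsigned int x)
	{
		return x + 1;
	}

	#endif /* DEMO_H */

With two files a.c and b.c that both include demo.h and call add_one(), `gcc -std=gnu11 a.c b.c` should fail at link time ("multiple definition of `add_one'"), while the same build with add_one_fixed() links cleanly under both -std=gnu89 and -std=gnu11.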