 include/asm-mips/hazards.h | 362 ++++++++++-------------------
 1 file changed, 126 insertions(+), 236 deletions(-)
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 25f5e8a..bf0b804 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -11,103 +11,96 @@
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-
-	.macro	_ssnop
-	sll	$0, $0, 1
-	.endm
-
-	.macro	_ehb
-	sll	$0, $0, 3
-	.endm
-
-/*
- * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
- * use of the JTLB for instructions should not occur for 4 cpu cycles and use
- * for data translations should not occur for 3 cpu cycles.
- */
-#ifdef CONFIG_CPU_RM9000
-
-	.macro	mtc0_tlbw_hazard
-	.set	push
-	.set	mips32
-	_ssnop; _ssnop; _ssnop; _ssnop
-	.set	pop
-	.endm
-
-	.macro	tlbw_eret_hazard
-	.set	push
-	.set	mips32
-	_ssnop; _ssnop; _ssnop; _ssnop
-	.set	pop
-	.endm
-
+#ifdef __ASSEMBLY__
+#define ASMMACRO(name, code...) .macro name; code; .endm
 #else
-/*
- * The taken branch will result in a two cycle penalty for the two killed
- * instructions on R4000 / R4400.  Other processors only have a single cycle
- * hazard so this is nice trick to have an optimal code for a range of
- * processors.
- */
-	.macro	mtc0_tlbw_hazard
-	b	. + 8
-	.endm
+#define ASMMACRO(name, code...)						\
+__asm__(".macro " #name "; " #code "; .endm");				\
+									\
+static inline void name(void)						\
+{									\
+	__asm__ __volatile__ (#name);					\
+}
 
-	.macro	tlbw_eret_hazard
-	.endm
 #endif
+ASMMACRO(_ssnop,
+	 sll	$0, $0, 1
+	)
+
+ASMMACRO(_ehb,
+	 sll	$0, $0, 3
+	)
+
 /*
- * mtc0->mfc0 hazard
- * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
- * It is a MIPS32R2 processor so ehb will clear the hazard.
+ * TLB hazards
  */
+#if defined(CONFIG_CPU_MIPSR2)
 
-#ifdef CONFIG_CPU_MIPSR2
 /*
- * Use a macro for ehb unless explicit support for MIPSR2 is enabled
+ * MIPSR2 defines ehb for hazard avoidance
  */
-#define irq_enable_hazard \
+ASMMACRO(mtc0_tlbw_hazard,
+	 _ehb
+	)
+ASMMACRO(tlbw_use_hazard,
+	 _ehb
+	)
+ASMMACRO(tlb_probe_hazard,
+	 _ehb
+	)
+ASMMACRO(irq_enable_hazard,
+	)
+ASMMACRO(irq_disable_hazard,
 	_ehb
-
-#define irq_disable_hazard \
-	_ehb
-
-#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
-
+	)
+ASMMACRO(back_to_back_c0_hazard,
+	 _ehb
+	)
 /*
- * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
+ * gcc has a tradition of miscompiling the previous construct using the
+ * address of a label as argument to inline assembler.  Gas, on the other
+ * hand, has the annoying difference between la and dla, which are usable
+ * only for 32-bit resp. 64-bit code, so they can't be used without
+ * conditional compilation.  The alternative is switching the assembler to
+ * 64-bit code, which happens to work right even for 32-bit code ...
  */
+#define instruction_hazard()						\
+do {									\
+	unsigned long tmp;						\
+									\
+	__asm__ __volatile__(						\
+	"	.set	mips64r2				\n"	\
+	"	dla	%0, 1f					\n"	\
+	"	jr.hb	%0					\n"	\
+	"	.set	mips0					\n"	\
+	"1:							\n"	\
+	: "=r" (tmp));							\
+} while (0)
 
-#define irq_enable_hazard
-
-#define irq_disable_hazard
-
-#else
+#elif defined(CONFIG_CPU_R10000)
 
 /*
- * Classic MIPS needs 1 - 3 nops or ssnops
+ * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
  */
-#define irq_enable_hazard
-#define irq_disable_hazard \
-	_ssnop; _ssnop; _ssnop
-#endif
-
-#else /* __ASSEMBLY__ */
-
-__asm__(
-	"	.macro	_ssnop					\n"
-	"	sll	$0, $0, 1				\n"
-	"	.endm						\n"
-	"							\n"
-	"	.macro	_ehb					\n"
-	"	sll	$0, $0, 3				\n"
-	"	.endm						\n");
+ASMMACRO(mtc0_tlbw_hazard,
+	)
+ASMMACRO(tlbw_use_hazard,
+	)
+ASMMACRO(tlb_probe_hazard,
+	)
+ASMMACRO(irq_enable_hazard,
+	)
+ASMMACRO(irq_disable_hazard,
+	)
+ASMMACRO(back_to_back_c0_hazard,
+	)
+#define instruction_hazard() do { } while (0)
 
-#ifdef CONFIG_CPU_RM9000
+#elif defined(CONFIG_CPU_RM9000)
 
 /*
  * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
@@ -115,176 +108,73 @@ __asm__(
  * for data translations should not occur for 3 cpu cycles.
  */
 
-#define mtc0_tlbw_hazard()						\
-	__asm__ __volatile__(						\
-	"	.set	mips32					\n"	\
-	"	_ssnop						\n"	\
-	"	_ssnop						\n"	\
-	"	_ssnop						\n"	\
-	"	_ssnop						\n"	\
-	"	.set	mips0					\n")
-
-#define tlbw_use_hazard()						\
-	__asm__ __volatile__(						\
-	"	.set	mips32					\n"	\
-	"	_ssnop						\n"	\
-	"	_ssnop						\n"	\
-	"	_ssnop						\n"	\
-	"	_ssnop						\n"	\
-	"	.set	mips0					\n")
-
-#else
-
-/*
- * Overkill warning ...
- */
-#define mtc0_tlbw_hazard()						\
-	__asm__ __volatile__(						\
-	"	.set	noreorder				\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	.set	reorder					\n")
-
-#define tlbw_use_hazard()						\
-	__asm__ __volatile__(						\
-	"	.set	noreorder				\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	nop						\n"	\
-	"	.set	reorder					\n")
-
-#endif
-
-/*
- * Interrupt enable/disable hazards
- * Some processors have hazards when modifying
- * the status register to change the interrupt state
- */
-
-#ifdef CONFIG_CPU_MIPSR2
-
-__asm__("	.macro	irq_enable_hazard			\n"
-	"	_ehb						\n"
-	"	.endm						\n"
-	"							\n"
-	"	.macro	irq_disable_hazard			\n"
-	"	_ehb						\n"
-	"	.endm						\n");
+ASMMACRO(mtc0_tlbw_hazard,
+	 _ssnop; _ssnop; _ssnop; _ssnop
+	)
+ASMMACRO(tlbw_use_hazard,
+	 _ssnop; _ssnop; _ssnop; _ssnop
+	)
+ASMMACRO(tlb_probe_hazard,
+	 _ssnop; _ssnop; _ssnop; _ssnop
+	)
+ASMMACRO(irq_enable_hazard,
+	)
+ASMMACRO(irq_disable_hazard,
+	)
+ASMMACRO(back_to_back_c0_hazard,
+	)
+#define instruction_hazard() do { } while (0)
 
-#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
+#elif defined(CONFIG_CPU_SB1)
 
 /*
- * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
+ * Mostly like R4000 for historic reasons
  */
-
-__asm__(
-	"	.macro	irq_enable_hazard			\n"
-	"	.endm						\n"
-	"							\n"
-	"	.macro	irq_disable_hazard			\n"
-	"	.endm						\n");
+ASMMACRO(mtc0_tlbw_hazard,
+	)
+ASMMACRO(tlbw_use_hazard,
+	)
+ASMMACRO(tlb_probe_hazard,
+	)
+ASMMACRO(irq_enable_hazard,
+	)
+ASMMACRO(irq_disable_hazard,
+	 _ssnop; _ssnop; _ssnop
+	)
+ASMMACRO(back_to_back_c0_hazard,
+	)
+#define instruction_hazard() do { } while (0)
 
 #else
 
 /*
- * Default for classic MIPS processors.  Assume worst case hazards but don't
- * care about the irq_enable_hazard - sooner or later the hardware will
- * enable it and we don't care when exactly.
- */
-
-__asm__(
-	"	#						\n"
-	"	# There is a hazard but we do not care		\n"
-	"	#						\n"
-	"	.macro\tirq_enable_hazard			\n"
-	"	.endm						\n"
-	"							\n"
-	"	.macro\tirq_disable_hazard			\n"
-	"	_ssnop						\n"
-	"	_ssnop						\n"
-	"	_ssnop						\n"
-	"	.endm						\n");
-
-#endif
-
-#define irq_enable_hazard()						\
-	__asm__ __volatile__("irq_enable_hazard")
-#define irq_disable_hazard()						\
-	__asm__ __volatile__("irq_disable_hazard")
-
-
-/*
- * Back-to-back hazards -
+ * Finally the catchall case for all other processors including R4000, R4400,
+ * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
  *
- * What is needed to separate a move to cp0 from a subsequent read from the
- * same cp0 register?
- */
-#ifdef CONFIG_CPU_MIPSR2
-
-__asm__("	.macro	back_to_back_c0_hazard			\n"
-	"	_ehb						\n"
-	"	.endm						\n");
-
-#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
-      defined(CONFIG_CPU_SB1)
-
-__asm__("	.macro	back_to_back_c0_hazard			\n"
-	"	.endm						\n");
-
-#else
-
-__asm__("	.macro	back_to_back_c0_hazard			\n"
-	"	.set	noreorder				\n"
-	"	_ssnop						\n"
-	"	_ssnop						\n"
-	"	_ssnop						\n"
-	"	.set	reorder					\n"
-	"	.endm");
-
-#endif
-
-#define back_to_back_c0_hazard()					\
-	__asm__ __volatile__("back_to_back_c0_hazard")
-
-
-/*
- * Instruction execution hazard
- */
-#ifdef CONFIG_CPU_MIPSR2
-/*
- * gcc has a tradition of misscompiling the previous construct using the
- * address of a label as argument to inline assembler.  Gas otoh has the
- * annoying difference between la and dla which are only usable for 32-bit
- * rsp. 64-bit code, so can't be used without conditional compilation.
- * The alterantive is switching the assembler to 64-bit code which happens
- * to work right even for 32-bit code ...
+ * The taken branch will result in a two cycle penalty for the two killed
+ * instructions on R4000 / R4400.  Other processors only have a single cycle
+ * hazard so this is a nice trick to have optimal code for a range of
+ * processors.
  */
-#define instruction_hazard()						\
-do {									\
-	unsigned long tmp;						\
-									\
-	__asm__ __volatile__(						\
-	"	.set	mips64r2				\n"	\
-	"	dla	%0, 1f					\n"	\
-	"	jr.hb	%0					\n"	\
-	"	.set	mips0					\n"	\
-	"1:							\n"	\
-	: "=r" (tmp));							\
-} while (0)
-
-#else
+ASMMACRO(mtc0_tlbw_hazard,
+	 nop
+	)
+ASMMACRO(tlbw_use_hazard,
+	 nop; nop; nop
+	)
+ASMMACRO(tlb_probe_hazard,
+	 nop; nop; nop
+	)
+ASMMACRO(irq_enable_hazard,
+	)
+ASMMACRO(irq_disable_hazard,
+	 nop; nop; nop
+	)
+ASMMACRO(back_to_back_c0_hazard,
+	 _ssnop; _ssnop; _ssnop
+	)
 #define instruction_hazard() do { } while (0)
 
-#endif
-
-extern void mips_ihb(void);
-#endif /* __ASSEMBLY__ */
+#endif
 
 #endif /* _ASM_HAZARDS_H */
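A note on the ASMMACRO() trick the patch introduces: a single definition emits a gas macro for .S files and, on the C side, the same gas macro at file scope plus an inline wrapper of the same name, so the assembler and C variants can no longer drift apart as they could in the old file. A minimal standalone sketch of the pattern, assuming a GCC-compatible toolchain; the macro name demo_barrier and its nop body are illustrative, not from the patch:

	#define ASMMACRO(name, code...)					\
	__asm__(".macro " #name "; " #code "; .endm");			\
									\
	static inline void name(void)					\
	{								\
		__asm__ __volatile__ (#name);				\
	}

	/* Emits ".macro demo_barrier; nop; .endm" at file scope and a C
	 * wrapper that expands the gas macro at the call site. */
	ASMMACRO(demo_barrier,
		 nop
		)

	int main(void)
	{
		demo_barrier();		/* assembles to a single nop */
		return 0;
	}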
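The mtc0_tlbw_hazard()/tlbw_use_hazard() pair is meant to bracket a TLB write. A hedged usage sketch modelled on the indexed-write sequences in arch/mips/mm/tlb-r4k.c; write_c0_*() and tlb_write_indexed() are the standard <asm/mipsregs.h> accessors, and the argument values are placeholders:

	#include <asm/hazards.h>
	#include <asm/mipsregs.h>

	static void write_tlb_entry(int idx, unsigned long hi,
				    unsigned long lo0, unsigned long lo1)
	{
		write_c0_index(idx);
		write_c0_entryhi(hi);
		write_c0_entrylo0(lo0);
		write_c0_entrylo1(lo1);
		mtc0_tlbw_hazard();	/* mtc0 -> tlbwi hazard */
		tlb_write_indexed();
		tlbw_use_hazard();	/* tlbwi -> first use of the entry */
	}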
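back_to_back_c0_hazard() answers the question the removed comment asked: what separates a move to a cp0 register from a subsequent read of the same register. A hypothetical sketch, not from the patch, using the usual Status register accessors from <asm/mipsregs.h>:

	#include <asm/hazards.h>
	#include <asm/mipsregs.h>

	static inline void clear_status_ie(void)
	{
		write_c0_status(read_c0_status() & ~ST0_IE);
		back_to_back_c0_hazard();	/* mtc0 -> mfc0 on c0_status */
		/* a read_c0_status() here is guaranteed to see the new value */
	}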
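Finally, instruction_hazard() is the barrier for the case where the instruction stream itself has changed, for example after patching kernel text; on MIPSR2 it boils down to the jr.hb sequence in the patch. A hypothetical usage sketch: patch_insn(), addr and new_insn are made up for illustration, while flush_icache_range() is the standard kernel interface:

	#include <linux/types.h>
	#include <asm/cacheflush.h>
	#include <asm/hazards.h>

	static void patch_insn(unsigned long addr, u32 new_insn)
	{
		*(u32 *)addr = new_insn;		/* rewrite one instruction */
		flush_icache_range(addr, addr + 4);	/* make it visible to I-fetch */
		instruction_hazard();			/* clear the execution hazard */
	}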