author     Paul Mundt <lethal@linux-sh.org>   2009-08-24 13:49:17 (GMT)
committer  Paul Mundt <lethal@linux-sh.org>   2009-08-24 13:49:17 (GMT)
commit     12cceb6251c2cd23e936b25eca66be99ba41b081 (patch)
tree       b7f62853e67b305519c375162760422fbfc81b8e /arch/sh
parent     f13327864f94c3a0e6acca923df537d20059639f (diff)
parent     05ecd5a1f76c183cca381705b3adb7d77c9a0439 (diff)
Merge branch 'sh/st-integration'
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                       27
-rw-r--r--  arch/sh/boot/compressed/head_32.S      2
-rw-r--r--  arch/sh/drivers/pci/pci.c              4
-rw-r--r--  arch/sh/include/asm/Kbuild             2
-rw-r--r--  arch/sh/include/asm/cachectl.h        19
-rw-r--r--  arch/sh/include/asm/entry-macros.S     2
-rw-r--r--  arch/sh/include/asm/io.h              16
-rw-r--r--  arch/sh/include/asm/unistd_32.h        2
-rw-r--r--  arch/sh/include/asm/unistd_64.h        2
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c           1
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S         2
-rw-r--r--  arch/sh/kernel/entry-common.S          5
-rw-r--r--  arch/sh/kernel/io.c                   97
-rw-r--r--  arch/sh/kernel/io_generic.c           50
-rw-r--r--  arch/sh/kernel/irq.c                   2
-rw-r--r--  arch/sh/kernel/kgdb.c                  2
-rw-r--r--  arch/sh/kernel/process_32.c           20
-rw-r--r--  arch/sh/kernel/setup.c                 6
-rw-r--r--  arch/sh/kernel/signal_32.c            12
-rw-r--r--  arch/sh/kernel/sys_sh.c               43
-rw-r--r--  arch/sh/kernel/syscalls_32.S           2
-rw-r--r--  arch/sh/kernel/syscalls_64.S           2
-rw-r--r--  arch/sh/kernel/traps_32.c            188
-rw-r--r--  arch/sh/lib/clear_page.S               2
-rw-r--r--  arch/sh/lib/delay.c                    5
-rw-r--r--  arch/sh/mm/cache-sh4.c                45
-rw-r--r--  arch/sh/mm/ioremap_32.c                8
27 files changed, 452 insertions, 116 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 80b4f9a..2f5352c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -767,12 +767,31 @@ config UBC_WAKEUP
If unsure, say N.
-config CMDLINE_BOOL
- bool "Default bootloader kernel arguments"
+choice
+ prompt "Kernel command line"
+ optional
+ default CMDLINE_OVERWRITE
+ help
+ Setting this option allows the kernel command line arguments
+ to be set.
+
+config CMDLINE_OVERWRITE
+ bool "Overwrite bootloader kernel arguments"
+ help
+ Given string will overwrite any arguments passed in by
+ a bootloader.
+
+config CMDLINE_EXTEND
+ bool "Extend bootloader kernel arguments"
+ help
+ Given string will be concatenated with arguments passed in
+ by a bootloader.
+
+endchoice
config CMDLINE
- string "Initial kernel command string"
- depends on CMDLINE_BOOL
+ string "Kernel command line arguments string"
+ depends on CMDLINE_OVERWRITE || CMDLINE_EXTEND
default "console=ttySC1,115200"
endmenu
diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
index 06ac31f..02a3093 100644
--- a/arch/sh/boot/compressed/head_32.S
+++ b/arch/sh/boot/compressed/head_32.S
@@ -22,7 +22,7 @@ startup:
bt clear_bss
sub r0, r2
mov.l bss_start_addr, r0
- mov #0xe0, r1
+ mov #0xffffffe0, r1
and r1, r0 ! align cache line
mov.l text_start_addr, r3
mov r0, r1
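(The same `mov #imm8` cleanup recurs below in entry-macros.S, sh3/entry.S and clear_page.S.) On SH, `mov #imm, Rn` encodes an 8-bit immediate that the CPU sign-extends to 32 bits, so `mov #0xe0, r1` already produced 0xffffffe0; spelling out the full value just makes the 32-byte alignment mask explicit. A minimal C illustration of the sign extension, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int8_t imm = (int8_t)0xe0;            /* the 8-bit immediate as encoded */
        uint32_t r1 = (uint32_t)(int32_t)imm; /* CPU sign-extends to 32 bits */
        printf("0x%08x\n", r1);               /* prints 0xffffffe0 */
        return 0;
    }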
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 9a1c423..c481df6 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -295,6 +295,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
+#ifndef CONFIG_GENERIC_IOMAP
+
static void __iomem *ioport_map_pci(struct pci_dev *dev,
unsigned long port, unsigned int nr)
{
@@ -346,6 +348,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
}
EXPORT_SYMBOL(pci_iounmap);
+#endif /* CONFIG_GENERIC_IOMAP */
+
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 43910cd..e121c30 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,6 +1,6 @@
include include/asm-generic/Kbuild.asm
-header-y += cpu-features.h
+header-y += cachectl.h cpu-features.h
unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
diff --git a/arch/sh/include/asm/cachectl.h b/arch/sh/include/asm/cachectl.h
new file mode 100644
index 0000000..6ffb4b7
--- /dev/null
+++ b/arch/sh/include/asm/cachectl.h
@@ -0,0 +1,19 @@
+#ifndef _SH_CACHECTL_H
+#define _SH_CACHECTL_H
+
+/* Definitions for the cacheflush system call. */
+
+#define CACHEFLUSH_D_INVAL 0x1 /* invalidate (without write back) */
+#define CACHEFLUSH_D_WB 0x2 /* write back (without invalidate) */
+#define CACHEFLUSH_D_PURGE 0x3 /* writeback and invalidate */
+
+#define CACHEFLUSH_I 0x4
+
+/*
+ * Options for cacheflush system call
+ */
+#define ICACHE CACHEFLUSH_I /* flush instruction cache */
+#define DCACHE CACHEFLUSH_D_PURGE /* writeback and flush data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+
+#endif /* _SH_CACHECTL_H */
diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S
index 64fd0de..cc43a55 100644
--- a/arch/sh/include/asm/entry-macros.S
+++ b/arch/sh/include/asm/entry-macros.S
@@ -7,7 +7,7 @@
.endm
.macro sti
- mov #0xf0, r11
+ mov #0xfffffff0, r11
extu.b r11, r11
not r11, r11
stc sr, r10
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 2534814..5be45ea 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -92,8 +92,12 @@
static inline void ctrl_delay(void)
{
-#ifdef P2SEG
+#ifdef CONFIG_CPU_SH4
+ __raw_readw(CCN_PVR);
+#elif defined(P2SEG)
__raw_readw(P2SEG);
+#else
+#error "Need a dummy address for delay"
#endif
}
@@ -146,6 +150,7 @@ __BUILD_MEMORY_STRING(q, u64)
#define readl_relaxed(a) readl(a)
#define readq_relaxed(a) readq(a)
+#ifndef CONFIG_GENERIC_IOMAP
/* Simple MMIO */
#define ioread8(a) __raw_readb(a)
#define ioread16(a) __raw_readw(a)
@@ -166,6 +171,15 @@ __BUILD_MEMORY_STRING(q, u64)
#define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c))
#define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c))
#define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c))
+#endif
+
+#define mmio_insb(p,d,c) __raw_readsb(p,d,c)
+#define mmio_insw(p,d,c) __raw_readsw(p,d,c)
+#define mmio_insl(p,d,c) __raw_readsl(p,d,c)
+
+#define mmio_outsb(p,s,c) __raw_writesb(p,s,c)
+#define mmio_outsw(p,s,c) __raw_writesw(p,s,c)
+#define mmio_outsl(p,s,c) __raw_writesl(p,s,c)
/* synco on SH-4A, otherwise a nop */
#define mmiowb() wmb()
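With CONFIG_GENERIC_IOMAP unset, the ioread/iowrite family now expands directly to the __raw_* accessors. A driver-style sketch of how these are typically used against an ioremap()'d window; the register offsets here are hypothetical, not from this patch:

    #include <linux/io.h>

    /* Illustrative only: DEV_CTRL/DEV_STATUS are made-up offsets. */
    #define DEV_CTRL   0x00
    #define DEV_STATUS 0x04

    static u32 dev_kick_and_poll(void __iomem *base)
    {
        iowrite32(1, base + DEV_CTRL);        /* expands to __raw_writel() here */
        return ioread32(base + DEV_STATUS);   /* expands to __raw_readl() */
    }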
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 61d6ad9..925dd40 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -132,7 +132,7 @@
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
-#define __NR_modify_ldt 123
+#define __NR_cacheflush 123
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index a751699..2b84bc9 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -137,7 +137,7 @@
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
-#define __NR_modify_ldt 123
+#define __NR_cacheflush 123
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 808d99a..c1508a9 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -35,6 +35,7 @@ static void disable_ipr_irq(unsigned int irq)
unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
/* Set the priority in IPR to 0 */
__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
+ (void)__raw_readw(addr); /* Read back to flush write posting */
}
static void enable_ipr_irq(unsigned int irq)
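The read-back added to disable_ipr_irq() is the usual idiom for flushing a posted write: a load from the same register cannot complete until the preceding store has actually reached the device. A generic sketch of the pattern, with a hypothetical mask register:

    #include <linux/io.h>

    /* Sketch of the posted-write flush idiom; 'mask_reg' is hypothetical. */
    static void mask_irq_line(void __iomem *mask_reg, u16 bit)
    {
        __raw_writew(__raw_readw(mask_reg) & ~bit, mask_reg); /* may be posted */
        (void)__raw_readw(mask_reg); /* forces the store to complete first */
    }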
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 8c19e21..9421ec7 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -257,7 +257,7 @@ restore_all:
!
! Calculate new SR value
mov k3, k2 ! original SR value
- mov #0xf0, k1
+ mov #0xfffffff0, k1
extu.b k1, k1
not k1, k1
and k1, k2 ! Mask original SR value
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 7004776..68d9223 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -98,8 +98,9 @@ need_resched:
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
- and #0xf0, r0 ! interrupts off (exception path)?
- cmp/eq #0xf0, r0
+ shlr r0
+ and #(0xf0>>1), r0 ! interrupts off (exception path)?
+ cmp/eq #(0xf0>>1), r0
bt noresched
mov.l 3f, r0
jsr @r0 ! call preempt_schedule_irq
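The rewritten test works around the fact that `cmp/eq #imm, r0` sign-extends its 8-bit immediate: comparing against #0xf0 really compares against 0xfffffff0, which `r0 & 0xf0` can never equal. Shifting r0 right one bit first lets both the mask and the comparison constant be 0x78, which fits the positive immediate range. The equivalent check in C, for illustration:

    /* Are all four SR interrupt-mask bits (0xf0) set? After the shift,
     * every constant fits a signed 8-bit immediate. */
    static int irqs_masked(unsigned int sr)
    {
        return ((sr >> 1) & (0xf0 >> 1)) == (0xf0 >> 1);  /* 0x78 == 0x78 */
    }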
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 4f85fff..4770c24 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -1,12 +1,9 @@
/*
- * linux/arch/sh/kernel/io.c
+ * arch/sh/kernel/io.c - Machine independent I/O functions.
*
- * Copyright (C) 2000 Stuart Menefy
+ * Copyright (C) 2000 - 2009 Stuart Menefy
* Copyright (C) 2005 Paul Mundt
*
- * Provide real functions which expand to whatever the header file defined.
- * Also definitions of machine independent IO functions.
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -18,33 +15,87 @@
/*
* Copy data from IO memory space to "real" memory space.
- * This needs to be optimized.
*/
void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
{
- unsigned char *p = to;
- while (count) {
- count--;
- *p = readb(from);
- p++;
- from++;
- }
+ /*
+ * Would it be worthwhile doing byte and long transfers first
+ * to try and get aligned?
+ */
+#ifdef CONFIG_CPU_SH4
+ if ((count >= 0x20) &&
+ (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
+ int tmp2, tmp3, tmp4, tmp5, tmp6;
+
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l @%7+, %2 \n\t"
+ "movca.l r0, @%0 \n\t"
+ "mov.l @%7+, %3 \n\t"
+ "mov.l @%7+, %4 \n\t"
+ "mov.l @%7+, %5 \n\t"
+ "mov.l @%7+, %6 \n\t"
+ "mov.l @%7+, r7 \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l %2, @(0x04,%0) \n\t"
+ "mov #0x20, %2 \n\t"
+ "mov.l %3, @(0x08,%0) \n\t"
+ "sub %2, %1 \n\t"
+ "mov.l %4, @(0x0c,%0) \n\t"
+ "cmp/hi %1, %2 ! T if 32 > count \n\t"
+ "mov.l %5, @(0x10,%0) \n\t"
+ "mov.l %6, @(0x14,%0) \n\t"
+ "mov.l r7, @(0x18,%0) \n\t"
+ "mov.l r0, @(0x1c,%0) \n\t"
+ "bf.s 1b \n\t"
+ " add #0x20, %0 \n\t"
+ : "=&r" (to), "=&r" (count),
+ "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
+ "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
+ : "7"(from), "0" (to), "1" (count)
+ : "r0", "r7", "t", "memory");
+ }
+#endif
+
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for (; count > 3; count -= 4) {
+ *(u32 *)to = *(volatile u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(u8 *)to = *(volatile u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
}
EXPORT_SYMBOL(memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
- * This needs to be optimized.
*/
void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
{
- const unsigned char *p = from;
- while (count) {
- count--;
- writeb(*p, to);
- p++;
- to++;
- }
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for ( ; count > 3; count -= 4) {
+ *(volatile u32 *)to = *(u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(volatile u8 *)to = *(u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
}
EXPORT_SYMBOL(memcpy_toio);
@@ -62,6 +113,8 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
}
EXPORT_SYMBOL(memset_io);
+#ifndef CONFIG_GENERIC_IOMAP
+
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
void __iomem *ret;
@@ -79,3 +132,5 @@ void ioport_unmap(void __iomem *addr)
sh_mv.mv_ioport_unmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
+
+#endif /* CONFIG_GENERIC_IOMAP */
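The rewritten copies take an aligned 32-bit fast path (plus, on SH-4, a movca.l-based 32-byte inner loop for cache-line-aligned destinations), fall through to a byte loop for the tail, and end with mb() so the accesses are ordered before the caller proceeds. A usage sketch with a hypothetical mapping; the offset and size are made up:

    #include <linux/io.h>

    /* Illustrative only: 'base' would come from ioremap() or pci_iomap(). */
    static void fetch_rx_buffer(void __iomem *base, void *dst)
    {
        memcpy_fromio(dst, base + 0x100, 256);
    }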
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index 5a7f554..4ff5072 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -73,35 +73,19 @@ u32 generic_inl_p(unsigned long port)
void generic_insb(unsigned long port, void *dst, unsigned long count)
{
- volatile u8 *port_addr;
- u8 *buf = dst;
-
- port_addr = (volatile u8 __force *)__ioport_map(port, 1);
- while (count--)
- *buf++ = *port_addr;
+ __raw_readsb(__ioport_map(port, 1), dst, count);
+ dummy_read();
}
void generic_insw(unsigned long port, void *dst, unsigned long count)
{
- volatile u16 *port_addr;
- u16 *buf = dst;
-
- port_addr = (volatile u16 __force *)__ioport_map(port, 2);
- while (count--)
- *buf++ = *port_addr;
-
+ __raw_readsw(__ioport_map(port, 2), dst, count);
dummy_read();
}
void generic_insl(unsigned long port, void *dst, unsigned long count)
{
- volatile u32 *port_addr;
- u32 *buf = dst;
-
- port_addr = (volatile u32 __force *)__ioport_map(port, 4);
- while (count--)
- *buf++ = *port_addr;
-
+ __raw_readsl(__ioport_map(port, 4), dst, count);
dummy_read();
}
@@ -145,37 +129,19 @@ void generic_outl_p(u32 b, unsigned long port)
*/
void generic_outsb(unsigned long port, const void *src, unsigned long count)
{
- volatile u8 *port_addr;
- const u8 *buf = src;
-
- port_addr = (volatile u8 __force *)__ioport_map(port, 1);
-
- while (count--)
- *port_addr = *buf++;
+ __raw_writesb(__ioport_map(port, 1), src, count);
+ dummy_read();
}
void generic_outsw(unsigned long port, const void *src, unsigned long count)
{
- volatile u16 *port_addr;
- const u16 *buf = src;
-
- port_addr = (volatile u16 __force *)__ioport_map(port, 2);
-
- while (count--)
- *port_addr = *buf++;
-
+ __raw_writesw(__ioport_map(port, 2), src, count);
dummy_read();
}
void generic_outsl(unsigned long port, const void *src, unsigned long count)
{
- volatile u32 *port_addr;
- const u32 *buf = src;
-
- port_addr = (volatile u32 __force *)__ioport_map(port, 4);
- while (count--)
- *port_addr = *buf++;
-
+ __raw_writesl(__ioport_map(port, 4), src, count);
dummy_read();
}
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 278c68c..d105339 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -114,7 +114,7 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
#endif
irq_enter();
- irq = irq_demux(intc_evt2irq(irq));
+ irq = irq_demux(evt2irq(irq));
#ifdef CONFIG_IRQSTACKS
curctx = (union irq_ctx *)current_thread_info();
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 305aad7..d29de78 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -195,8 +195,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
regs->gbr = gdb_regs[GDB_GBR];
regs->mach = gdb_regs[GDB_MACH];
regs->macl = gdb_regs[GDB_MACL];
-
- __asm__ __volatile__ ("ldc %0, vbr" : : "r" (gdb_regs[GDB_VBR]));
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 9fee977..0673c47 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -32,15 +32,35 @@
#include <asm/ubc.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>
+#include <asm/watchdog.h>
int ubc_usercnt = 0;
+#ifdef CONFIG_32BIT
+static void watchdog_trigger_immediate(void)
+{
+ sh_wdt_write_cnt(0xFF);
+ sh_wdt_write_csr(0xC2);
+}
+
+void machine_restart(char * __unused)
+{
+ local_irq_disable();
+
+ /* Use watchdog timer to trigger reset */
+ watchdog_trigger_immediate();
+
+ while (1)
+ cpu_sleep();
+}
+#else
void machine_restart(char * __unused)
{
/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
asm volatile("ldc %0, sr\n\t"
"mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
+#endif
void machine_halt(void)
{
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 212e6bdd..d13bbaf 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -404,10 +404,14 @@ void __init setup_arch(char **cmdline_p)
if (!memory_end)
memory_end = memory_start + __MEMORY_SIZE;
-#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
+#ifdef CONFIG_CMDLINE_EXTEND
+ strlcat(command_line, " ", sizeof(command_line));
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#endif
#endif
/* Save unparsed command line copy for /proc/cmdline */
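For example, with CONFIG_CMDLINE="root=/dev/nfs" and a bootloader passing "console=ttySC1,115200", CMDLINE_EXTEND yields "console=ttySC1,115200 root=/dev/nfs", while CMDLINE_OVERWRITE discards the bootloader string and uses "root=/dev/nfs" alone.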
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index b5afbec..6010750 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -41,6 +41,16 @@ struct fdpic_func_descriptor {
};
/*
+ * The following define adds a 64 byte gap between the signal
+ * stack frame and previous contents of the stack. This allows
+ * frame unwinding in a function epilogue but only if a frame
+ * pointer is used in the function. This is necessary because
+ * current gcc compilers (<4.3) do not generate unwind info on
+ * SH for function epilogues.
+ */
+#define UNWINDGUARD 64
+
+/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int
@@ -327,7 +337,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
sp = current->sas_ss_sp + current->sas_ss_size;
}
- return (void __user *)((sp - frame_size) & -8ul);
+ return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul);
}
/* These symbols are defined with the addresses in the vsyscall page.
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 90d00e4..8aa5d1c 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -25,6 +25,8 @@
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
@@ -179,6 +181,47 @@ asmlinkage int sys_ipc(uint call, int first, int second,
return -EINVAL;
}
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
+{
+ struct vm_area_struct *vma;
+
+ if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
+ return -EINVAL;
+
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+ if (addr + len < addr)
+ return -EFAULT;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma (current->mm, addr);
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ switch (op & CACHEFLUSH_D_PURGE) {
+ case CACHEFLUSH_D_INVAL:
+ __flush_invalidate_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_WB:
+ __flush_wback_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_PURGE:
+ __flush_purge_region((void *)addr, len);
+ break;
+ }
+
+ if (op & CACHEFLUSH_I)
+ flush_cache_all();
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+}
+
asmlinkage int sys_uname(struct old_utsname __user *name)
{
int err;
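Since no libc wrapper exists for the new call, userspace would invoke it through syscall(2) with __NR_cacheflush (number 123, taking over the never-implemented modify_ldt slot) and the asm/cachectl.h constants. A minimal sketch, assuming the patched headers are installed:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>    /* pulls in __NR_cacheflush */
    #include <asm/cachectl.h>   /* ICACHE, DCACHE, BCACHE */

    int main(void)
    {
        static char code[64];   /* imagine freshly generated code here */

        /* Write back the D-cache and flush the I-cache over 'code'. */
        if (syscall(__NR_cacheflush, (unsigned long)code,
                    sizeof(code), BCACHE) < 0) {
            perror("cacheflush");
            return 1;
        }
        return 0;
    }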
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index f9e21fa..16ba225 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -139,7 +139,7 @@ ENTRY(sys_call_table)
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
- .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index bf420b6..af6fb74 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -143,7 +143,7 @@ sys_call_table:
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
- .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 05a04b6..c581dc3 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -24,6 +24,7 @@
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
+#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>
@@ -44,6 +45,87 @@
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
+static unsigned long se_user;
+static unsigned long se_sys;
+static unsigned long se_skipped;
+static unsigned long se_half;
+static unsigned long se_word;
+static unsigned long se_dword;
+static unsigned long se_multi;
+/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
+ valid! */
+static int se_usermode = 3;
+/* 0: no warning 1: print a warning message */
+static int se_kernmode_warn = 1;
+
+#ifdef CONFIG_PROC_FS
+static const char *se_usermode_action[] = {
+ "ignored",
+ "warn",
+ "fixup",
+ "fixup+warn",
+ "signal",
+ "signal+warn"
+};
+
+static int
+proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ char *p = page;
+ int len;
+
+ p += sprintf(p, "User:\t\t%lu\n", se_user);
+ p += sprintf(p, "System:\t\t%lu\n", se_sys);
+ p += sprintf(p, "Skipped:\t%lu\n", se_skipped);
+ p += sprintf(p, "Half:\t\t%lu\n", se_half);
+ p += sprintf(p, "Word:\t\t%lu\n", se_word);
+ p += sprintf(p, "DWord:\t\t%lu\n", se_dword);
+ p += sprintf(p, "Multi:\t\t%lu\n", se_multi);
+ p += sprintf(p, "User faults:\t%i (%s)\n", se_usermode,
+ se_usermode_action[se_usermode]);
+ p += sprintf(p, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+ se_kernmode_warn ? "+warn" : "");
+
+ len = (p - page) - off;
+ if (len < 0)
+ len = 0;
+
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+
+ return len;
+}
+
+static int proc_alignment_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char mode;
+
+ if (count > 0) {
+ if (get_user(mode, buffer))
+ return -EFAULT;
+ if (mode >= '0' && mode <= '5')
+ se_usermode = mode - '0';
+ }
+ return count;
+}
+
+static int proc_alignment_kern_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char mode;
+
+ if (count > 0) {
+ if (get_user(mode, buffer))
+ return -EFAULT;
+ if (mode >= '0' && mode <= '1')
+ se_kernmode_warn = mode - '0';
+ }
+ return count;
+}
+#endif
+
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
@@ -194,6 +276,13 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
count = 1<<(instruction&3);
+ switch (count) {
+ case 1: se_half += 1; break;
+ case 2: se_word += 1; break;
+ case 4: se_dword += 1; break;
+ case 8: se_multi += 1; break; /* ??? */
+ }
+
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
@@ -359,13 +448,6 @@ static inline int handle_delayslot(struct pt_regs *regs,
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
-/*
- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
- * opcodes..
- */
-
-static int handle_unaligned_notify_count = 10;
-
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma)
{
@@ -375,15 +457,13 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
- /* shout about the first ten userspace fixups */
- if (user_mode(regs) && handle_unaligned_notify_count>0) {
- handle_unaligned_notify_count--;
-
- printk(KERN_NOTICE "Fixing up unaligned userspace access "
+ /* shout about fixups */
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "Fixing up unaligned %s access "
"in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ user_mode(regs) ? "userspace" : "kernel",
current->comm, task_pid_nr(current),
(void *)regs->pc, instruction);
- }
ret = -EFAULT;
switch (instruction&0xF000) {
@@ -539,6 +619,36 @@ asmlinkage void do_address_error(struct pt_regs *regs,
local_irq_enable();
+ se_user += 1;
+
+#ifndef CONFIG_CPU_SH2A
+ set_fs(USER_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc & ~1), 2)) {
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+ set_fs(oldfs);
+
+ /* shout about userspace fixups */
+ if (se_usermode & 1)
+ printk(KERN_NOTICE "Unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, current->pid, (void *)regs->pc,
+ instruction);
+#endif
+
+ if (se_usermode & 2)
+ goto fixup;
+
+ if (se_usermode & 4)
+ goto uspace_segv;
+ else {
+ /* ignore */
+ regs->pc += instruction_size(instruction);
+ return;
+ }
+
+fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
@@ -546,15 +656,6 @@ asmlinkage void do_address_error(struct pt_regs *regs,
}
set_fs(USER_DS);
- if (copy_from_user(&instruction, (void __user *)(regs->pc),
- sizeof(instruction))) {
- /* Argh. Fault on the instruction itself.
- This should never happen non-SMP
- */
- set_fs(oldfs);
- goto uspace_segv;
- }
-
tmp = handle_unaligned_access(instruction, regs,
&user_mem_access);
set_fs(oldfs);
@@ -572,6 +673,14 @@ uspace_segv:
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
+ se_sys += 1;
+
+ if (se_kernmode_warn)
+ printk(KERN_NOTICE "Unaligned kernel access "
+ "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, current->pid, (void *)regs->pc,
+ instruction);
+
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
@@ -881,3 +990,38 @@ void dump_stack(void)
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten. Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ * We now locate it in /proc/cpu/alignment instead.
+ */
+static int __init alignment_init(void)
+{
+ struct proc_dir_entry *dir, *res;
+
+ dir = proc_mkdir("cpu", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, dir);
+ if (!res)
+ return -ENOMEM;
+
+ res->read_proc = proc_alignment_read;
+ res->write_proc = proc_alignment_write;
+
+ res = create_proc_entry("kernel_alignment", S_IWUSR | S_IRUGO, dir);
+ if (!res)
+ return -ENOMEM;
+
+ res->read_proc = proc_alignment_read;
+ res->write_proc = proc_alignment_kern_write;
+
+ return 0;
+}
+
+fs_initcall(alignment_init);
+#endif
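The new entries accept a single digit: /proc/cpu/alignment takes 0-5 (indexing the se_usermode_action[] table above) and /proc/cpu/kernel_alignment takes 0 or 1. A small userspace sketch equivalent to writing "5" to /proc/cpu/alignment, switching user faults to signal+warn:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/cpu/alignment", "w");
        if (!f) {
            perror("/proc/cpu/alignment");
            return 1;
        }
        fputc('5', f);   /* 5 = signal+warn, per se_usermode_action[] */
        return fclose(f) ? 1 : 0;
    }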
diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S
index 8342bfb..c92244d 100644
--- a/arch/sh/lib/clear_page.S
+++ b/arch/sh/lib/clear_page.S
@@ -57,7 +57,7 @@ ENTRY(clear_page)
ENTRY(__clear_user)
!
mov #0, r0
- mov #0xe0, r1 ! 0xffffffe0
+ mov #0xffffffe0, r1
!
! r4..(r4+31)&~32 -------- not aligned [ Area 0 ]
! (r4+31)&~32..(r4+r5)&~32 -------- aligned [ Area 1 ]
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index f3ddd21..faa8f86 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -21,13 +21,14 @@ void __delay(unsigned long loops)
inline void __const_udelay(unsigned long xloops)
{
+ xloops *= 4;
__asm__("dmulu.l %0, %2\n\t"
"sts mach, %0"
: "=r" (xloops)
: "0" (xloops),
- "r" (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy)
+ "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))
: "macl", "mach");
- __delay(xloops);
+ __delay(++xloops);
}
void __udelay(unsigned long usecs)
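The multiplier handed to dmulu.l is now loops_per_jiffy * (HZ/4), with xloops pre-scaled by 4 to compensate, presumably to keep the 32-bit constant term in range where HZ * loops_per_jiffy would approach the 32-bit limit; the ++xloops rounds the final loop count up so very short delays cannot truncate to zero. A C sketch of the arithmetic the dmulu.l/sts mach pair performs (assuming, as the kernel code does, that lpj * (HZ/4) fits in 32 bits):

    #include <stdint.h>

    /* High word of a 32x32->64 multiply, as dmulu.l + sts mach computes. */
    static uint32_t const_udelay_loops(uint32_t xloops, uint32_t lpj, uint32_t hz)
    {
        xloops *= 4;                                  /* factor moved out of... */
        uint32_t mult = lpj * (hz / 4);               /* ...the constant term */
        uint32_t loops = (uint32_t)(((uint64_t)xloops * mult) >> 32);
        return loops + 1;                             /* round up (++xloops) */
    }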
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 5cfe08d..397c103 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -581,6 +581,31 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
* Break the 1, 2 and 4 way variants of this out into separate functions to
* avoid nearly all the overhead of having the conditional stuff in the function
* bodies (+ the 1 and 2 way cases avoid saving any registers too).
+ *
+ * We want to eliminate unnecessary bus transactions, so this code uses
+ * a non-obvious technique.
+ *
+ * Loop over a cache way sized block of memory, one cache line at a time. For
+ * each line, use movca.l to cause the current cache line contents to be written
+ * back, but without reading anything from main memory. However this has the
+ * side effect that the cache is now caching that memory location. So follow
+ * this with a cache invalidate to mark the cache line invalid. And do all
+ * this with interrupts disabled, to avoid the cache line being accidentally
+ * evicted while it is holding garbage.
+ *
+ * This also breaks in a number of circumstances:
+ * - if there are modifications to the region of memory just above
+ * empty_zero_page (for example because a breakpoint has been placed
+ * there), then these can be lost.
+ *
+ * This is because the memory address which the cache temporarily
+ * caches in the above description is empty_zero_page. So the
+ * movca.l hits the cache (it is assumed that it misses, or at least
+ * isn't dirty), modifies the line and then invalidates it, losing the
+ * required change.
+ *
+ * - If caches are disabled or configured in write-through mode, then
+ * the movca.l writes garbage directly into memory.
*/
static void __flush_dcache_segment_1way(unsigned long start,
unsigned long extent_per_way)
@@ -630,6 +655,25 @@ static void __flush_dcache_segment_1way(unsigned long start,
} while (a0 < a0e);
}
+#ifdef CONFIG_CACHE_WRITETHROUGH
+/* This method of cache flushing avoids the problems discussed
+ * in the comment above if writethrough caches are enabled. */
+static void __flush_dcache_segment_2way(unsigned long start,
+ unsigned long extent_per_way)
+{
+ unsigned long array_addr;
+
+ array_addr = CACHE_OC_ADDRESS_ARRAY |
+ (start & cpu_data->dcache.entry_mask);
+
+ while (extent_per_way) {
+ ctrl_outl(0, array_addr);
+ ctrl_outl(0, array_addr + cpu_data->dcache.way_incr);
+ array_addr += cpu_data->dcache.linesz;
+ extent_per_way -= cpu_data->dcache.linesz;
+ }
+}
+#else
static void __flush_dcache_segment_2way(unsigned long start,
unsigned long extent_per_way)
{
@@ -688,6 +732,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
a1 += linesz;
} while (a0 < a0e);
}
+#endif
static void __flush_dcache_segment_4way(unsigned long start,
unsigned long extent_per_way)
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index da2f418..c325061 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -57,14 +57,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
if (is_pci_memory_fixed_range(phys_addr, size))
return (void __iomem *)phys_addr;
-#if !defined(CONFIG_PMB_FIXED)
- /*
- * Don't allow anybody to remap normal RAM that we're using..
- */
- if (phys_addr < virt_to_phys(high_memory))
- return NULL;
-#endif
-
/*
* Mappings have to be page-aligned
*/