Diffstat (limited to 'arch/parisc/kernel')
 arch/parisc/kernel/cache.c         |  26
 arch/parisc/kernel/entry.S         |   2
 arch/parisc/kernel/inventory.c     |  41
 arch/parisc/kernel/irq.c           |  10
 arch/parisc/kernel/module.c        |  50
 arch/parisc/kernel/pci-dma.c       |  12
 arch/parisc/kernel/pci.c           |   3
 arch/parisc/kernel/process.c       |   2
 arch/parisc/kernel/processor.c     |  19
 arch/parisc/kernel/setup.c         |   3
 arch/parisc/kernel/sys_parisc32.c  |  62
 arch/parisc/kernel/syscall_table.S |   4
 arch/parisc/kernel/time.c          |  90
 arch/parisc/kernel/traps.c         |  20
 14 files changed, 182 insertions, 162 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 837530e..b6ed34d 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -1,5 +1,4 @@
-/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -398,12 +397,13 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 
 void clear_user_page_asm(void *page, unsigned long vaddr)
 {
+	unsigned long flags;
 	/* This function is implemented in assembly in pacache.S */
 	extern void __clear_user_page_asm(void *page, unsigned long vaddr);
 
-	purge_tlb_start();
+	purge_tlb_start(flags);
 	__clear_user_page_asm(page, vaddr);
-	purge_tlb_end();
+	purge_tlb_end(flags);
 }
 
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
@@ -444,20 +444,24 @@ extern void clear_user_page_asm(void *page, unsigned long vaddr);
 
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
+	unsigned long flags;
+
 	purge_kernel_dcache_page((unsigned long)page);
-	purge_tlb_start();
+	purge_tlb_start(flags);
 	pdtlb_kernel(page);
-	purge_tlb_end();
+	purge_tlb_end(flags);
 	clear_user_page_asm(page, vaddr);
 }
 EXPORT_SYMBOL(clear_user_page);
 
 void flush_kernel_dcache_page_addr(void *addr)
 {
+	unsigned long flags;
+
 	flush_kernel_dcache_page_asm(addr);
-	purge_tlb_start();
+	purge_tlb_start(flags);
 	pdtlb_kernel(addr);
-	purge_tlb_end();
+	purge_tlb_end(flags);
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
@@ -490,8 +494,10 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
 	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
+		unsigned long flags;
+
 		mtsp(sid, 1);
-		purge_tlb_start();
+		purge_tlb_start(flags);
 		if (split_tlb) {
 			while (npages--) {
 				pdtlb(start);
@@ -504,7 +510,7 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
 				start += PAGE_SIZE;
 			}
 		}
-		purge_tlb_end();
+		purge_tlb_end(flags);
 	}
 }
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ae3e70cd..e552e54 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -553,7 +553,7 @@
 	 * on most of those machines only handles cache transactions.
 	 */
 	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
-	depi		1,12,1,\prot
+	depdi		1,12,1,\prot
 
 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
 	convert_for_tlb_insert20 \pte
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index bd1f7f1..d228d82 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -170,23 +170,27 @@ static void __init pagezero_memconfig(void)
 static int __init
 pat_query_module(ulong pcell_loc, ulong mod_index)
 {
-	pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
 	unsigned long bytecnt;
 	unsigned long temp;	/* 64-bit scratch value */
 	long status;		/* PDC return value status */
 	struct parisc_device *dev;
 
+	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+	if (!pa_pdc_cell)
+		panic("couldn't allocate memory for PDC_PAT_CELL!");
+
 	/* return cell module (PA or Processor view) */
 	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
-				     PA_VIEW, &pa_pdc_cell);
+				     PA_VIEW, pa_pdc_cell);
 
 	if (status != PDC_OK) {
 		/* no more cell modules or error */
 		return status;
 	}
 
-	temp = pa_pdc_cell.cba;
-	dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
+	temp = pa_pdc_cell->cba;
+	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
 	if (!dev) {
 		return PDC_OK;
 	}
@@ -203,8 +207,8 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 
 	/* save generic info returned from the call */
 	/* REVISIT: who is the consumer of this? not sure yet... */
-	dev->mod_info = pa_pdc_cell.mod_info;	/* pass to PAT_GET_ENTITY() */
-	dev->pmod_loc = pa_pdc_cell.mod_location;
+	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
+	dev->pmod_loc = pa_pdc_cell->mod_location;
 
 	register_parisc_device(dev);	/* advertise device */
 
@@ -216,14 +220,14 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 	case PAT_ENTITY_PROC:
 		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
-			pa_pdc_cell.mod[0]);
+			pa_pdc_cell->mod[0]);
 		break;
 
 	case PAT_ENTITY_MEM:
 		printk(KERN_DEBUG
 			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
-			pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
-			pa_pdc_cell.mod[2]);
+			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
+			pa_pdc_cell->mod[2]);
 		break;
 	case PAT_ENTITY_CA:
 		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
@@ -243,23 +247,26 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 	print_ranges:
 		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
 				    IO_VIEW, &io_pdc_cell);
-		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
-		for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
+		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
+		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
 			printk(KERN_DEBUG "  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
-				i, pa_pdc_cell.mod[2 + i * 3],	/* type */
-				pa_pdc_cell.mod[3 + i * 3],	/* start */
-				pa_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
+				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
+				pa_pdc_cell->mod[3 + i * 3],	/* start */
+				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
 			printk(KERN_DEBUG "  IO_VIEW  %ld: 0x%016lx 0x%016lx 0x%016lx\n",
-				i, io_pdc_cell.mod[2 + i * 3],	/* type */
-				io_pdc_cell.mod[3 + i * 3],	/* start */
-				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
+				i, io_pdc_cell->mod[2 + i * 3],	/* type */
+				io_pdc_cell->mod[3 + i * 3],	/* start */
+				io_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
 		}
 		printk(KERN_DEBUG "\n");
 		break;
 	}
 #endif /* DEBUG_PAT */
+
+	kfree(pa_pdc_cell);
+
 	return PDC_OK;
 }
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 8007f1e..330f536 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
-		cpumask_setall(&irq_desc[irq].affinity);
+		cpumask_setall(irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 
@@ -138,13 +138,13 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_dest < 0)
 		return -1;
 
-	cpumask_copy(&irq_desc[irq].affinity, dest);
+	cpumask_copy(irq_desc[irq].affinity, dest);
 
 	return 0;
 }
 #endif
 
-static struct hw_interrupt_type cpu_interrupt_type = {
+static struct irq_chip cpu_interrupt_type = {
 	.typename	= "CPU",
 	.startup	= cpu_startup_irq,
 	.shutdown	= cpu_disable_irq,
@@ -299,7 +299,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -356,7 +356,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	cpumask_copy(&dest, &irq_desc[irq].affinity);
+	cpumask_copy(&dest, irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index ef5caf2..61ee0ee 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -86,8 +86,12 @@
  * the bottom of the table, which has a maximum signed displacement of
  * 0x3fff; however, since we're only going forward, this becomes
  * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
- * at most 1023 entries */
-#define MAX_GOTS	1023
+ * at most 1023 entries.
+ * To overcome this 14bit displacement with some kernel modules, we'll
+ * use instead the unusal 16bit displacement method (see reassemble_16a)
+ * which gives us a maximum positive displacement of 0x7fff, and as such
+ * allows us to allocate up to 4095 GOT entries. */
+#define MAX_GOTS	4095
 
 /* three functions to determine where in the module core
  * or init pieces the location is */
@@ -145,12 +149,40 @@ struct stub_entry {
 /* The reassemble_* functions prepare an immediate value for
    insertion into an opcode. pa-risc uses all sorts of weird bitfields
    in the instruction to hold the value.  */
+static inline int sign_unext(int x, int len)
+{
+	int len_ones;
+
+	len_ones = (1 << len) - 1;
+	return x & len_ones;
+}
+
+static inline int low_sign_unext(int x, int len)
+{
+	int sign, temp;
+
+	sign = (x >> (len-1)) & 1;
+	temp = sign_unext(x, len-1);
+	return (temp << 1) | sign;
+}
+
 static inline int reassemble_14(int as14)
 {
 	return (((as14 & 0x1fff) << 1) |
 		((as14 & 0x2000) >> 13));
 }
 
+static inline int reassemble_16a(int as16)
+{
+	int s, t;
+
+	/* Unusual 16-bit encoding, for wide mode only.  */
+	t = (as16 << 1) & 0xffff;
+	s = (as16 & 0x8000);
+	return (t ^ s ^ (s >> 1)) | (s >> 15);
+}
+
+
 static inline int reassemble_17(int as17)
 {
 	return (((as17 & 0x10000) >> 16) |
@@ -407,6 +439,7 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
 	enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
 {
 	struct stub_entry *stub;
+	int __maybe_unused d;
 
 	/* initialize stub_offset to point in front of the section */
 	if (!me->arch.section[targetsec].stub_offset) {
@@ -460,12 +493,19 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
 	 */
 	switch (stub_type) {
 	case ELF_STUB_GOT:
-		stub->insns[0] = 0x537b0000;	/* ldd 0(%dp),%dp	*/
+		d = get_got(me, value, addend);
+		if (d <= 15) {
+			/* Format 5 */
+			stub->insns[0] = 0x0f6010db;	/* ldd 0(%dp),%dp	*/
+			stub->insns[0] |= low_sign_unext(d, 5) << 16;
+		} else {
+			/* Format 3 */
+			stub->insns[0] = 0x537b0000;	/* ldd 0(%dp),%dp	*/
+			stub->insns[0] |= reassemble_16a(d);
+		}
 		stub->insns[1] = 0x53610020;	/* ldd 10(%dp),%r1	*/
 		stub->insns[2] = 0xe820d000;	/* bve (%r1)		*/
 		stub->insns[3] = 0x537b0030;	/* ldd 18(%dp),%dp	*/
-
-		stub->insns[0] |= reassemble_14(get_got(me, value, addend) & 0x3fff);
 		break;
 	case ELF_STUB_MILLI:
 		stub->insns[0] = 0x20200000;	/* ldil 0,%r1		*/
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 7d927ea..c07f618 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -90,12 +90,14 @@ static inline int map_pte_uncached(pte_t * pte,
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
 	do {
+		unsigned long flags;
+
 		if (!pte_none(*pte))
 			printk(KERN_ERR "map_pte_uncached: page already exists\n");
 		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
-		purge_tlb_start();
+		purge_tlb_start(flags);
 		pdtlb_kernel(orig_vaddr);
-		purge_tlb_end();
+		purge_tlb_end(flags);
 		vaddr += PAGE_SIZE;
 		orig_vaddr += PAGE_SIZE;
 		(*paddr_ptr) += PAGE_SIZE;
@@ -168,11 +170,13 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
 	do {
+		unsigned long flags;
 		pte_t page = *pte;
+
 		pte_clear(&init_mm, vaddr, pte);
-		purge_tlb_start();
+		purge_tlb_start(flags);
 		pdtlb_kernel(orig_vaddr);
-		purge_tlb_end();
+		purge_tlb_end(flags);
 		vaddr += PAGE_SIZE;
 		orig_vaddr += PAGE_SIZE;
 		pte++;
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 6936386..f7064ab 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -1,5 +1,4 @@
-/* $Id: pci.c,v 1.6 2000/01/29 00:12:05 grundler Exp $
- *
+/*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 61c0707..1f3aa8d 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -156,7 +156,7 @@ void machine_power_off(void)
 	 * software. The user has to press the button himself. */
 
 	printk(KERN_EMERG "System shut down completed.\n"
-	       KERN_EMERG "Please power this system off now.");
+	       "Please power this system off now.");
 }
 
 void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e09d0f7..c8fb61e 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -1,5 +1,4 @@
-/* $Id: processor.c,v 1.1 2002/07/20 16:27:06 rhirst Exp $
- *
+/*
  *    Initial setup-routines for HP 9000 based hardware.
  *
  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
@@ -121,22 +120,28 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
 	if (is_pdc_pat()) {
 		ulong status;
 		unsigned long bytecnt;
-		pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+		pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
 #undef USE_PAT_CPUID
 #ifdef USE_PAT_CPUID
 		struct pdc_pat_cpu_num cpu_info;
 #endif
 
+		pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+		if (!pa_pdc_cell)
+			panic("couldn't allocate memory for PDC_PAT_CELL!");
+
 		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
-			dev->mod_index, PA_VIEW, &pa_pdc_cell);
+			dev->mod_index, PA_VIEW, pa_pdc_cell);
 
 		BUG_ON(PDC_OK != status);
 
 		/* verify it's the same as what do_pat_inventory() found */
-		BUG_ON(dev->mod_info != pa_pdc_cell.mod_info);
-		BUG_ON(dev->pmod_loc != pa_pdc_cell.mod_location);
+		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
+		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
+
+		txn_addr = pa_pdc_cell->mod[0];   /* id_eid for IO sapic */
 
-		txn_addr = pa_pdc_cell.mod[0];   /* id_eid for IO sapic */
+		kfree(pa_pdc_cell);
 
 #ifdef USE_PAT_CPUID
 		/* We need contiguous numbers for cpuid. Firmware's notion
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 82131ca..cb71f3d 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -1,5 +1,4 @@
-/* $Id: setup.c,v 1.8 2000/02/02 04:42:38 prumpf Exp $
- *
+/*
  *    Initial setup-routines for HP 9000 based hardware.
  *
  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 1adb40c..92a0aca 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -174,68 +174,6 @@ asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
 	return ret;
 }
 
-/*** copied from mips64 ***/
-/*
- * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
- * 64-bit unsigned longs.
- */
-
-static inline int
-get_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
-	n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
-	if (ufdset) {
-		unsigned long odd;
-
-		if (!access_ok(VERIFY_WRITE, ufdset, n*sizeof(u32)))
-			return -EFAULT;
-
-		odd = n & 1UL;
-		n &= ~1UL;
-		while (n) {
-			unsigned long h, l;
-			__get_user(l, ufdset);
-			__get_user(h, ufdset+1);
-			ufdset += 2;
-			*fdset++ = h << 32 | l;
-			n -= 2;
-		}
-		if (odd)
-			__get_user(*fdset, ufdset);
-	} else {
-		/* Tricky, must clear full unsigned long in the
-		 * kernel fdset at the end, this makes sure that
-		 * actually happens.
-		 */
-		memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32));
-	}
-	return 0;
-}
-
-static inline void
-set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
-	unsigned long odd;
-	n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
-
-	if (!ufdset)
-		return;
-
-	odd = n & 1UL;
-	n &= ~1UL;
-	while (n) {
-		unsigned long h, l;
-		l = *fdset++;
-		h = l >> 32;
-		__put_user(l, ufdset);
-		__put_user(h, ufdset+1);
-		ufdset += 2;
-		n -= 2;
-	}
-	if (odd)
-		__put_user(*fdset, ufdset);
-}
-
 struct msgbuf32 {
 	int mtype;
 	char mtext[1];
 };
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 03b9a01..cf145eb 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -413,6 +413,10 @@
 	ENTRY_SAME(dup3)
 	ENTRY_SAME(pipe2)
 	ENTRY_SAME(inotify_init1)
+	ENTRY_COMP(preadv)		/* 315 */
+	ENTRY_COMP(pwritev)
+	ENTRY_COMP(rt_tgsigqueueinfo)
+	ENTRY_SAME(perf_counter_open)
 
 	/* Nothing yet */
 
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index d4dd056..a79c6f9 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -56,9 +56,9 @@ static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
  */
 irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 {
-	unsigned long now;
+	unsigned long now, now2;
 	unsigned long next_tick;
-	unsigned long cycles_elapsed, ticks_elapsed;
+	unsigned long cycles_elapsed, ticks_elapsed = 1;
 	unsigned long cycles_remainder;
 	unsigned int cpu = smp_processor_id();
 	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
@@ -71,44 +71,24 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	/* Initialize next_tick to the expected tick time. */
 	next_tick = cpuinfo->it_value;
 
-	/* Get current interval timer.
-	 * CR16 reads as 64 bits in CPU wide mode.
-	 * CR16 reads as 32 bits in CPU narrow mode.
-	 */
+	/* Get current cycle counter (Control Register 16). */
 	now = mfctl(16);
 
 	cycles_elapsed = now - next_tick;
 
-	if ((cycles_elapsed >> 5) < cpt) {
+	if ((cycles_elapsed >> 6) < cpt) {
 		/* use "cheap" math (add/subtract) instead
 		 * of the more expensive div/mul method
 		 */
 		cycles_remainder = cycles_elapsed;
-		ticks_elapsed = 1;
 		while (cycles_remainder > cpt) {
 			cycles_remainder -= cpt;
 			ticks_elapsed++;
 		}
 	} else {
+		/* TODO: Reduce this to one fdiv op */
 		cycles_remainder = cycles_elapsed % cpt;
-		ticks_elapsed = 1 + cycles_elapsed / cpt;
-	}
-
-	/* Can we differentiate between "early CR16" (aka Scenario 1) and
-	 * "long delay" (aka Scenario 3)? I don't think so.
-	 *
-	 * We expected timer_interrupt to be delivered at least a few hundred
-	 * cycles after the IT fires. But it's arbitrary how much time passes
-	 * before we call it "late". I've picked one second.
-	 */
-	if (unlikely(ticks_elapsed > HZ)) {
-		/* Scenario 3: very long delay?  bad in any case */
-		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
-			" cycles %lX rem %lX "
-			" next/now %lX/%lX\n",
-			cpu,
-			cycles_elapsed, cycles_remainder,
-			next_tick, now );
+		ticks_elapsed += cycles_elapsed / cpt;
 	}
 
 	/* convert from "division remainder" to "remainder of clock tick" */
@@ -122,18 +102,56 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 
 	cpuinfo->it_value = next_tick;
 
-	/* Skip one clocktick on purpose if we are likely to miss next_tick.
-	 * We want to avoid the new next_tick being less than CR16.
-	 * If that happened, itimer wouldn't fire until CR16 wrapped.
-	 * We'll catch the tick we missed on the tick after that.
+	/* Program the IT when to deliver the next interrupt.
+	 * Only bottom 32-bits of next_tick are writable in CR16!
 	 */
-	if (!(cycles_remainder >> 13))
-		next_tick += cpt;
-
-	/* Program the IT when to deliver the next interrupt. */
-	/* Only bottom 32-bits of next_tick are written to cr16.  */
 	mtctl(next_tick, 16);
 
+	/* Skip one clocktick on purpose if we missed next_tick.
+	 * The new CR16 must be "later" than current CR16 otherwise
+	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
+	 * later on a 1Ghz processor. We'll account for the missed
+	 * tick on the next timer interrupt.
+	 *
+	 * "next_tick - now" will always give the difference regardless
+	 * if one or the other wrapped. If "now" is "bigger" we'll end up
+	 * with a very large unsigned number.
+	 */
+	now2 = mfctl(16);
+	if (next_tick - now2 > cpt)
+		mtctl(next_tick+cpt, 16);
+
+#if 1
+/*
+ * GGG: DEBUG code for how many cycles programming CR16 used.
+ */
+	if (unlikely(now2 - now > 0x3000)) 	/* 12K cycles */
+		printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!" " cyc %lX rem %lX " " next/now %lX/%lX\n",
+			cpu, now2 - now, cycles_elapsed, cycles_remainder,
+			next_tick, now );
+#endif
+
+	/* Can we differentiate between "early CR16" (aka Scenario 1) and
+	 * "long delay" (aka Scenario 3)? I don't think so.
+	 *
+	 * Timer_interrupt will be delivered at least a few hundred cycles
+	 * after the IT fires. But it's arbitrary how much time passes
+	 * before we call it "late". I've picked one second.
+	 *
+	 * It's important NO printk's are between reading CR16 and
+	 * setting up the next value. May introduce huge variance.
+	 */
+	if (unlikely(ticks_elapsed > HZ)) {
+		/* Scenario 3: very long delay?  bad in any case */
+		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+			" cycles %lX rem %lX "
+			" next/now %lX/%lX\n",
+			cpu,
+			cycles_elapsed, cycles_remainder,
+			next_tick, now );
+	}
 
 	/* Done mucking with unreliable delivery of interrupts.
 	 * Go do system house keeping.
@@ -173,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
 
 /* clock source code */
 
-static cycle_t read_cr16(void)
+static cycle_t read_cr16(struct clocksource *cs)
 {
 	return get_cycles();
 }
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index c32f5d6..528f0ff 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -250,15 +250,14 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
 	oops_enter();
 
 	/* Amuse the user in a SPARC fashion */
-	if (err) printk(
-KERN_CRIT "      _______________________________ \n"
-KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
-KERN_CRIT "      ------------------------------- \n"
-KERN_CRIT "             \\   ^__^\n"
-KERN_CRIT "              \\  (xx)\\_______\n"
-KERN_CRIT "                 (__)\\       )\\/\\\n"
-KERN_CRIT "                  U  ||----w |\n"
-KERN_CRIT "                     ||     ||\n");
+	if (err) printk(KERN_CRIT
+			"      _______________________________ \n"
+			"     < Your System ate a SPARC! Gah! >\n"
+			"      ------------------------------- \n"
+			"             \\   ^__^\n"
+			"                 (__)\\       )\\/\\\n"
+			"                  U  ||----w |\n"
+			"                     ||     ||\n");
 
 	/* unlock the pdc lock if necessary */
 	pdc_emergency_unlock();
@@ -797,7 +796,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 		else
 			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
 			       code);
-		printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
+		printk(KERN_CONT "pid=%d command='%s'\n",
+			task_pid_nr(current), current->comm);
 		show_regs(regs);
 #endif
 		si.si_signo = SIGSEGV;
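
Note on the module.c change: the old ELF_STUB_GOT stub encoded the GOT offset with the 14-bit
displacement form (at most 0x1fff forward, hence 1023 eight-byte entries), while the new code
switches to the "16a" wide-mode encoding (0x7fff forward, 4095 entries) and to a short format-5
form when the displacement fits in 5 bits. Below is a small standalone sketch, not kernel code,
that exercises the same bitfield helpers on a made-up displacement; the two opcode constants are
copied from the stub above, while main() and the sample value d are purely illustrative.

/* sketch: PA-RISC immediate encoders used by the module-loader GOT stub */
#include <stdio.h>

/* keep only the low 'len' bits of x */
static int sign_unext(int x, int len)
{
	return x & ((1 << len) - 1);
}

/* rotate the sign bit down to bit 0, as the format-5 immediate expects */
static int low_sign_unext(int x, int len)
{
	int sign = (x >> (len - 1)) & 1;

	return (sign_unext(x, len - 1) << 1) | sign;
}

/* the unusual 16-bit (wide mode only) displacement encoding */
static int reassemble_16a(int as16)
{
	int t = (as16 << 1) & 0xffff;
	int s = as16 & 0x8000;

	return (t ^ s ^ (s >> 1)) | (s >> 15);
}

int main(void)
{
	int d = 42;		/* hypothetical GOT displacement */
	unsigned int insn;

	if (d <= 15)		/* format 5: immediate sits in bits 16..20 */
		insn = 0x0f6010db | (low_sign_unext(d, 5) << 16);
	else			/* format 3: 16-bit displacement in the low half */
		insn = 0x537b0000 | reassemble_16a(d);

	printf("ldd encoding for d=%d: 0x%08x\n", d, insn);
	return 0;
}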
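Note on the time.c change: timer_interrupt() now splits the elapsed CR16 cycles into whole
clockticks plus a remainder, looping with subtraction when the interrupt is at most ~64 ticks
late (the "(cycles_elapsed >> 6) < cpt" test) and falling back to div/mod otherwise, then turns
the remainder into "cycles left in the current tick". The following is a standalone sketch of
just that arithmetic, not kernel code; cpt and the sample elapsed count are invented numbers.

/* sketch: tick accounting as done in timer_interrupt() above */
#include <stdio.h>

static void account_ticks(unsigned long cycles_elapsed, unsigned long cpt,
			  unsigned long *ticks_elapsed,
			  unsigned long *cycles_remainder)
{
	unsigned long ticks = 1;	/* this interrupt accounts for one tick */
	unsigned long rem;

	if ((cycles_elapsed >> 6) < cpt) {
		/* "cheap" path: fewer than 64 ticks late, loop instead of dividing */
		rem = cycles_elapsed;
		while (rem > cpt) {
			rem -= cpt;
			ticks++;
		}
	} else {
		/* very late: one div and one mod */
		rem = cycles_elapsed % cpt;
		ticks += cycles_elapsed / cpt;
	}

	*ticks_elapsed = ticks;
	/* convert "division remainder" into "remainder of clock tick" */
	*cycles_remainder = cpt - rem;
}

int main(void)
{
	unsigned long ticks, rem;

	/* hypothetical: 2.5 ticks worth of cycles elapsed, 1,000,000 cycles per tick */
	account_ticks(2500000, 1000000, &ticks, &rem);
	printf("ticks=%lu, cycles left in current tick=%lu\n", ticks, rem);
	return 0;
}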