From 404571525db92bafeddb0cf9febb21aac6613dca Mon Sep 17 00:00:00 2001 From: Jesper Nilsson Date: Mon, 14 Jul 2008 08:59:48 +0200 Subject: [CRIS] Rename boot-linkscripts and fix the path to them. This makes the CRIS-port directories follow the same naming convention as the rest of the kernel. Signed-off-by: Jesper Nilsson diff --git a/arch/cris/arch-v10/boot/compressed/Makefile b/arch/cris/arch-v10/boot/compressed/Makefile index 08d943c..6fe0ffa 100644 --- a/arch/cris/arch-v10/boot/compressed/Makefile +++ b/arch/cris/arch-v10/boot/compressed/Makefile @@ -4,7 +4,7 @@ asflags-y += $(LINUXINCLUDE) ccflags-y += -O2 $(LINUXINCLUDE) -ldflags-y += -T $(srctree)/$(obj)/decompress.ld +ldflags-y += -T $(srctree)/$(src)/decompress.lds OBJECTS = $(obj)/head.o $(obj)/misc.o OBJCOPYFLAGS = -O binary --remove-section=.bss diff --git a/arch/cris/arch-v10/boot/compressed/decompress.ld b/arch/cris/arch-v10/boot/compressed/decompress.ld deleted file mode 100644 index e80f459..0000000 --- a/arch/cris/arch-v10/boot/compressed/decompress.ld +++ /dev/null @@ -1,30 +0,0 @@ -/* OUTPUT_FORMAT(elf32-us-cris) */ -OUTPUT_FORMAT(elf32-cris) - -MEMORY - { - dram : ORIGIN = 0x40700000, - LENGTH = 0x00100000 - } - -SECTIONS -{ - .text : - { - _stext = . ; - *(.text) - *(.rodata) - *(.rodata.*) - _etext = . ; - } > dram - .data : - { - *(.data) - _edata = . ; - } > dram - .bss : - { - *(.bss) - _end = ALIGN( 0x10 ) ; - } > dram -} diff --git a/arch/cris/arch-v10/boot/compressed/decompress.lds b/arch/cris/arch-v10/boot/compressed/decompress.lds new file mode 100644 index 0000000..e80f459 --- /dev/null +++ b/arch/cris/arch-v10/boot/compressed/decompress.lds @@ -0,0 +1,30 @@ +/* OUTPUT_FORMAT(elf32-us-cris) */ +OUTPUT_FORMAT(elf32-cris) + +MEMORY + { + dram : ORIGIN = 0x40700000, + LENGTH = 0x00100000 + } + +SECTIONS +{ + .text : + { + _stext = . ; + *(.text) + *(.rodata) + *(.rodata.*) + _etext = . ; + } > dram + .data : + { + *(.data) + _edata = . ; + } > dram + .bss : + { + *(.bss) + _end = ALIGN( 0x10 ) ; + } > dram +} diff --git a/arch/cris/arch-v10/boot/rescue/Makefile b/arch/cris/arch-v10/boot/rescue/Makefile index 07688da..82ab59b 100644 --- a/arch/cris/arch-v10/boot/rescue/Makefile +++ b/arch/cris/arch-v10/boot/rescue/Makefile @@ -4,7 +4,7 @@ ccflags-y += -O2 $(LINUXINCLUDE) asflags-y += $(LINUXINCLUDE) -ldflags-y += -T $(srctree)/$(obj)/rescue.ld +ldflags-y += -T $(srctree)/$(src)/rescue.lds OBJCOPYFLAGS = -O binary --remove-section=.bss obj-$(CONFIG_ETRAX_AXISFLASHMAP) = head.o OBJECT := $(obj)/head.o diff --git a/arch/cris/arch-v10/boot/rescue/rescue.ld b/arch/cris/arch-v10/boot/rescue/rescue.ld deleted file mode 100644 index 0b52a94..0000000 --- a/arch/cris/arch-v10/boot/rescue/rescue.ld +++ /dev/null @@ -1,20 +0,0 @@ -MEMORY - { - flash : ORIGIN = 0x00000000, - LENGTH = 0x00100000 - } - -SECTIONS -{ - .text : - { - stext = . ; - *(.text) - etext = . ; - } > flash - .data : - { - *(.data) - edata = . ; - } > flash -} diff --git a/arch/cris/arch-v10/boot/rescue/rescue.lds b/arch/cris/arch-v10/boot/rescue/rescue.lds new file mode 100644 index 0000000..0b52a94 --- /dev/null +++ b/arch/cris/arch-v10/boot/rescue/rescue.lds @@ -0,0 +1,20 @@ +MEMORY + { + flash : ORIGIN = 0x00000000, + LENGTH = 0x00100000 + } + +SECTIONS +{ + .text : + { + stext = . ; + *(.text) + etext = . ; + } > flash + .data : + { + *(.data) + edata = . 
; + } > flash +} diff --git a/arch/cris/arch-v32/boot/compressed/Makefile b/arch/cris/arch-v32/boot/compressed/Makefile index d6335f2..5a1b31c 100644 --- a/arch/cris/arch-v32/boot/compressed/Makefile +++ b/arch/cris/arch-v32/boot/compressed/Makefile @@ -4,7 +4,7 @@ asflags-y += -I $(srctree)/include/asm/mach/ -I $(srctree)/include/asm/arch ccflags-y += -O2 -I $(srctree)/include/asm/mach/ -I $(srctree)/include/asm/arch -ldflags-y += -T $(srctree)/$(obj)/decompress.ld +ldflags-y += -T $(srctree)/$(src)/decompress.lds OBJECTS = $(obj)/head.o $(obj)/misc.o OBJCOPYFLAGS = -O binary --remove-section=.bss diff --git a/arch/cris/arch-v32/boot/compressed/decompress.ld b/arch/cris/arch-v32/boot/compressed/decompress.ld deleted file mode 100644 index 3c837fe..0000000 --- a/arch/cris/arch-v32/boot/compressed/decompress.ld +++ /dev/null @@ -1,30 +0,0 @@ -/*#OUTPUT_FORMAT(elf32-us-cris) */ -OUTPUT_ARCH (crisv32) - -MEMORY - { - dram : ORIGIN = 0x40700000, - LENGTH = 0x00100000 - } - -SECTIONS -{ - .text : - { - _stext = . ; - *(.text) - *(.rodata) - *(.rodata.*) - _etext = . ; - } > dram - .data : - { - *(.data) - _edata = . ; - } > dram - .bss : - { - *(.bss) - _end = ALIGN( 0x10 ) ; - } > dram -} diff --git a/arch/cris/arch-v32/boot/compressed/decompress.lds b/arch/cris/arch-v32/boot/compressed/decompress.lds new file mode 100644 index 0000000..3c837fe --- /dev/null +++ b/arch/cris/arch-v32/boot/compressed/decompress.lds @@ -0,0 +1,30 @@ +/*#OUTPUT_FORMAT(elf32-us-cris) */ +OUTPUT_ARCH (crisv32) + +MEMORY + { + dram : ORIGIN = 0x40700000, + LENGTH = 0x00100000 + } + +SECTIONS +{ + .text : + { + _stext = . ; + *(.text) + *(.rodata) + *(.rodata.*) + _etext = . ; + } > dram + .data : + { + *(.data) + _edata = . ; + } > dram + .bss : + { + *(.bss) + _end = ALIGN( 0x10 ) ; + } > dram +} diff --git a/arch/cris/arch-v32/boot/rescue/Makefile b/arch/cris/arch-v32/boot/rescue/Makefile index 44ae0ad..566aac6 100644 --- a/arch/cris/arch-v32/boot/rescue/Makefile +++ b/arch/cris/arch-v32/boot/rescue/Makefile @@ -7,7 +7,7 @@ ccflags-y += -O2 -I $(srctree)/include/asm/arch/mach/ \ -I $(srctree)/include/asm/arch asflags-y += -I $(srctree)/include/asm/arch/mach/ -I $(srctree)/include/asm/arch LD = gcc-cris -mlinux -march=v32 -nostdlib -ldflags-y += -T $(srctree)/$(obj)/rescue.ld +ldflags-y += -T $(srctree)/$(src)/rescue.lds LDPOSTFLAGS = -lgcc OBJCOPYFLAGS = -O binary --remove-section=.bss obj-$(CONFIG_ETRAX_AXISFLASHMAP) = head.o diff --git a/arch/cris/arch-v32/boot/rescue/rescue.ld b/arch/cris/arch-v32/boot/rescue/rescue.ld deleted file mode 100644 index 8ac646b..0000000 --- a/arch/cris/arch-v32/boot/rescue/rescue.ld +++ /dev/null @@ -1,43 +0,0 @@ -/*#OUTPUT_FORMAT(elf32-us-cris) */ -OUTPUT_ARCH (crisv32) -/* Now that NAND support has been stripped, this file could be simplified, - * but it doesn't do any harm on the other hand so why bother. */ - -MEMORY - { - bootblk : ORIGIN = 0x38000000, - LENGTH = 0x00004000 - intmem : ORIGIN = 0x38004000, - LENGTH = 0x00005000 - } - -SECTIONS -{ - .text : - { - _stext = . ; - *(.text) - *(.init.text) - *(.rodata) - *(.rodata.*) - _etext = . ; - } > bootblk - .data : - { - *(.data) - _edata = . ; - } > bootblk - .bss : - { - _bss = . ; - *(.bss) - _end = ALIGN( 0x10 ) ; - } > intmem - - /* Get rid of stuff from EXPORT_SYMBOL(foo). 
*/ - /DISCARD/ : - { - *(__ksymtab_strings) - *(__ksymtab) - } -} diff --git a/arch/cris/arch-v32/boot/rescue/rescue.lds b/arch/cris/arch-v32/boot/rescue/rescue.lds new file mode 100644 index 0000000..8ac646b --- /dev/null +++ b/arch/cris/arch-v32/boot/rescue/rescue.lds @@ -0,0 +1,43 @@ +/*#OUTPUT_FORMAT(elf32-us-cris) */ +OUTPUT_ARCH (crisv32) +/* Now that NAND support has been stripped, this file could be simplified, + * but it doesn't do any harm on the other hand so why bother. */ + +MEMORY + { + bootblk : ORIGIN = 0x38000000, + LENGTH = 0x00004000 + intmem : ORIGIN = 0x38004000, + LENGTH = 0x00005000 + } + +SECTIONS +{ + .text : + { + _stext = . ; + *(.text) + *(.init.text) + *(.rodata) + *(.rodata.*) + _etext = . ; + } > bootblk + .data : + { + *(.data) + _edata = . ; + } > bootblk + .bss : + { + _bss = . ; + *(.bss) + _end = ALIGN( 0x10 ) ; + } > intmem + + /* Get rid of stuff from EXPORT_SYMBOL(foo). */ + /DISCARD/ : + { + *(__ksymtab_strings) + *(__ksymtab) + } +} -- cgit v0.10.2 From f84dbb912f344270f31d5cce974f12908a47798d Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 10 Jul 2008 14:48:54 -0700 Subject: genirq: enable polling for disabled screaming irqs When we disable a screaming irq we never see it again. If the irq line is shared or if the driver half works this is a real pain. So periodically poll the handlers for screaming interrupts. I use a timer instead of the classic irq poll technique of working off the timer interrupt because when we use the local apic timers note_interrupt is never called (bug?). Further on a system with dynamic ticks the timer interrupt might not even fire unless there is a timer telling it it needs to. I forced this case on my test system with an e1000 nic and my ssh session remained responsive despite the interrupt handler only being called every 10th of a second. Signed-off-by: Eric W. Biederman Signed-off-by: Ingo Molnar diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index c66d3f1..19fe9d6 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -12,83 +12,117 @@ #include #include #include +#include static int irqfixup __read_mostly; +#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) +static void poll_spurious_irqs(unsigned long dummy); +static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); + /* * Recovery handler for misrouted interrupts. 
*/ -static int misrouted_irq(int irq) +static int try_one_irq(int irq, struct irq_desc *desc) { - int i; + struct irqaction *action; int ok = 0; int work = 0; /* Did we do work for a real IRQ */ - for (i = 1; i < NR_IRQS; i++) { - struct irq_desc *desc = irq_desc + i; - struct irqaction *action; - - if (i == irq) /* Already tried */ - continue; - - spin_lock(&desc->lock); - /* Already running on another processor */ - if (desc->status & IRQ_INPROGRESS) { - /* - * Already running: If it is shared get the other - * CPU to go looking for our mystery interrupt too - */ - if (desc->action && (desc->action->flags & IRQF_SHARED)) - desc->status |= IRQ_PENDING; - spin_unlock(&desc->lock); - continue; - } - /* Honour the normal IRQ locking */ - desc->status |= IRQ_INPROGRESS; - action = desc->action; + spin_lock(&desc->lock); + /* Already running on another processor */ + if (desc->status & IRQ_INPROGRESS) { + /* + * Already running: If it is shared get the other + * CPU to go looking for our mystery interrupt too + */ + if (desc->action && (desc->action->flags & IRQF_SHARED)) + desc->status |= IRQ_PENDING; spin_unlock(&desc->lock); + return ok; + } + /* Honour the normal IRQ locking */ + desc->status |= IRQ_INPROGRESS; + action = desc->action; + spin_unlock(&desc->lock); - while (action) { - /* Only shared IRQ handlers are safe to call */ - if (action->flags & IRQF_SHARED) { - if (action->handler(i, action->dev_id) == - IRQ_HANDLED) - ok = 1; - } - action = action->next; + while (action) { + /* Only shared IRQ handlers are safe to call */ + if (action->flags & IRQF_SHARED) { + if (action->handler(irq, action->dev_id) == + IRQ_HANDLED) + ok = 1; } - local_irq_disable(); - /* Now clean up the flags */ - spin_lock(&desc->lock); - action = desc->action; + action = action->next; + } + local_irq_disable(); + /* Now clean up the flags */ + spin_lock(&desc->lock); + action = desc->action; + /* + * While we were looking for a fixup someone queued a real + * IRQ clashing with our walk: + */ + while ((desc->status & IRQ_PENDING) && action) { /* - * While we were looking for a fixup someone queued a real - * IRQ clashing with our walk: - */ - while ((desc->status & IRQ_PENDING) && action) { - /* - * Perform real IRQ processing for the IRQ we deferred - */ - work = 1; - spin_unlock(&desc->lock); - handle_IRQ_event(i, action); - spin_lock(&desc->lock); - desc->status &= ~IRQ_PENDING; - } - desc->status &= ~IRQ_INPROGRESS; - /* - * If we did actual work for the real IRQ line we must let the - * IRQ controller clean up too + * Perform real IRQ processing for the IRQ we deferred */ - if (work && desc->chip && desc->chip->end) - desc->chip->end(i); + work = 1; spin_unlock(&desc->lock); + handle_IRQ_event(irq, action); + spin_lock(&desc->lock); + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; + /* + * If we did actual work for the real IRQ line we must let the + * IRQ controller clean up too + */ + if (work && desc->chip && desc->chip->end) + desc->chip->end(irq); + spin_unlock(&desc->lock); + + return ok; +} + +static int misrouted_irq(int irq) +{ + int i; + int ok = 0; + + for (i = 1; i < NR_IRQS; i++) { + struct irq_desc *desc = irq_desc + i; + + if (i == irq) /* Already tried */ + continue; + + if (try_one_irq(i, desc)) + ok = 1; } /* So the caller can adjust the irq error counts */ return ok; } +static void poll_spurious_irqs(unsigned long dummy) +{ + int i; + for (i = 1; i < NR_IRQS; i++) { + struct irq_desc *desc = irq_desc + i; + unsigned int status; + + /* Racy but it doesn't matter */ 
+ status = desc->status; + barrier(); + if (!(status & IRQ_SPURIOUS_DISABLED)) + continue; + + try_one_irq(i, desc); + } + + mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); +} + /* * If 99,900 of the previous 100,000 interrupts have not been handled * then assume that the IRQ is stuck in some manner. Drop a diagnostic @@ -212,6 +246,8 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; desc->depth++; desc->chip->disable(irq); + + mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); } desc->irqs_unhandled = 0; } -- cgit v0.10.2 From 8d00a6c8f6b08e7167bc03bf955cdc7e47c5132e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 22 Jul 2008 08:39:57 +0200 Subject: genirq: remove last NO_IDLE_HZ leftovers Signed-off-by: Thomas Gleixner diff --git a/include/linux/irq.h b/include/linux/irq.h index 8ccb462..f3047df 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -197,10 +197,6 @@ extern int setup_irq(unsigned int irq, struct irqaction *new); #ifdef CONFIG_GENERIC_HARDIRQS -#ifndef handle_dynamic_tick -# define handle_dynamic_tick(a) do { } while (0) -#endif - #ifdef CONFIG_SMP #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 5fa6198..f4c8a03 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -131,8 +131,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) irqreturn_t ret, retval = IRQ_NONE; unsigned int status = 0; - handle_dynamic_tick(action); - if (!(action->flags & IRQF_DISABLED)) local_irq_enable_in_hardirq(); -- cgit v0.10.2 From 1a960b402a51d80abf54e3f8e4972374ffe5f22d Mon Sep 17 00:00:00 2001 From: Jason Yeh Date: Wed, 23 Jul 2008 23:05:53 +0200 Subject: Oprofile Multiplexing Patch This patch introduces multiplexing support for the Oprofile kernel module. It basically adds a new function pointer in oprofile_operator allowing each architecture to supply its callback to switch between different sets of event when the timer expires. Userspace tools can modify the time slice through /dev/oprofile/time_slice. It also modifies the number of counters exposed to the userspace through /dev/oprofile. For example, the number of counters for AMD CPUs are changed to 32 and multiplexed in the sets of 4. 
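As a rough illustration of the new tunable (a sketch only, not part of the patch): the diff below registers the oprofilefs control file as "timeout_ms" and takes the value in milliseconds, so a userspace tool could set the multiplexing interval roughly as follows. The /dev/oprofile mount point is an assumption based on the usual oprofilefs setup described in the message above.

/* Illustrative sketch: set the oprofile event-multiplexing time slice.
 * Assumes oprofilefs is mounted at /dev/oprofile; "timeout_ms" is the
 * control file created by this patch, and oprofile_set_timeout() converts
 * the millisecond value to jiffies.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/dev/oprofile/timeout_ms";	/* assumed mount point */
	unsigned long msec = (argc > 1) ? strtoul(argv[1], NULL, 0) : 1;
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* The kernel side rejects the write with -EBUSY while profiling is
	 * started, so set this before echoing 1 > /dev/oprofile/enable. */
	fprintf(f, "%lu\n", msec);
	if (fclose(f) != 0) {
		perror(path);
		return 1;
	}
	return 0;
}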
Signed-off-by: Jason Yeh Signed-off-by: Robert Richter Cc: oprofile-list Signed-off-by: Ingo Molnar diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 287513a..2a65fe7 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -23,12 +23,18 @@ #include "op_counter.h" #include "op_x86_model.h" +DEFINE_PER_CPU(int, switch_index); + static struct op_x86_model_spec const *model; static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); static DEFINE_PER_CPU(unsigned long, saved_lvtpc); static int nmi_start(void); static void nmi_stop(void); +static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs); +static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs); +static void nmi_cpu_stop(void *dummy); +static void nmi_cpu_start(void *dummy); /* 0 == registered but off, 1 == registered and on */ static int nmi_enabled = 0; @@ -81,6 +87,47 @@ static void exit_sysfs(void) #define exit_sysfs() do { } while (0) #endif /* CONFIG_PM */ +static void nmi_cpu_switch(void *dummy) +{ + int cpu = smp_processor_id(); + int si = per_cpu(switch_index, cpu); + struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); + + nmi_cpu_stop(NULL); + nmi_cpu_save_mpx_registers(msrs); + + /* move to next set */ + si += model->num_hardware_counters; + if ((si > model->num_counters) || (counter_config[si].count == 0)) + per_cpu(switch_index, smp_processor_id()) = 0; + else + per_cpu(switch_index, smp_processor_id()) = si; + + nmi_cpu_restore_mpx_registers(msrs); + model->setup_ctrs(msrs); + nmi_cpu_start(NULL); +} + +/* + * Quick check to see if multiplexing is necessary. + * The check should be sufficient since counters are used + * in ordre. + */ +static int nmi_multiplex_on(void) +{ + return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL; +} + +static int nmi_switch_event(void) +{ + if (nmi_multiplex_on() < 0) + return -EINVAL; + + on_each_cpu(nmi_cpu_switch, NULL, 0, 1); + + return 0; +} + static int profile_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -144,11 +191,10 @@ static void free_msrs(void) static int allocate_msrs(void) { - int success = 1; + int i, success = 1; size_t controls_size = sizeof(struct op_msr) * model->num_controls; size_t counters_size = sizeof(struct op_msr) * model->num_counters; - int i; for_each_possible_cpu(i) { per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, GFP_KERNEL); @@ -156,8 +202,8 @@ static int allocate_msrs(void) success = 0; break; } - per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, - GFP_KERNEL); + per_cpu(cpu_msrs, i).controls = + kmalloc(controls_size, GFP_KERNEL); if (!per_cpu(cpu_msrs, i).controls) { success = 0; break; @@ -201,7 +247,8 @@ static int nmi_setup(void) return err; } - /* We need to serialize save and setup for HT because the subset + /* + * We need to serialize save and setup for HT because the subset * of msrs are distinct for save and setup operations */ @@ -217,7 +264,6 @@ static int nmi_setup(void) per_cpu(cpu_msrs, 0).controls, sizeof(struct op_msr) * model->num_controls); } - } on_each_cpu(nmi_save_registers, NULL, 1); on_each_cpu(nmi_cpu_setup, NULL, 1); @@ -225,7 +271,41 @@ static int nmi_setup(void) return 0; } -static void nmi_restore_registers(struct op_msrs *msrs) +static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) +{ + unsigned int si = __get_cpu_var(switch_index); + unsigned int const nr_ctrs = model->num_hardware_counters; + struct op_msr *counters = &msrs->counters[si]; + unsigned int i; + + for (i = 0; i < nr_ctrs; ++i) { + int offset 
= i + si; + if (counters[offset].addr) { + rdmsr(counters[offset].addr, + counters[offset].multiplex.low, + counters[offset].multiplex.high); + } + } +} + +static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) +{ + unsigned int si = __get_cpu_var(switch_index); + unsigned int const nr_ctrs = model->num_hardware_counters; + struct op_msr *counters = &msrs->counters[si]; + unsigned int i; + + for (i = 0; i < nr_ctrs; ++i) { + int offset = i + si; + if (counters[offset].addr) { + wrmsr(counters[offset].addr, + counters[offset].multiplex.low, + counters[offset].multiplex.high); + } + } +} + +static void nmi_cpu_restore_registers(struct op_msrs *msrs) { unsigned int const nr_ctrs = model->num_counters; unsigned int const nr_ctrls = model->num_controls; @@ -265,7 +345,8 @@ static void nmi_cpu_shutdown(void *dummy) apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); apic_write(APIC_LVTERR, v); - nmi_restore_registers(msrs); + nmi_cpu_restore_registers(msrs); + __get_cpu_var(switch_index) = 0; } static void nmi_shutdown(void) @@ -328,6 +409,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); + counter_config[i].save_count_low = 0; } return 0; @@ -469,12 +551,14 @@ int __init op_nmi_init(struct oprofile_operations *ops) } /* default values, can be overwritten by model */ + __get_cpu_var(switch_index) = 0; ops->create_files = nmi_create_files; ops->setup = nmi_setup; ops->shutdown = nmi_shutdown; ops->start = nmi_start; ops->stop = nmi_stop; ops->cpu_type = cpu_type; + ops->switch_events = nmi_switch_event; if (model->init) ret = model->init(ops); diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h index 2880b15..786d6e0 100644 --- a/arch/x86/oprofile/op_counter.h +++ b/arch/x86/oprofile/op_counter.h @@ -10,13 +10,14 @@ #ifndef OP_COUNTER_H #define OP_COUNTER_H -#define OP_MAX_COUNTER 8 +#define OP_MAX_COUNTER 32 /* Per-perfctr configuration as set via * oprofilefs. */ struct op_counter_config { unsigned long count; + unsigned long save_count_low; unsigned long enabled; unsigned long event; unsigned long kernel; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index d9faf60..bbf2b68 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -23,8 +24,10 @@ #include "op_x86_model.h" #include "op_counter.h" -#define NUM_COUNTERS 4 -#define NUM_CONTROLS 4 +#define NUM_COUNTERS 32 +#define NUM_HARDWARE_COUNTERS 4 +#define NUM_CONTROLS 32 +#define NUM_HARDWARE_CONTROLS 4 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 
1 : 0) #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) @@ -48,6 +51,7 @@ #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) static unsigned long reset_value[NUM_COUNTERS]; +DECLARE_PER_CPU(int, switch_index); #ifdef CONFIG_OPROFILE_IBS @@ -130,15 +134,17 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) int i; for (i = 0; i < NUM_COUNTERS; i++) { - if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) - msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; + int hw_counter = i % NUM_HARDWARE_COUNTERS; + if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + hw_counter)) + msrs->counters[i].addr = MSR_K7_PERFCTR0 + hw_counter; else msrs->counters[i].addr = 0; } for (i = 0; i < NUM_CONTROLS; i++) { - if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) - msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; + int hw_control = i % NUM_HARDWARE_CONTROLS; + if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + hw_control)) + msrs->controls[i].addr = MSR_K7_EVNTSEL0 + hw_control; else msrs->controls[i].addr = 0; } @@ -150,8 +156,16 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) unsigned int low, high; int i; + for (i = 0; i < NUM_HARDWARE_CONTROLS; ++i) { + int offset = i + __get_cpu_var(switch_index); + if (counter_config[offset].enabled) + reset_value[offset] = counter_config[offset].count; + else + reset_value[offset] = 0; + } + /* clear all counters */ - for (i = 0 ; i < NUM_CONTROLS; ++i) { + for (i = 0 ; i < NUM_HARDWARE_CONTROLS; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; CTRL_READ(low, high, msrs, i); @@ -161,34 +175,31 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) } /* avoid a false detection of ctr overflows in NMI handler */ - for (i = 0; i < NUM_COUNTERS; ++i) { + for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { if (unlikely(!CTR_IS_RESERVED(msrs, i))) continue; CTR_WRITE(1, msrs, i); } /* enable active counters */ - for (i = 0; i < NUM_COUNTERS; ++i) { - if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { - reset_value[i] = counter_config[i].count; - - CTR_WRITE(counter_config[i].count, msrs, i); + for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { + int offset = i + __get_cpu_var(switch_index); + if ((counter_config[offset].enabled) && (CTR_IS_RESERVED(msrs, i))) { + CTR_WRITE(counter_config[offset].count, msrs, i); CTRL_READ(low, high, msrs, i); CTRL_CLEAR_LO(low); CTRL_CLEAR_HI(high); CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT_LOW(low, counter_config[i].event); - CTRL_SET_EVENT_HIGH(high, counter_config[i].event); + CTRL_SET_USR(low, counter_config[offset].user); + CTRL_SET_KERN(low, counter_config[offset].kernel); + CTRL_SET_UM(low, counter_config[offset].unit_mask); + CTRL_SET_EVENT_LOW(low, counter_config[offset].event); + CTRL_SET_EVENT_HIGH(high, counter_config[offset].event); CTRL_SET_HOST_ONLY(high, 0); CTRL_SET_GUEST_ONLY(high, 0); CTRL_WRITE(low, high, msrs, i); - } else { - reset_value[i] = 0; } } } @@ -276,13 +287,14 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, unsigned int low, high; int i; - for (i = 0 ; i < NUM_COUNTERS; ++i) { - if (!reset_value[i]) + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + int offset = i + __get_cpu_var(switch_index); + if (!reset_value[offset]) continue; CTR_READ(low, high, msrs, i); if (CTR_OVERFLOWED(low)) { - oprofile_add_sample(regs, i); - CTR_WRITE(reset_value[i], msrs, i); + oprofile_add_sample(regs, 
offset); + CTR_WRITE(reset_value[offset], msrs, i); } } @@ -298,8 +310,10 @@ static void op_amd_start(struct op_msrs const * const msrs) { unsigned int low, high; int i; - for (i = 0 ; i < NUM_COUNTERS ; ++i) { - if (reset_value[i]) { + + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + int offset = i + __get_cpu_var(switch_index); + if (reset_value[offset]) { CTRL_READ(low, high, msrs, i); CTRL_SET_ACTIVE(low); CTRL_WRITE(low, high, msrs, i); @@ -329,8 +343,8 @@ static void op_amd_stop(struct op_msrs const * const msrs) /* Subtle: stop on all counters to avoid race with * setting our pm callback */ - for (i = 0 ; i < NUM_COUNTERS ; ++i) { - if (!reset_value[i]) + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + if (!reset_value[i + per_cpu(switch_index, smp_processor_id())]) continue; CTRL_READ(low, high, msrs, i); CTRL_SET_INACTIVE(low); @@ -356,11 +370,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs) { int i; - for (i = 0 ; i < NUM_COUNTERS ; ++i) { + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { if (CTR_IS_RESERVED(msrs, i)) release_perfctr_nmi(MSR_K7_PERFCTR0 + i); } - for (i = 0 ; i < NUM_CONTROLS ; ++i) { + for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { if (CTRL_IS_RESERVED(msrs, i)) release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); } @@ -534,6 +548,8 @@ struct op_x86_model_spec const op_amd_spec = { .exit = op_amd_exit, .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, + .num_hardware_counters = NUM_HARDWARE_COUNTERS, + .num_hardware_controls = NUM_HARDWARE_CONTROLS, .fill_in_addresses = &op_amd_fill_in_addresses, .setup_ctrs = &op_amd_setup_ctrs, .check_ctrs = &op_amd_check_ctrs, diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 56b4757..e641545 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -701,6 +701,8 @@ static void p4_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_p4_ht2_spec = { .num_counters = NUM_COUNTERS_HT2, .num_controls = NUM_CONTROLS_HT2, + .num_hardware_counters = NUM_COUNTERS_HT2, + .num_hardware_controls = NUM_CONTROLS_HT2, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, @@ -713,6 +715,8 @@ struct op_x86_model_spec const op_p4_ht2_spec = { struct op_x86_model_spec const op_p4_spec = { .num_counters = NUM_COUNTERS_NON_HT, .num_controls = NUM_CONTROLS_NON_HT, + .num_hardware_counters = NUM_COUNTERS_NON_HT, + .num_hardware_controls = NUM_CONTROLS_NON_HT, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index eff431f..e5811aa 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -183,6 +183,8 @@ static void ppro_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_ppro_spec = { .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, + .num_hardware_counters = NUM_COUNTERS, + .num_hardware_controls = NUM_CONTROLS, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 05a0261..e07ba10 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -19,6 +19,7 @@ struct op_saved_msr { struct op_msr { unsigned long addr; struct op_saved_msr saved; + struct op_saved_msr multiplex; }; struct op_msrs { @@ -34,6 +35,8 @@ struct 
pt_regs; struct op_x86_model_spec { int (*init)(struct oprofile_operations *ops); void (*exit)(void); + unsigned int const num_hardware_counters; + unsigned int const num_hardware_controls; unsigned int const num_counters; unsigned int const num_controls; void (*fill_in_addresses)(struct op_msrs * const msrs); diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index 2c64517..b2fa5df 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include "oprof.h" @@ -19,13 +21,18 @@ #include "cpu_buffer.h" #include "buffer_sync.h" #include "oprofile_stats.h" + +static unsigned long is_setup; +static void switch_worker(struct work_struct *work); +static DECLARE_DELAYED_WORK(switch_work, switch_worker); +static DEFINE_MUTEX(start_mutex); struct oprofile_operations oprofile_ops; +unsigned long timeout_jiffies; unsigned long oprofile_started; unsigned long backtrace_depth; -static unsigned long is_setup; -static DEFINE_MUTEX(start_mutex); +/* Multiplexing defaults at 1 msec*/ /* timer 0 - use performance monitoring hardware if available @@ -87,6 +94,16 @@ out: return err; } +static void start_switch_worker(void) +{ + schedule_delayed_work(&switch_work, timeout_jiffies); +} + +static void switch_worker(struct work_struct *work) +{ + if (!oprofile_ops.switch_events()) + start_switch_worker(); +} /* Actually start profiling (echo 1>/dev/oprofile/enable) */ int oprofile_start(void) @@ -94,7 +111,6 @@ int oprofile_start(void) int err = -EINVAL; mutex_lock(&start_mutex); - if (!is_setup) goto out; @@ -108,6 +124,9 @@ int oprofile_start(void) if ((err = oprofile_ops.start())) goto out; + if (oprofile_ops.switch_events) + start_switch_worker(); + oprofile_started = 1; out: mutex_unlock(&start_mutex); @@ -123,6 +142,7 @@ void oprofile_stop(void) goto out; oprofile_ops.stop(); oprofile_started = 0; + cancel_delayed_work_sync(&switch_work); /* wake up the daemon to read what remains */ wake_up_buffer_waiter(); out: @@ -155,6 +175,32 @@ post_sync: mutex_unlock(&start_mutex); } +/* User inputs in ms, converts to jiffies */ +int oprofile_set_timeout(unsigned long val_msec) +{ + int err = 0; + + mutex_lock(&start_mutex); + + if (oprofile_started) { + err = -EBUSY; + goto out; + } + + if (!oprofile_ops.switch_events) { + err = -EINVAL; + goto out; + } + + timeout_jiffies = msecs_to_jiffies(val_msec); + if (timeout_jiffies == MAX_JIFFY_OFFSET) + timeout_jiffies = msecs_to_jiffies(1); + +out: + mutex_unlock(&start_mutex); + return err; + +} int oprofile_set_backtrace(unsigned long val) { @@ -179,10 +225,16 @@ out: return err; } +static void __init oprofile_switch_timer_init(void) +{ + timeout_jiffies = msecs_to_jiffies(1); +} + static int __init oprofile_init(void) { int err; + oprofile_switch_timer_init(); err = oprofile_arch_init(&oprofile_ops); if (err < 0 || timer) { diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index 1832365..c4406a7 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h @@ -27,7 +27,8 @@ extern unsigned long fs_buffer_watershed; extern struct oprofile_operations oprofile_ops; extern unsigned long oprofile_started; extern unsigned long backtrace_depth; - +extern unsigned long timeout_jiffies; + struct super_block; struct dentry; @@ -35,5 +36,6 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root); void oprofile_timer_init(struct oprofile_operations * ops); int oprofile_set_backtrace(unsigned long depth); +int oprofile_set_timeout(unsigned 
long time); #endif /* OPROF_H */ diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index ef953ba..cc4f5a1 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c @@ -9,6 +9,7 @@ #include #include +#include #include "event_buffer.h" #include "oprofile_stats.h" @@ -18,6 +19,40 @@ unsigned long fs_buffer_size = 131072; unsigned long fs_cpu_buffer_size = 8192; unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */ +static ssize_t timeout_read(struct file *file, char __user *buf, + size_t count, loff_t *offset) +{ + return oprofilefs_ulong_to_user(jiffies_to_msecs(timeout_jiffies), + buf, count, offset); +} + + +static ssize_t timeout_write(struct file *file, char const __user *buf, + size_t count, loff_t *offset) +{ + unsigned long val; + int retval; + + if (*offset) + return -EINVAL; + + retval = oprofilefs_ulong_from_user(&val, buf, count); + if (retval) + return retval; + + retval = oprofile_set_timeout(val); + + if (retval) + return retval; + return count; +} + +static const struct file_operations timeout_fops = { + .read = timeout_read, + .write = timeout_write, +}; + + static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset) { return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); @@ -85,11 +120,10 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t if (*offset) return -EINVAL; - retval = oprofilefs_ulong_from_user(&val, buf, count); if (retval) return retval; - + if (val) retval = oprofile_start(); else @@ -129,6 +163,7 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root) oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); + oprofilefs_create_file(sb, root, "timeout_ms", &timeout_fops); oprofile_create_stats_files(sb, root); if (oprofile_ops.create_files) oprofile_ops.create_files(sb, root); diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index bcb8f72..687f2f4 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -67,6 +67,9 @@ struct oprofile_operations { /* Initiate a stack backtrace. Optional. */ void (*backtrace)(struct pt_regs * const regs, unsigned int depth); + + /* Multiplex between different events. Optional. */ + int (*switch_events)(void); /* CPU identification string. */ char * cpu_type; }; -- cgit v0.10.2 From 7e7b43892b87b6be259479ef4de14029dcb4012f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 24 Jul 2008 16:00:16 +0200 Subject: x86/oprofile: fix on_each_cpu build error Signed-off-by: Robert Richter Cc: oprofile-list Cc: Jason Yeh Signed-off-by: Ingo Molnar diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 2a65fe7..fb4902b 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -123,7 +123,7 @@ static int nmi_switch_event(void) if (nmi_multiplex_on() < 0) return -EINVAL; - on_each_cpu(nmi_cpu_switch, NULL, 0, 1); + on_each_cpu(nmi_cpu_switch, NULL, 1); return 0; } -- cgit v0.10.2 From 34a82443b79dcda4304b229d555586296da40c16 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 30 Jul 2008 12:35:05 -0700 Subject: [MTD] dataflash OTP support Now that we can tell when we have one of the newer DataFlash chips, optionally expose the 128 bytes of OTP memory they provide. Tested on at45db642 revision B and D chips. 
Switch mtdchar over to a generic HAVE_MTD_OTP flag instead of adding another #ifdef for each type of chip whose driver has OTP support. Signed-off-by: David Brownell Cc: Bryan Wu Cc: Michael Hennerich Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 14f11f8..a90d50c 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -172,6 +172,11 @@ config MTD_CHAR memory chips, and also use ioctl() to obtain information about the device, or to erase parts of it. +config HAVE_MTD_OTP + bool + help + Enable access to OTP regions using MTD_CHAR. + config MTD_BLKDEVS tristate "Common interface to block layer for MTD 'translation layers'" depends on BLOCK diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index 479d32b..4c35e5d 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -154,6 +154,7 @@ config MTD_CFI_I8 config MTD_OTP bool "Protection Registers aka one-time programmable (OTP) bits" depends on MTD_CFI_ADV_OPTIONS + select HAVE_MTD_OTP default n help This enables support for reading, writing and locking so called diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 9c613f0..88f4df0 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -59,6 +59,17 @@ config MTD_DATAFLASH Sometimes DataFlash chips are packaged inside MMC-format cards; at this writing, the MMC stack won't handle those. +config MTD_DATAFLASH_OTP + bool "DataFlash OTP support (Security Register)" + depends on MTD_DATAFLASH + select HAVE_MTD_OTP + help + Newer DataFlash chips (revisions C and D) support 128 bytes of + one-time-programmable (OTP) data. The first half may be written + (once) with up to 64 bytes of data, such as a serial number or + other key product data. The second half is programmed with a + unique-to-each-chip bit pattern at the factory. + config MTD_M25P80 tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)" depends on SPI_MASTER && EXPERIMENTAL diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 8bd0dea..17c9b20 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -80,7 +80,8 @@ */ #define OP_READ_ID 0x9F #define OP_READ_SECURITY 0x77 -#define OP_WRITE_SECURITY 0x9A /* OTP bits */ +#define OP_WRITE_SECURITY_REVC 0x9A +#define OP_WRITE_SECURITY 0x9B /* revision D */ struct dataflash { @@ -451,16 +452,192 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, /* ......................................................................... */ +#ifdef CONFIG_MTD_DATAFLASH_OTP + +static int dataflash_get_otp_info(struct mtd_info *mtd, + struct otp_info *info, size_t len) +{ + /* Report both blocks as identical: bytes 0..64, locked. + * Unless the user block changed from all-ones, we can't + * tell whether it's still writable; so we assume it isn't. 
+ */ + info->start = 0; + info->length = 64; + info->locked = 1; + return sizeof(*info); +} + +static ssize_t otp_read(struct spi_device *spi, unsigned base, + uint8_t *buf, loff_t off, size_t len) +{ + struct spi_message m; + size_t l; + uint8_t *scratch; + struct spi_transfer t; + int status; + + if (off > 64) + return -EINVAL; + + if ((off + len) > 64) + len = 64 - off; + if (len == 0) + return len; + + spi_message_init(&m); + + l = 4 + base + off + len; + scratch = kzalloc(l, GFP_KERNEL); + if (!scratch) + return -ENOMEM; + + /* OUT: OP_READ_SECURITY, 3 don't-care bytes, zeroes + * IN: ignore 4 bytes, data bytes 0..N (max 127) + */ + scratch[0] = OP_READ_SECURITY; + + memset(&t, 0, sizeof t); + t.tx_buf = scratch; + t.rx_buf = scratch; + t.len = l; + spi_message_add_tail(&t, &m); + + dataflash_waitready(spi); + + status = spi_sync(spi, &m); + if (status >= 0) { + memcpy(buf, scratch + 4 + base + off, len); + status = len; + } + + kfree(scratch); + return status; +} + +static int dataflash_read_fact_otp(struct mtd_info *mtd, + loff_t from, size_t len, size_t *retlen, u_char *buf) +{ + struct dataflash *priv = (struct dataflash *)mtd->priv; + int status; + + /* 64 bytes, from 0..63 ... start at 64 on-chip */ + mutex_lock(&priv->lock); + status = otp_read(priv->spi, 64, buf, from, len); + mutex_unlock(&priv->lock); + + if (status < 0) + return status; + *retlen = status; + return 0; +} + +static int dataflash_read_user_otp(struct mtd_info *mtd, + loff_t from, size_t len, size_t *retlen, u_char *buf) +{ + struct dataflash *priv = (struct dataflash *)mtd->priv; + int status; + + /* 64 bytes, from 0..63 ... start at 0 on-chip */ + mutex_lock(&priv->lock); + status = otp_read(priv->spi, 0, buf, from, len); + mutex_unlock(&priv->lock); + + if (status < 0) + return status; + *retlen = status; + return 0; +} + +static int dataflash_write_user_otp(struct mtd_info *mtd, + loff_t from, size_t len, size_t *retlen, u_char *buf) +{ + struct spi_message m; + const size_t l = 4 + 64; + uint8_t *scratch; + struct spi_transfer t; + struct dataflash *priv = (struct dataflash *)mtd->priv; + int status; + + if (len > 64) + return -EINVAL; + + /* Strictly speaking, we *could* truncate the write ... but + * let's not do that for the only write that's ever possible. + */ + if ((from + len) > 64) + return -EINVAL; + + /* OUT: OP_WRITE_SECURITY, 3 zeroes, 64 data-or-zero bytes + * IN: ignore all + */ + scratch = kzalloc(l, GFP_KERNEL); + if (!scratch) + return -ENOMEM; + scratch[0] = OP_WRITE_SECURITY; + memcpy(scratch + 4 + from, buf, len); + + spi_message_init(&m); + + memset(&t, 0, sizeof t); + t.tx_buf = scratch; + t.len = l; + spi_message_add_tail(&t, &m); + + /* Write the OTP bits, if they've not yet been written. + * This modifies SRAM buffer1. + */ + mutex_lock(&priv->lock); + dataflash_waitready(priv->spi); + status = spi_sync(priv->spi, &m); + mutex_unlock(&priv->lock); + + kfree(scratch); + + if (status >= 0) { + status = 0; + *retlen = len; + } + return status; +} + +static char *otp_setup(struct mtd_info *device, char revision) +{ + device->get_fact_prot_info = dataflash_get_otp_info; + device->read_fact_prot_reg = dataflash_read_fact_otp; + device->get_user_prot_info = dataflash_get_otp_info; + device->read_user_prot_reg = dataflash_read_user_otp; + + /* rev c parts (at45db321c and at45db1281 only!) use a + * different write procedure; not (yet?) implemented. 
+ */ + if (revision > 'c') + device->write_user_prot_reg = dataflash_write_user_otp; + + return ", OTP"; +} + +#else + +static char *otp_setup(struct mtd_info *device) +{ + return " (OTP)"; +} + +#endif + +/* ......................................................................... */ + /* * Register DataFlash device with MTD subsystem. */ static int __devinit -add_dataflash(struct spi_device *spi, char *name, - int nr_pages, int pagesize, int pageoffset) +add_dataflash_otp(struct spi_device *spi, char *name, + int nr_pages, int pagesize, int pageoffset, char revision) { struct dataflash *priv; struct mtd_info *device; struct flash_platform_data *pdata = spi->dev.platform_data; + char *otp_tag = ""; priv = kzalloc(sizeof *priv, GFP_KERNEL); if (!priv) @@ -489,8 +666,12 @@ add_dataflash(struct spi_device *spi, char *name, device->write = dataflash_write; device->priv = priv; - dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes\n", - name, DIV_ROUND_UP(device->size, 1024), pagesize); + if (revision >= 'c') + otp_tag = otp_setup(device, revision); + + dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n", + name, DIV_ROUND_UP(device->size, 1024), + pagesize, otp_tag); dev_set_drvdata(&spi->dev, priv); if (mtd_has_partitions()) { @@ -519,6 +700,14 @@ add_dataflash(struct spi_device *spi, char *name, return add_mtd_device(device) == 1 ? -ENODEV : 0; } +static inline int __devinit +add_dataflash(struct spi_device *spi, char *name, + int nr_pages, int pagesize, int pageoffset) +{ + return add_dataflash_otp(spi, name, nr_pages, pagesize, + pageoffset, 0); +} + struct flash_info { char *name; @@ -664,13 +853,16 @@ static int __devinit dataflash_probe(struct spi_device *spi) * Try to detect dataflash by JEDEC ID. * If it succeeds we know we have either a C or D part. * D will support power of 2 pagesize option. + * Both support the security register, though with different + * write procedures. */ info = jedec_probe(spi); if (IS_ERR(info)) return PTR_ERR(info); if (info != NULL) - return add_dataflash(spi, info->name, info->nr_pages, - info->pagesize, info->pageoffset); + return add_dataflash_otp(spi, info->name, info->nr_pages, + info->pagesize, info->pageoffset, + (info->flags & SUP_POW2PS) ? 'd' : 'c'); /* * Older chips support only legacy commands, identifing diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index d2f3318..13cc67a 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -350,7 +350,7 @@ static void mtdchar_erase_callback (struct erase_info *instr) wake_up((wait_queue_head_t *)instr->priv); } -#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP) +#ifdef CONFIG_HAVE_MTD_OTP static int otp_select_filemode(struct mtd_file_info *mfi, int mode) { struct mtd_info *mtd = mfi->mtd; @@ -663,7 +663,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file, break; } -#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP) +#ifdef CONFIG_HAVE_MTD_OTP case OTPSELECT: { int mode; diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index cb41cbc..b94a61b 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -29,6 +29,7 @@ config MTD_ONENAND_GENERIC config MTD_ONENAND_OTP bool "OneNAND OTP Support" + select HAVE_MTD_OTP help One Block of the NAND Flash Array memory is reserved as a One-Time Programmable Block memory area. 
-- cgit v0.10.2 From feb2f55db45919aa80731f8877b60cab454b7b94 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 1 Aug 2008 11:53:29 +0300 Subject: [MTD] [OneNAND] Add defines for HF and sync write Signed-off-by: Adrian Hunter Signed-off-by: David Woodhouse diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index d1b310c..0c6bbe2 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -152,6 +152,8 @@ #define ONENAND_SYS_CFG1_INT (1 << 6) #define ONENAND_SYS_CFG1_IOBE (1 << 5) #define ONENAND_SYS_CFG1_RDY_CONF (1 << 4) +#define ONENAND_SYS_CFG1_HF (1 << 2) +#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1) /* * Controller Status Register F240h (R) -- cgit v0.10.2 From c4308d1076830a72e05eb3e5f58b9ed851229399 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Fri, 1 Aug 2008 11:44:20 -0500 Subject: [MTD] remove code associated with !CONFIG_PPC_MERGE Now that arch/ppc is gone we don't need CONFIG_PPC_MERGE anymore remove the dead code associated with !CONFIG_PPC_MERGE. The mtd maps should be using the OF based mechanism. Signed-off-by: Kumar Gala Acked-by: Josh Boyer Signed-off-by: David Woodhouse diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index df8e00b..db667b1 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -332,30 +332,6 @@ config MTD_CFI_FLAGADM Mapping for the Flaga digital module. If you don't have one, ignore this setting. -config MTD_WALNUT - tristate "Flash device mapped on IBM 405GP Walnut" - depends on MTD_JEDECPROBE && WALNUT && !PPC_MERGE - help - This enables access routines for the flash chips on the IBM 405GP - Walnut board. If you have one of these boards and would like to - use the flash chips on it, say 'Y'. - -config MTD_EBONY - tristate "Flash devices mapped on IBM 440GP Ebony" - depends on MTD_JEDECPROBE && EBONY && !PPC_MERGE - help - This enables access routines for the flash chips on the IBM 440GP - Ebony board. If you have one of these boards and would like to - use the flash chips on it, say 'Y'. - -config MTD_OCOTEA - tristate "Flash devices mapped on IBM 440GX Ocotea" - depends on MTD_CFI && OCOTEA && !PPC_MERGE - help - This enables access routines for the flash chips on the IBM 440GX - Ocotea board. If you have one of these boards and would like to - use the flash chips on it, say 'Y'. - config MTD_REDWOOD tristate "CFI Flash devices mapped on IBM Redwood" depends on MTD_CFI && ( REDWOOD_4 || REDWOOD_5 || REDWOOD_6 ) diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 6cda6df..b258250 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -50,9 +50,6 @@ obj-$(CONFIG_MTD_REDWOOD) += redwood.o obj-$(CONFIG_MTD_UCLINUX) += uclinux.o obj-$(CONFIG_MTD_NETtel) += nettel.o obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o -obj-$(CONFIG_MTD_EBONY) += ebony.o -obj-$(CONFIG_MTD_OCOTEA) += ocotea.o -obj-$(CONFIG_MTD_WALNUT) += walnut.o obj-$(CONFIG_MTD_H720X) += h720x-flash.o obj-$(CONFIG_MTD_SBC8240) += sbc8240.o obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c deleted file mode 100644 index d92b7c7..0000000 --- a/drivers/mtd/maps/ebony.c +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Mapping for Ebony user flash - * - * Matt Porter - * - * Copyright 2002-2004 MontaVista Software Inc. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct mtd_info *flash; - -static struct map_info ebony_small_map = { - .name = "Ebony small flash", - .size = EBONY_SMALL_FLASH_SIZE, - .bankwidth = 1, -}; - -static struct map_info ebony_large_map = { - .name = "Ebony large flash", - .size = EBONY_LARGE_FLASH_SIZE, - .bankwidth = 1, -}; - -static struct mtd_partition ebony_small_partitions[] = { - { - .name = "OpenBIOS", - .offset = 0x0, - .size = 0x80000, - } -}; - -static struct mtd_partition ebony_large_partitions[] = { - { - .name = "fs", - .offset = 0, - .size = 0x380000, - }, - { - .name = "firmware", - .offset = 0x380000, - .size = 0x80000, - } -}; - -int __init init_ebony(void) -{ - u8 fpga0_reg; - u8 __iomem *fpga0_adr; - unsigned long long small_flash_base, large_flash_base; - - fpga0_adr = ioremap64(EBONY_FPGA_ADDR, 16); - if (!fpga0_adr) - return -ENOMEM; - - fpga0_reg = readb(fpga0_adr); - iounmap(fpga0_adr); - - if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) && - !EBONY_FLASH_SEL(fpga0_reg)) - small_flash_base = EBONY_SMALL_FLASH_HIGH2; - else if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) && - EBONY_FLASH_SEL(fpga0_reg)) - small_flash_base = EBONY_SMALL_FLASH_HIGH1; - else if (!EBONY_BOOT_SMALL_FLASH(fpga0_reg) && - !EBONY_FLASH_SEL(fpga0_reg)) - small_flash_base = EBONY_SMALL_FLASH_LOW2; - else - small_flash_base = EBONY_SMALL_FLASH_LOW1; - - if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) && - !EBONY_ONBRD_FLASH_EN(fpga0_reg)) - large_flash_base = EBONY_LARGE_FLASH_LOW; - else - large_flash_base = EBONY_LARGE_FLASH_HIGH; - - ebony_small_map.phys = small_flash_base; - ebony_small_map.virt = ioremap64(small_flash_base, - ebony_small_map.size); - - if (!ebony_small_map.virt) { - printk("Failed to ioremap flash\n"); - return -EIO; - } - - simple_map_init(&ebony_small_map); - - flash = do_map_probe("jedec_probe", &ebony_small_map); - if (flash) { - flash->owner = THIS_MODULE; - add_mtd_partitions(flash, ebony_small_partitions, - ARRAY_SIZE(ebony_small_partitions)); - } else { - printk("map probe failed for flash\n"); - iounmap(ebony_small_map.virt); - return -ENXIO; - } - - ebony_large_map.phys = large_flash_base; - ebony_large_map.virt = ioremap64(large_flash_base, - ebony_large_map.size); - - if (!ebony_large_map.virt) { - printk("Failed to ioremap flash\n"); - iounmap(ebony_small_map.virt); - return -EIO; - } - - simple_map_init(&ebony_large_map); - - flash = do_map_probe("jedec_probe", &ebony_large_map); - if (flash) { - flash->owner = THIS_MODULE; - add_mtd_partitions(flash, ebony_large_partitions, - ARRAY_SIZE(ebony_large_partitions)); - } else { - printk("map probe failed for flash\n"); - iounmap(ebony_small_map.virt); - iounmap(ebony_large_map.virt); - return -ENXIO; - } - - return 0; -} - -static void __exit cleanup_ebony(void) -{ - if (flash) { - del_mtd_partitions(flash); - map_destroy(flash); - } - - if (ebony_small_map.virt) { - iounmap(ebony_small_map.virt); - ebony_small_map.virt = NULL; - } - - if (ebony_large_map.virt) { - iounmap(ebony_large_map.virt); - ebony_large_map.virt = NULL; - } -} - -module_init(init_ebony); -module_exit(cleanup_ebony); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Matt Porter "); -MODULE_DESCRIPTION("MTD map and partitions for IBM 440GP Ebony 
boards"); diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c deleted file mode 100644 index 5522eac..0000000 --- a/drivers/mtd/maps/ocotea.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Mapping for Ocotea user flash - * - * Matt Porter - * - * Copyright 2002-2004 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct mtd_info *flash; - -static struct map_info ocotea_small_map = { - .name = "Ocotea small flash", - .size = OCOTEA_SMALL_FLASH_SIZE, - .buswidth = 1, -}; - -static struct map_info ocotea_large_map = { - .name = "Ocotea large flash", - .size = OCOTEA_LARGE_FLASH_SIZE, - .buswidth = 1, -}; - -static struct mtd_partition ocotea_small_partitions[] = { - { - .name = "pibs", - .offset = 0x0, - .size = 0x100000, - } -}; - -static struct mtd_partition ocotea_large_partitions[] = { - { - .name = "fs", - .offset = 0, - .size = 0x300000, - }, - { - .name = "firmware", - .offset = 0x300000, - .size = 0x100000, - } -}; - -int __init init_ocotea(void) -{ - u8 fpga0_reg; - u8 *fpga0_adr; - unsigned long long small_flash_base, large_flash_base; - - fpga0_adr = ioremap64(OCOTEA_FPGA_ADDR, 16); - if (!fpga0_adr) - return -ENOMEM; - - fpga0_reg = readb((unsigned long)fpga0_adr); - iounmap(fpga0_adr); - - if (OCOTEA_BOOT_LARGE_FLASH(fpga0_reg)) { - small_flash_base = OCOTEA_SMALL_FLASH_HIGH; - large_flash_base = OCOTEA_LARGE_FLASH_LOW; - } - else { - small_flash_base = OCOTEA_SMALL_FLASH_LOW; - large_flash_base = OCOTEA_LARGE_FLASH_HIGH; - } - - ocotea_small_map.phys = small_flash_base; - ocotea_small_map.virt = ioremap64(small_flash_base, - ocotea_small_map.size); - - if (!ocotea_small_map.virt) { - printk("Failed to ioremap flash\n"); - return -EIO; - } - - simple_map_init(&ocotea_small_map); - - flash = do_map_probe("map_rom", &ocotea_small_map); - if (flash) { - flash->owner = THIS_MODULE; - add_mtd_partitions(flash, ocotea_small_partitions, - ARRAY_SIZE(ocotea_small_partitions)); - } else { - printk("map probe failed for flash\n"); - iounmap(ocotea_small_map.virt); - return -ENXIO; - } - - ocotea_large_map.phys = large_flash_base; - ocotea_large_map.virt = ioremap64(large_flash_base, - ocotea_large_map.size); - - if (!ocotea_large_map.virt) { - printk("Failed to ioremap flash\n"); - iounmap(ocotea_small_map.virt); - return -EIO; - } - - simple_map_init(&ocotea_large_map); - - flash = do_map_probe("cfi_probe", &ocotea_large_map); - if (flash) { - flash->owner = THIS_MODULE; - add_mtd_partitions(flash, ocotea_large_partitions, - ARRAY_SIZE(ocotea_large_partitions)); - } else { - printk("map probe failed for flash\n"); - iounmap(ocotea_small_map.virt); - iounmap(ocotea_large_map.virt); - return -ENXIO; - } - - return 0; -} - -static void __exit cleanup_ocotea(void) -{ - if (flash) { - del_mtd_partitions(flash); - map_destroy(flash); - } - - if (ocotea_small_map.virt) { - iounmap((void *)ocotea_small_map.virt); - ocotea_small_map.virt = 0; - } - - if (ocotea_large_map.virt) { - iounmap((void *)ocotea_large_map.virt); - ocotea_large_map.virt = 0; - } -} - -module_init(init_ocotea); -module_exit(cleanup_ocotea); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Matt Porter "); -MODULE_DESCRIPTION("MTD map and partitions for IBM 440GX Ocotea 
boards"); diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c deleted file mode 100644 index e243476..0000000 --- a/drivers/mtd/maps/walnut.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Mapping for Walnut flash - * (used ebony.c as a "framework") - * - * Heikki Lindholm - * - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* these should be in platforms/4xx/walnut.h ? */ -#define WALNUT_FLASH_ONBD_N(x) (x & 0x02) -#define WALNUT_FLASH_SRAM_SEL(x) (x & 0x01) -#define WALNUT_FLASH_LOW 0xFFF00000 -#define WALNUT_FLASH_HIGH 0xFFF80000 -#define WALNUT_FLASH_SIZE 0x80000 - -static struct mtd_info *flash; - -static struct map_info walnut_map = { - .name = "Walnut flash", - .size = WALNUT_FLASH_SIZE, - .bankwidth = 1, -}; - -/* Actually, OpenBIOS is the last 128 KiB of the flash - better - * partitioning could be made */ -static struct mtd_partition walnut_partitions[] = { - { - .name = "OpenBIOS", - .offset = 0x0, - .size = WALNUT_FLASH_SIZE, - /*.mask_flags = MTD_WRITEABLE, */ /* force read-only */ - } -}; - -int __init init_walnut(void) -{ - u8 fpga_brds1; - void *fpga_brds1_adr; - void *fpga_status_adr; - unsigned long flash_base; - - /* this should already be mapped (platform/4xx/walnut.c) */ - fpga_status_adr = ioremap(WALNUT_FPGA_BASE, 8); - if (!fpga_status_adr) - return -ENOMEM; - - fpga_brds1_adr = fpga_status_adr+5; - fpga_brds1 = readb(fpga_brds1_adr); - /* iounmap(fpga_status_adr); */ - - if (WALNUT_FLASH_ONBD_N(fpga_brds1)) { - printk("The on-board flash is disabled (U79 sw 5)!"); - iounmap(fpga_status_adr); - return -EIO; - } - if (WALNUT_FLASH_SRAM_SEL(fpga_brds1)) - flash_base = WALNUT_FLASH_LOW; - else - flash_base = WALNUT_FLASH_HIGH; - - walnut_map.phys = flash_base; - walnut_map.virt = - (void __iomem *)ioremap(flash_base, walnut_map.size); - - if (!walnut_map.virt) { - printk("Failed to ioremap flash.\n"); - iounmap(fpga_status_adr); - return -EIO; - } - - simple_map_init(&walnut_map); - - flash = do_map_probe("jedec_probe", &walnut_map); - if (flash) { - flash->owner = THIS_MODULE; - add_mtd_partitions(flash, walnut_partitions, - ARRAY_SIZE(walnut_partitions)); - } else { - printk("map probe failed for flash\n"); - iounmap(fpga_status_adr); - return -ENXIO; - } - - iounmap(fpga_status_adr); - return 0; -} - -static void __exit cleanup_walnut(void) -{ - if (flash) { - del_mtd_partitions(flash); - map_destroy(flash); - } - - if (walnut_map.virt) { - iounmap((void *)walnut_map.virt); - walnut_map.virt = 0; - } -} - -module_init(init_walnut); -module_exit(cleanup_walnut); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Heikki Lindholm "); -MODULE_DESCRIPTION("MTD map and partitions for IBM 405GP Walnut boards"); -- cgit v0.10.2 From c8872b069c536976b81bccfc95dda945594bc504 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 2 Aug 2008 17:14:21 +0200 Subject: [MTD] Use DIV_ROUND_UP The kernel.h macro DIV_ROUND_UP performs the computation (((n) + (d) - 1) / (d)) but is perhaps more readable. 
Signed-off-by: Julia Lawall Signed-off-by: David Woodhouse diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 5f1b472..d49cbe2 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -1640,7 +1640,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, /* Figure out the number of words to write */ word_gap = (-adr & (map_bankwidth(map)-1)); - words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map); + words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map)); if (!word_gap) { words--; } else { diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c index f061885..e2dc964 100644 --- a/drivers/mtd/chips/gen_probe.c +++ b/drivers/mtd/chips/gen_probe.c @@ -111,7 +111,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi max_chips = 1; } - mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG ); + mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG); chip_map = kzalloc(mapsize, GFP_KERNEL); if (!chip_map) { printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name); diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index a5f3d60..33a5d6e 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c @@ -321,8 +321,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len, - (ssfdc->map_len + MAX_PHYS_BLK_PER_ZONE - 1) / - MAX_PHYS_BLK_PER_ZONE); + DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE)); /* Set geometry */ ssfdc->heads = 16; -- cgit v0.10.2 From 16e00b609aed439453d57b954b449f647466e0d7 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 4 Aug 2008 11:25:23 +0100 Subject: [MTD] Remove references to TI 'toto' platform. This was a reference board for which support never got merged upstream. Kill it off, at rmk's suggestion. Signed-off-by: David Woodhouse diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index db667b1..3ae76ec 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -434,13 +434,6 @@ config MTD_CEIVA PhotoMax Digital Picture Frame. If you have such a device, say 'Y'. -config MTD_NOR_TOTO - tristate "NOR Flash device on TOTO board" - depends on ARCH_OMAP && OMAP_TOTO - help - This enables access to the NOR flash on the Texas Instruments - TOTO board. - config MTD_H720X tristate "Hynix evaluation board mappings" depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index b258250..6d9ba35 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -52,7 +52,6 @@ obj-$(CONFIG_MTD_NETtel) += nettel.o obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o obj-$(CONFIG_MTD_H720X) += h720x-flash.o obj-$(CONFIG_MTD_SBC8240) += sbc8240.o -obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o obj-$(CONFIG_MTD_IXP2000) += ixp2000.o obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c deleted file mode 100644 index 0a60ebb..0000000 --- a/drivers/mtd/maps/omap-toto-flash.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * NOR Flash memory access on TI Toto board - * - * jzhang@ti.com (C) 2003 Texas Instruments. - * - * (C) 2002 MontVista Software, Inc. 
- */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include - - -#ifndef CONFIG_ARCH_OMAP -#error This is for OMAP architecture only -#endif - -//these lines need be moved to a hardware header file -#define OMAP_TOTO_FLASH_BASE 0xd8000000 -#define OMAP_TOTO_FLASH_SIZE 0x80000 - -static struct map_info omap_toto_map_flash = { - .name = "OMAP Toto flash", - .bankwidth = 2, - .virt = (void __iomem *)OMAP_TOTO_FLASH_BASE, -}; - - -static struct mtd_partition toto_flash_partitions[] = { - { - .name = "BootLoader", - .size = 0x00040000, /* hopefully u-boot will stay 128k + 128*/ - .offset = 0, - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, { - .name = "ReservedSpace", - .size = 0x00030000, - .offset = MTDPART_OFS_APPEND, - //mask_flags: MTD_WRITEABLE, /* force read-only */ - }, { - .name = "EnvArea", /* bottom 64KiB for env vars */ - .size = MTDPART_SIZ_FULL, - .offset = MTDPART_OFS_APPEND, - } -}; - -static struct mtd_partition *parsed_parts; - -static struct mtd_info *flash_mtd; - -static int __init init_flash (void) -{ - - struct mtd_partition *parts; - int nb_parts = 0; - int parsed_nr_parts = 0; - const char *part_type; - - /* - * Static partition definition selection - */ - part_type = "static"; - - parts = toto_flash_partitions; - nb_parts = ARRAY_SIZE(toto_flash_partitions); - omap_toto_map_flash.size = OMAP_TOTO_FLASH_SIZE; - omap_toto_map_flash.phys = virt_to_phys(OMAP_TOTO_FLASH_BASE); - - simple_map_init(&omap_toto_map_flash); - /* - * Now let's probe for the actual flash. Do it here since - * specific machine settings might have been set above. - */ - printk(KERN_NOTICE "OMAP toto flash: probing %d-bit flash bus\n", - omap_toto_map_flash.bankwidth*8); - flash_mtd = do_map_probe("jedec_probe", &omap_toto_map_flash); - if (!flash_mtd) - return -ENXIO; - - if (parsed_nr_parts > 0) { - parts = parsed_parts; - nb_parts = parsed_nr_parts; - } - - if (nb_parts == 0) { - printk(KERN_NOTICE "OMAP toto flash: no partition info available," - "registering whole flash at once\n"); - if (add_mtd_device(flash_mtd)){ - return -ENXIO; - } - } else { - printk(KERN_NOTICE "Using %s partition definition\n", - part_type); - return add_mtd_partitions(flash_mtd, parts, nb_parts); - } - return 0; -} - -int __init omap_toto_mtd_init(void) -{ - int status; - - if (status = init_flash()) { - printk(KERN_ERR "OMAP Toto Flash: unable to init map for toto flash\n"); - } - return status; -} - -static void __exit omap_toto_mtd_cleanup(void) -{ - if (flash_mtd) { - del_mtd_partitions(flash_mtd); - map_destroy(flash_mtd); - kfree(parsed_parts); - } -} - -module_init(omap_toto_mtd_init); -module_exit(omap_toto_mtd_cleanup); - -MODULE_AUTHOR("Jian Zhang"); -MODULE_DESCRIPTION("OMAP Toto board map driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 02f9cc3..572c842 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -68,12 +68,6 @@ config MTD_NAND_AMS_DELTA help Support for NAND flash on Amstrad E3 (Delta). -config MTD_NAND_TOTO - tristate "NAND Flash device on TOTO board" - depends on ARCH_OMAP && BROKEN - help - Support for NAND flash on Texas Instruments Toto platform. 
- config MTD_NAND_TS7250 tristate "NAND Flash device on TS-7250 board" depends on MACH_TS72XX diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index d772581..b55e4c6 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o obj-$(CONFIG_MTD_NAND_SPIA) += spia.o obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o -obj-$(CONFIG_MTD_NAND_TOTO) += toto.o obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c deleted file mode 100644 index bbf492e..0000000 --- a/drivers/mtd/nand/toto.c +++ /dev/null @@ -1,206 +0,0 @@ -/* - * drivers/mtd/nand/toto.c - * - * Copyright (c) 2003 Texas Instruments - * - * Derived from drivers/mtd/autcpu12.c - * - * Copyright (c) 2002 Thomas Gleixner - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the NAND flash device found on the - * TI fido board. It supports 32MiB and 64MiB cards - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CONFIG_NAND_WORKAROUND 1 - -/* - * MTD structure for TOTO board - */ -static struct mtd_info *toto_mtd = NULL; - -static unsigned long toto_io_base = OMAP_FLASH_1_BASE; - -/* - * Define partitions for flash devices - */ - -static struct mtd_partition partition_info64M[] = { - { .name = "toto kernel partition 1", - .offset = 0, - .size = 2 * SZ_1M }, - { .name = "toto file sys partition 2", - .offset = 2 * SZ_1M, - .size = 14 * SZ_1M }, - { .name = "toto user partition 3", - .offset = 16 * SZ_1M, - .size = 16 * SZ_1M }, - { .name = "toto devboard extra partition 4", - .offset = 32 * SZ_1M, - .size = 32 * SZ_1M }, -}; - -static struct mtd_partition partition_info32M[] = { - { .name = "toto kernel partition 1", - .offset = 0, - .size = 2 * SZ_1M }, - { .name = "toto file sys partition 2", - .offset = 2 * SZ_1M, - .size = 14 * SZ_1M }, - { .name = "toto user partition 3", - .offset = 16 * SZ_1M, - .size = 16 * SZ_1M }, -}; - -#define NUM_PARTITIONS32M 3 -#define NUM_PARTITIONS64M 4 - -/* - * hardware specific access to control-lines - * - * ctrl: - * NAND_NCE: bit 0 -> bit 14 (0x4000) - * NAND_CLE: bit 1 -> bit 12 (0x1000) - * NAND_ALE: bit 2 -> bit 1 (0x0002) - */ -static void toto_hwcontrol(struct mtd_info *mtd, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (ctrl & NAND_CTRL_CHANGE) { - unsigned long bits; - - /* hopefully enough time for tc make proceding write to clear */ - udelay(1); - - bits = (~ctrl & NAND_NCE) << 14; - bits |= (ctrl & NAND_CLE) << 12; - bits |= (ctrl & NAND_ALE) >> 1; - -#warning Wild guess as gpiosetout() is nowhere defined in the kernel source - tglx - gpiosetout(0x5002, bits); - -#ifdef CONFIG_NAND_WORKAROUND - /* "some" dev boards busted, blue wired to rts2 :( */ - rts2setout(2, (ctrl & NAND_CLE) << 1); -#endif - /* allow time to ensure gpio state to over take memory write */ - udelay(1); - } - - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W); -} - -/* - * Main initialization routine - */ -static int __init toto_init(void) -{ - struct nand_chip *this; - int err = 0; - - /* Allocate memory for MTD device structure and 
private data */ - toto_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!toto_mtd) { - printk(KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n"); - err = -ENOMEM; - goto out; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&toto_mtd[1]); - - /* Initialize structures */ - memset(toto_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - toto_mtd->priv = this; - toto_mtd->owner = THIS_MODULE; - - /* Set address of NAND IO lines */ - this->IO_ADDR_R = toto_io_base; - this->IO_ADDR_W = toto_io_base; - this->cmd_ctrl = toto_hwcontrol; - this->dev_ready = NULL; - /* 25 us command delay time */ - this->chip_delay = 30; - this->ecc.mode = NAND_ECC_SOFT; - - /* Scan to find existance of the device */ - if (nand_scan(toto_mtd, 1)) { - err = -ENXIO; - goto out_mtd; - } - - /* Register the partitions */ - switch (toto_mtd->size) { - case SZ_64M: - add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M); - break; - case SZ_32M: - add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M); - break; - default:{ - printk(KERN_WARNING "Unsupported Nand device\n"); - err = -ENXIO; - goto out_buf; - } - } - - gpioreserve(NAND_MASK); /* claim our gpios */ - archflashwp(0, 0); /* open up flash for writing */ - - goto out; - - out_mtd: - kfree(toto_mtd); - out: - return err; -} - -module_init(toto_init); - -/* - * Clean up routine - */ -static void __exit toto_cleanup(void) -{ - /* Release resources, unregister device */ - nand_release(toto_mtd); - - /* Free the MTD device structure */ - kfree(toto_mtd); - - /* stop flash writes */ - archflashwp(0, 1); - - /* release gpios to system */ - gpiorelease(NAND_MASK); -} - -module_exit(toto_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Richard Woodruff "); -MODULE_DESCRIPTION("Glue layer for NAND flash on toto board"); -- cgit v0.10.2 From a0e7229edbfef9495e73bc8baea2131a7e69e365 Mon Sep 17 00:00:00 2001 From: "George G. Davis" Date: Mon, 4 Aug 2008 19:43:25 -0400 Subject: [MTD] [NOR] Add "Spansion" to MTD_CFI_AMDSTD kconfig menu description This long overdue trivial change to the MTD_CFI_AMDSTD kconfig menu description is intended to help clarify that this option also supports Spansion flash devices. Signed-off-by: George G. Davis Signed-off-by: David Woodhouse diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index 4c35e5d..9401bfe 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -188,7 +188,7 @@ config MTD_CFI_INTELEXT StrataFlash and other parts. config MTD_CFI_AMDSTD - tristate "Support for AMD/Fujitsu flash chips" + tristate "Support for AMD/Fujitsu/Spansion flash chips" depends on MTD_GEN_PROBE select MTD_CFI_UTIL help -- cgit v0.10.2 From 2e489e077a6ad118c4f247faedf330117b107cce Mon Sep 17 00:00:00 2001 From: Alexey Korolev Date: Tue, 5 Aug 2008 16:39:42 +0100 Subject: [MTD] [NOR] Add qry_mode_on()/qry_mode_off() to deal with odd chips There are some CFI chips which require non-standard procedures to get into QRY mode. The way to support them is to try different modes until QRY can be read. This patch introduces two new functions, qry_mode_on and qry_mode_off. qry_mode_on tries different commands in order to switch the chip into QRY mode. So if we run into one more "odd" chip, we just add several lines to qry_mode_on. Using these functions also removes unnecessary code duplication in the probe procedure. 
Currently there are two "odd" cases 1. Some old intel chips which require 0xFF before 0x98 2. ST M29DW chip which requires 0x98 to be sent at 0x555 (according to CFI should be 0x55) This patch is partialy based on the patch from Uwe (see "[PATCH 2/4] [RFC][MTD] cfi_probe: remove Intel chip workaround" thread ) Signed-off-by: Alexey Korolev Signed-off-by: Alexander Belyakov Signed-off-by: David Woodhouse diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index c418e92..e706be2 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c @@ -44,17 +44,14 @@ do { \ #define xip_enable(base, map, cfi) \ do { \ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \ - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \ + qry_mode_off(base, map, cfi); \ xip_allowed(base, map); \ } while (0) #define xip_disable_qry(base, map, cfi) \ do { \ xip_disable(); \ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \ - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \ - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); \ + qry_mode_on(base, map, cfi); \ } while (0) #else @@ -70,32 +67,6 @@ do { \ in: interleave,type,mode ret: table index, <0 for error */ -static int __xipram qry_present(struct map_info *map, __u32 base, - struct cfi_private *cfi) -{ - int osf = cfi->interleave * cfi->device_type; // scale factor - map_word val[3]; - map_word qry[3]; - - qry[0] = cfi_build_cmd('Q', map, cfi); - qry[1] = cfi_build_cmd('R', map, cfi); - qry[2] = cfi_build_cmd('Y', map, cfi); - - val[0] = map_read(map, base + osf*0x10); - val[1] = map_read(map, base + osf*0x11); - val[2] = map_read(map, base + osf*0x12); - - if (!map_word_equal(map, qry[0], val[0])) - return 0; - - if (!map_word_equal(map, qry[1], val[1])) - return 0; - - if (!map_word_equal(map, qry[2], val[2])) - return 0; - - return 1; // "QRY" found -} static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, unsigned long *chip_map, struct cfi_private *cfi) @@ -116,11 +87,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, } xip_disable(); - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - - if (!qry_present(map,base,cfi)) { + if (!qry_mode_on(base, map, cfi)) { xip_enable(base, map, cfi); return 0; } @@ -144,8 +111,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, if (qry_present(map, start, cfi)) { /* Eep. This chip also had the QRY marker. * Is it an alias for the new one? */ - cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); + qry_mode_off(start, map, cfi); /* If the QRY marker goes away, it's an alias */ if (!qry_present(map, start, cfi)) { @@ -158,8 +124,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, * unfortunate. Stick the new chip in read mode * too and if it's the same, assume it's an alias. 
*/ /* FIXME: Use other modes to do a proper check */ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); + qry_mode_off(base, map, cfi); if (qry_present(map, base, cfi)) { xip_allowed(base, map); @@ -176,8 +141,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, cfi->numchips++; /* Put it back into Read Mode */ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); + qry_mode_off(base, map, cfi); xip_allowed(base, map); printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", @@ -237,9 +201,7 @@ static int __xipram cfi_chip_setup(struct map_info *map, cfi_read_query(map, base + 0xf * ofs_factor); /* Put it back into Read Mode */ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - /* ... even if it's an Intel chip */ - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); + qry_mode_off(base, map, cfi); xip_allowed(base, map); /* Do any necessary byteswapping */ diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index 0ee4570..8d75536 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -24,6 +24,62 @@ #include #include +int __xipram qry_present(struct map_info *map, __u32 base, + struct cfi_private *cfi) +{ + int osf = cfi->interleave * cfi->device_type; /* scale factor */ + map_word val[3]; + map_word qry[3]; + + qry[0] = cfi_build_cmd('Q', map, cfi); + qry[1] = cfi_build_cmd('R', map, cfi); + qry[2] = cfi_build_cmd('Y', map, cfi); + + val[0] = map_read(map, base + osf*0x10); + val[1] = map_read(map, base + osf*0x11); + val[2] = map_read(map, base + osf*0x12); + + if (!map_word_equal(map, qry[0], val[0])) + return 0; + + if (!map_word_equal(map, qry[1], val[1])) + return 0; + + if (!map_word_equal(map, qry[2], val[2])) + return 0; + + return 1; /* "QRY" found */ +} + +int __xipram qry_mode_on(uint32_t base, struct map_info *map, + struct cfi_private *cfi) +{ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); + if (qry_present(map, base, cfi)) + return 1; + /* QRY not found probably we deal with some odd CFI chips */ + /* Some revisions of some old Intel chips? 
*/ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); + if (qry_present(map, base, cfi)) + return 1; + /* ST M29DW chips */ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); + if (qry_present(map, base, cfi)) + return 1; + /* QRY not found */ + return 0; +} +void __xipram qry_mode_off(uint32_t base, struct map_info *map, + struct cfi_private *cfi) +{ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); +} + struct cfi_extquery * __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) { @@ -48,8 +104,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n #endif /* Switch it into Query Mode */ - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - + qry_mode_on(base, map, cfi); /* Read in the Extended Query Table */ for (i=0; idevice_type, NULL); - cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL); + qry_mode_off(base, map, cfi); #ifdef CONFIG_MTD_XIP (void) map_read(map, base); diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index d6fb115..3058917 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_MTD_CFI_I1 #define cfi_interleave(cfi) 1 @@ -430,7 +431,6 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t { map_word val; uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type); - val = cfi_build_cmd(cmd, map, cfi); if (prev_val) @@ -483,6 +483,13 @@ static inline void cfi_udelay(int us) } } +int __xipram qry_present(struct map_info *map, __u32 base, + struct cfi_private *cfi); +int __xipram qry_mode_on(uint32_t base, struct map_info *map, + struct cfi_private *cfi); +void __xipram qry_mode_off(uint32_t base, struct map_info *map, + struct cfi_private *cfi); + struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size, const char* name); struct cfi_fixup { -- cgit v0.10.2 From e93cafe45fd74935e0aca2b79e533f0e3ed9640f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20Grafstr=C3=B6m?= Date: Tue, 5 Aug 2008 18:37:41 +0200 Subject: [MTD] [NOR] cfi_cmdset_0001: Timeouts for erase, write and unlock operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Timeouts are currently given by the typical operation time times 8. It works in the general well-behaved case but not when an erase block is failing. For erase operations, it seems that a failing erase block will keep the device state machine in erasing state until the vendor specified maximum timeout period has passed. By this time the driver would have long since timed out, left erasing state and attempted further operations which all fail. This patch implements timeouts using values from the CFI Query structure when available. The patch also sets a longer timeout for locking operations. The current value used for locking/unlocking given by 1000000/HZ microseconds is too short for devices like J3 and J5 Strataflash which have a typical clear lock-bits time of 0.5 seconds. 
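As an illustration of where the new *_time_max values come from (a sketch only, assuming the usual struct cfi_ident field names from <linux/mtd/cfi.h>; the helper is hypothetical): the CFI query structure stores the typical word-write time as 2^Typ microseconds and the maximum as the typical value times 2^Max, so the maximum is simply 2^(Typ + Max) microseconds.

#include <linux/mtd/cfi.h>

/* word_write_timeout_max_us() is a hypothetical helper for this sketch only. */
static inline unsigned int word_write_timeout_max_us(const struct cfi_ident *cfiq)
{
	/* e.g. Typ = 4 (16 us typical) and Max = 4 (16x factor) give 1 << 8 = 256 us,
	 * rather than the old guess of "typical times 8" = 128 us */
	return 1u << (cfiq->WordWriteTimeoutTyp + cfiq->WordWriteTimeoutMax);
}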
Signed-off-by: Anders Grafström Signed-off-by: David Woodhouse diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index d49cbe2..5157e3c 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -478,6 +478,28 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) else cfi->chips[i].erase_time = 2000000; + if (cfi->cfiq->WordWriteTimeoutTyp && + cfi->cfiq->WordWriteTimeoutMax) + cfi->chips[i].word_write_time_max = + 1<<(cfi->cfiq->WordWriteTimeoutTyp + + cfi->cfiq->WordWriteTimeoutMax); + else + cfi->chips[i].word_write_time_max = 50000 * 8; + + if (cfi->cfiq->BufWriteTimeoutTyp && + cfi->cfiq->BufWriteTimeoutMax) + cfi->chips[i].buffer_write_time_max = + 1<<(cfi->cfiq->BufWriteTimeoutTyp + + cfi->cfiq->BufWriteTimeoutMax); + + if (cfi->cfiq->BlockEraseTimeoutTyp && + cfi->cfiq->BlockEraseTimeoutMax) + cfi->chips[i].erase_time_max = + 1000<<(cfi->cfiq->BlockEraseTimeoutTyp + + cfi->cfiq->BlockEraseTimeoutMax); + else + cfi->chips[i].erase_time_max = 2000000 * 8; + cfi->chips[i].ref_point_counter = 0; init_waitqueue_head(&(cfi->chips[i].wq)); } @@ -1012,7 +1034,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip, static int __xipram xip_wait_for_operation( struct map_info *map, struct flchip *chip, - unsigned long adr, unsigned int chip_op_time ) + unsigned long adr, unsigned int chip_op_time_max) { struct cfi_private *cfi = map->fldrv_priv; struct cfi_pri_intelext *cfip = cfi->cmdset_priv; @@ -1021,7 +1043,7 @@ static int __xipram xip_wait_for_operation( flstate_t oldstate, newstate; start = xip_currtime(); - usec = chip_op_time * 8; + usec = chip_op_time_max; if (usec == 0) usec = 500000; done = 0; @@ -1131,8 +1153,8 @@ static int __xipram xip_wait_for_operation( #define XIP_INVAL_CACHED_RANGE(map, from, size) \ INVALIDATE_CACHED_RANGE(map, from, size) -#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \ - xip_wait_for_operation(map, chip, cmd_adr, usec) +#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \ + xip_wait_for_operation(map, chip, cmd_adr, usec_max) #else @@ -1144,7 +1166,7 @@ static int __xipram xip_wait_for_operation( static int inval_cache_and_wait_for_operation( struct map_info *map, struct flchip *chip, unsigned long cmd_adr, unsigned long inval_adr, int inval_len, - unsigned int chip_op_time) + unsigned int chip_op_time, unsigned int chip_op_time_max) { struct cfi_private *cfi = map->fldrv_priv; map_word status, status_OK = CMD(0x80); @@ -1156,8 +1178,7 @@ static int inval_cache_and_wait_for_operation( INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); spin_lock(chip->mutex); - /* set our timeout to 8 times the expected delay */ - timeo = chip_op_time * 8; + timeo = chip_op_time_max; if (!timeo) timeo = 500000; reset_timeo = timeo; @@ -1217,8 +1238,8 @@ static int inval_cache_and_wait_for_operation( #endif -#define WAIT_TIMEOUT(map, chip, adr, udelay) \ - INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay); +#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \ + INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max); static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) @@ -1452,7 +1473,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, map_bankwidth(map), - chip->word_write_time); + chip->word_write_time, + chip->word_write_time_max); if (ret) { 
xip_enable(map, chip, adr); printk(KERN_ERR "%s: word write error (status timeout)\n", map->name); @@ -1623,7 +1645,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, chip->state = FL_WRITING_TO_BUFFER; map_write(map, write_cmd, cmd_adr); - ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0); + ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0); if (ret) { /* Argh. Not ready for write to buffer */ map_word Xstatus = map_read(map, cmd_adr); @@ -1692,7 +1714,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, initial_adr, initial_len, - chip->buffer_write_time); + chip->buffer_write_time, + chip->buffer_write_time_max); if (ret) { map_write(map, CMD(0x70), cmd_adr); chip->state = FL_STATUS; @@ -1827,7 +1850,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len, - chip->erase_time); + chip->erase_time, + chip->erase_time_max); if (ret) { map_write(map, CMD(0x70), adr); chip->state = FL_STATUS; @@ -2006,7 +2030,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip */ udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0; - ret = WAIT_TIMEOUT(map, chip, adr, udelay); + ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100); if (ret) { map_write(map, CMD(0x70), adr); chip->state = FL_STATUS; diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index 08dd131..d4f38c5 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -73,6 +73,10 @@ struct flchip { int buffer_write_time; int erase_time; + int word_write_time_max; + int buffer_write_time_max; + int erase_time_max; + void *priv; }; -- cgit v0.10.2 From cf93ae02600e2c752bf2570085e7970a1c0f2b94 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 6 Aug 2008 13:12:04 -0700 Subject: [MTD] Compile fix for dataflash OTP support > > linux-next-20080805/drivers/mtd/devices/mtd_dataflash.c: In function 'add_dataflash_otp': > > linux-next-20080805/drivers/mtd/devices/mtd_dataflash.c:670: error: too many arguments to function 'otp_setup' Whoops, sorry ... I see what was going on. My bad. Signed-off-by: David Brownell Signed-off-by: David Woodhouse diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 17c9b20..9016127 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -618,7 +618,7 @@ static char *otp_setup(struct mtd_info *device, char revision) #else -static char *otp_setup(struct mtd_info *device) +static char *otp_setup(struct mtd_info *device, char revision) { return " (OTP)"; } -- cgit v0.10.2 From 8c64038e4c077b2b37c6b27d0c40c77a3ddfaeef Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 6 Aug 2008 21:55:14 -0700 Subject: [MTD] make dataflash write-verify be optional This adds a WRITE_VERIFY Kconfig option to the DataFlash driver, closely mirroring the similar NAND and ONENAND options, giving an option to disable some code that's currently always enabled. Removing this step probably saves a millisecond or so per page when writing data, which will add up quickly since these pages are small (the largest is 1 KiB). It doesn't seem to add a lot in terms of reliability, and wouldn't detect errors which crop up when transferring data to the on-chip SRAM buffer. 
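To put the quoted saving in rough perspective (a back-of-envelope sketch, not code from the patch; the one-millisecond-per-page figure comes from the paragraph above and the helper name is made up):

/* verify_overhead_ms() is hypothetical; DIV_ROUND_UP comes from <linux/kernel.h>. */
static inline unsigned int verify_overhead_ms(unsigned int nbytes, unsigned int page_size)
{
	/* roughly one extra millisecond of read-back compare per programmed page,
	 * e.g. a 4 MiB write through 1 KiB pages spends about 4096 ms verifying */
	return DIV_ROUND_UP(nbytes, page_size);
}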
Signed-off-by: David Brownell Acked-by: Haavard Skinnemoen Acked-by: Andrew Victor Signed-off-by: David Woodhouse diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 88f4df0..6fde0a2 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -59,6 +59,16 @@ config MTD_DATAFLASH Sometimes DataFlash chips are packaged inside MMC-format cards; at this writing, the MMC stack won't handle those. +config MTD_DATAFLASH_WRITE_VERIFY + bool "Verify DataFlash page writes" + depends on MTD_DATAFLASH + help + This adds an extra check when data is written to the flash. + It may help if you are verifying chip setup (timings etc) on + your board. There is a rare possibility that even though the + device thinks the write was successful, a bit could have been + flipped accidentally due to device wear or something else. + config MTD_DATAFLASH_OTP bool "DataFlash OTP support (Security Register)" depends on MTD_DATAFLASH diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 9016127..6dd9aff 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -30,12 +30,10 @@ * doesn't (yet) use these for any kind of i/o overlap or prefetching. * * Sometimes DataFlash is packaged in MMC-format cards, although the - * MMC stack can't use SPI (yet), or distinguish between MMC and DataFlash + * MMC stack can't (yet?) distinguish between MMC and DataFlash * protocols during enumeration. */ -#define CONFIG_DATAFLASH_WRITE_VERIFY - /* reads can bypass the buffers */ #define OP_READ_CONTINUOUS 0xE8 #define OP_READ_PAGE 0xD2 @@ -403,7 +401,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, (void) dataflash_waitready(priv->spi); -#ifdef CONFIG_DATAFLASH_WRITE_VERIFY +#ifdef CONFIG_MTD_DATAFLASH_VERIFY_WRITE /* (3) Compare to Buffer1 */ addr = pageaddr << priv->page_offset; @@ -432,7 +430,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, } else status = 0; -#endif /* CONFIG_DATAFLASH_WRITE_VERIFY */ +#endif /* CONFIG_MTD_DATAFLASH_VERIFY_WRITE */ remaining = remaining - writelen; pageaddr++; -- cgit v0.10.2 From c314dfdc358847eef0fc07ec8682e1acc8cadd00 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 7 Aug 2008 11:55:07 +0100 Subject: [MTD] [NOR] Rename and export new cfi_qry_*() functions They need to be exported, so let's give them less generic-sounding names while we're at it. Original export patch, along with the suggestion about the nomenclature, from Stephen Rothwell. Signed-off-by: David Woodhouse diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index e706be2..e63e674 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c @@ -44,14 +44,14 @@ do { \ #define xip_enable(base, map, cfi) \ do { \ - qry_mode_off(base, map, cfi); \ + cfi_qry_mode_off(base, map, cfi); \ xip_allowed(base, map); \ } while (0) #define xip_disable_qry(base, map, cfi) \ do { \ xip_disable(); \ - qry_mode_on(base, map, cfi); \ + cfi_qry_mode_on(base, map, cfi); \ } while (0) #else @@ -87,7 +87,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, } xip_disable(); - if (!qry_mode_on(base, map, cfi)) { + if (!cfi_qry_mode_on(base, map, cfi)) { xip_enable(base, map, cfi); return 0; } @@ -108,13 +108,13 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, start = i << cfi->chipshift; /* This chip should be in read mode if it's one we've already touched. 
*/ - if (qry_present(map, start, cfi)) { + if (cfi_qry_present(map, start, cfi)) { /* Eep. This chip also had the QRY marker. * Is it an alias for the new one? */ - qry_mode_off(start, map, cfi); + cfi_qry_mode_off(start, map, cfi); /* If the QRY marker goes away, it's an alias */ - if (!qry_present(map, start, cfi)) { + if (!cfi_qry_present(map, start, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); @@ -124,9 +124,9 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, * unfortunate. Stick the new chip in read mode * too and if it's the same, assume it's an alias. */ /* FIXME: Use other modes to do a proper check */ - qry_mode_off(base, map, cfi); + cfi_qry_mode_off(base, map, cfi); - if (qry_present(map, base, cfi)) { + if (cfi_qry_present(map, base, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); @@ -141,7 +141,7 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, cfi->numchips++; /* Put it back into Read Mode */ - qry_mode_off(base, map, cfi); + cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", @@ -201,7 +201,7 @@ static int __xipram cfi_chip_setup(struct map_info *map, cfi_read_query(map, base + 0xf * ofs_factor); /* Put it back into Read Mode */ - qry_mode_off(base, map, cfi); + cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); /* Do any necessary byteswapping */ diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index 8d75536..34d40e2 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -24,8 +24,8 @@ #include #include -int __xipram qry_present(struct map_info *map, __u32 base, - struct cfi_private *cfi) +int __xipram cfi_qry_present(struct map_info *map, __u32 base, + struct cfi_private *cfi) { int osf = cfi->interleave * cfi->device_type; /* scale factor */ map_word val[3]; @@ -50,35 +50,39 @@ int __xipram qry_present(struct map_info *map, __u32 base, return 1; /* "QRY" found */ } +EXPORT_SYMBOL_GPL(cfi_qry_present); -int __xipram qry_mode_on(uint32_t base, struct map_info *map, - struct cfi_private *cfi) +int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, + struct cfi_private *cfi) { cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - if (qry_present(map, base, cfi)) + if (cfi_qry_present(map, base, cfi)) return 1; /* QRY not found probably we deal with some odd CFI chips */ /* Some revisions of some old Intel chips? 
*/ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - if (qry_present(map, base, cfi)) + if (cfi_qry_present(map, base, cfi)) return 1; /* ST M29DW chips */ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); - if (qry_present(map, base, cfi)) + if (cfi_qry_present(map, base, cfi)) return 1; /* QRY not found */ return 0; } -void __xipram qry_mode_off(uint32_t base, struct map_info *map, - struct cfi_private *cfi) +EXPORT_SYMBOL_GPL(cfi_qry_mode_on); + +void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, + struct cfi_private *cfi) { cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); } +EXPORT_SYMBOL_GPL(cfi_qry_mode_off); struct cfi_extquery * __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) @@ -104,7 +108,7 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n #endif /* Switch it into Query Mode */ - qry_mode_on(base, map, cfi); + cfi_qry_mode_on(base, map, cfi); /* Read in the Extended Query Table */ for (i=0; i Date: Sun, 10 Aug 2008 18:46:50 +0800 Subject: [MTD] [NAND] drivers/mtd/nand/nandsim.c: remove duplicated #include Removed duplicated include in drivers/mtd/nand/nandsim.c. Signed-off-by: Huang Weiyi Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 556e813..ae7c577 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -38,7 +38,6 @@ #include #include #include -#include /* Default simulator parameters values */ #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ -- cgit v0.10.2 From faff37508a104e9ec5285d5adecaab7e8dde472a Mon Sep 17 00:00:00 2001 From: Chen Gong Date: Mon, 11 Aug 2008 16:59:13 +0800 Subject: [MTD] m25p80.c erase enhance This patch adds an erase_block command to enhance erase operation Signed-off-by: Chen Gong Signed-off-by: David Woodhouse diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index b35c333..8fbd1b5 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -39,6 +39,7 @@ #define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ #define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ #define OPCODE_BE_32K 0x52 /* Erase 32KiB block */ +#define OPCODE_BE 0xc7 /* Erase whole flash block */ #define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */ #define OPCODE_RDID 0x9f /* Read JEDEC ID */ @@ -161,6 +162,31 @@ static int wait_till_ready(struct m25p *flash) return 1; } +/* + * Erase the whole flash memory + * + * Returns 0 if successful, non-zero otherwise. + */ +static int erase_block(struct m25p *flash) +{ + DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n", + flash->spi->dev.bus_id, __func__, + flash->mtd.size / 1024); + + /* Wait until finished previous write command. */ + if (wait_till_ready(flash)) + return 1; + + /* Send write enable, then erase commands. */ + write_enable(flash); + + /* Set up command buffer. 
*/ + flash->command[0] = OPCODE_BE; + + spi_write(flash->spi, flash->command, 1); + + return 0; +} /* * Erase one sector of flash memory at offset ``offset'' which is any @@ -229,15 +255,21 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr) */ /* now erase those sectors */ - while (len) { - if (erase_sector(flash, addr)) { - instr->state = MTD_ERASE_FAILED; - mutex_unlock(&flash->lock); - return -EIO; - } + if (len == flash->mtd.size && erase_block(flash)) { + instr->state = MTD_ERASE_FAILED; + mutex_unlock(&flash->lock); + return -EIO; + } else { + while (len) { + if (erase_sector(flash, addr)) { + instr->state = MTD_ERASE_FAILED; + mutex_unlock(&flash->lock); + return -EIO; + } - addr += mtd->erasesize; - len -= mtd->erasesize; + addr += mtd->erasesize; + len -= mtd->erasesize; + } } mutex_unlock(&flash->lock); -- cgit v0.10.2 From 75d0ee2202b5740e94e913d8a52f91c6557c4c81 Mon Sep 17 00:00:00 2001 From: Chen Gong Date: Mon, 11 Aug 2008 16:59:14 +0800 Subject: [MTD] m25p80.c code cleanup code cleanup for m25p80.c Signed-off-by: Chen Gong Signed-off-by: David Woodhouse diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 8fbd1b5..b2b58c1 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -134,7 +134,7 @@ static inline int write_enable(struct m25p *flash) { u8 code = OPCODE_WREN; - return spi_write_then_read(flash->spi, &code, 1, NULL, 0); + return spi_write(flash->spi, &code, 1); } -- cgit v0.10.2 From d0e8c47c58575b9131e786edb488fd029eba443e Mon Sep 17 00:00:00 2001 From: Chen Gong Date: Mon, 11 Aug 2008 16:59:15 +0800 Subject: [MTD] m25p80.c extended jedec support - add extended device information support - add s25sl128 device support Signed-off-by: Chen Gong Signed-off-by: David Woodhouse diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index b2b58c1..4d3ae08 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -469,6 +469,7 @@ struct flash_info { * then a two byte device id. */ u32 jedec_id; + u16 ext_id; /* The size listed here is what works with OPCODE_SE, which isn't * necessarily called a "sector" by the vendor. @@ -488,57 +489,59 @@ struct flash_info { static struct flash_info __devinitdata m25p_data [] = { /* Atmel -- some are (confusingly) marketed as "DataFlash" */ - { "at25fs010", 0x1f6601, 32 * 1024, 4, SECT_4K, }, - { "at25fs040", 0x1f6604, 64 * 1024, 8, SECT_4K, }, + { "at25fs010", 0x1f6601, 0, 32 * 1024, 4, SECT_4K, }, + { "at25fs040", 0x1f6604, 0, 64 * 1024, 8, SECT_4K, }, - { "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, }, - { "at25df641", 0x1f4800, 64 * 1024, 128, SECT_4K, }, + { "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, }, + { "at25df641", 0x1f4800, 0, 64 * 1024, 128, SECT_4K, }, - { "at26f004", 0x1f0400, 64 * 1024, 8, SECT_4K, }, - { "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, }, - { "at26df161a", 0x1f4601, 64 * 1024, 32, SECT_4K, }, - { "at26df321", 0x1f4701, 64 * 1024, 64, SECT_4K, }, + { "at26f004", 0x1f0400, 0, 64 * 1024, 8, SECT_4K, }, + { "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, }, + { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, }, + { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, /* Spansion -- single (large) sector size only, at least * for the chips listed here (without boot sectors). 
*/ - { "s25sl004a", 0x010212, 64 * 1024, 8, }, - { "s25sl008a", 0x010213, 64 * 1024, 16, }, - { "s25sl016a", 0x010214, 64 * 1024, 32, }, - { "s25sl032a", 0x010215, 64 * 1024, 64, }, - { "s25sl064a", 0x010216, 64 * 1024, 128, }, + { "s25sl004a", 0x010212, 0, 64 * 1024, 8, }, + { "s25sl008a", 0x010213, 0, 64 * 1024, 16, }, + { "s25sl016a", 0x010214, 0, 64 * 1024, 32, }, + { "s25sl032a", 0x010215, 0, 64 * 1024, 64, }, + { "s25sl064a", 0x010216, 0, 64 * 1024, 128, }, + { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, }, + { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, }, /* SST -- large erase sizes are "overlays", "sectors" are 4K */ - { "sst25vf040b", 0xbf258d, 64 * 1024, 8, SECT_4K, }, - { "sst25vf080b", 0xbf258e, 64 * 1024, 16, SECT_4K, }, - { "sst25vf016b", 0xbf2541, 64 * 1024, 32, SECT_4K, }, - { "sst25vf032b", 0xbf254a, 64 * 1024, 64, SECT_4K, }, + { "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, }, + { "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, }, + { "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, }, + { "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, }, /* ST Microelectronics -- newer production may have feature updates */ - { "m25p05", 0x202010, 32 * 1024, 2, }, - { "m25p10", 0x202011, 32 * 1024, 4, }, - { "m25p20", 0x202012, 64 * 1024, 4, }, - { "m25p40", 0x202013, 64 * 1024, 8, }, - { "m25p80", 0, 64 * 1024, 16, }, - { "m25p16", 0x202015, 64 * 1024, 32, }, - { "m25p32", 0x202016, 64 * 1024, 64, }, - { "m25p64", 0x202017, 64 * 1024, 128, }, - { "m25p128", 0x202018, 256 * 1024, 64, }, - - { "m45pe80", 0x204014, 64 * 1024, 16, }, - { "m45pe16", 0x204015, 64 * 1024, 32, }, - - { "m25pe80", 0x208014, 64 * 1024, 16, }, - { "m25pe16", 0x208015, 64 * 1024, 32, SECT_4K, }, + { "m25p05", 0x202010, 0, 32 * 1024, 2, }, + { "m25p10", 0x202011, 0, 32 * 1024, 4, }, + { "m25p20", 0x202012, 0, 64 * 1024, 4, }, + { "m25p40", 0x202013, 0, 64 * 1024, 8, }, + { "m25p80", 0, 0, 64 * 1024, 16, }, + { "m25p16", 0x202015, 0, 64 * 1024, 32, }, + { "m25p32", 0x202016, 0, 64 * 1024, 64, }, + { "m25p64", 0x202017, 0, 64 * 1024, 128, }, + { "m25p128", 0x202018, 0, 256 * 1024, 64, }, + + { "m45pe80", 0x204014, 0, 64 * 1024, 16, }, + { "m45pe16", 0x204015, 0, 64 * 1024, 32, }, + + { "m25pe80", 0x208014, 0, 64 * 1024, 16, }, + { "m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K, }, /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ - { "w25x10", 0xef3011, 64 * 1024, 2, SECT_4K, }, - { "w25x20", 0xef3012, 64 * 1024, 4, SECT_4K, }, - { "w25x40", 0xef3013, 64 * 1024, 8, SECT_4K, }, - { "w25x80", 0xef3014, 64 * 1024, 16, SECT_4K, }, - { "w25x16", 0xef3015, 64 * 1024, 32, SECT_4K, }, - { "w25x32", 0xef3016, 64 * 1024, 64, SECT_4K, }, - { "w25x64", 0xef3017, 64 * 1024, 128, SECT_4K, }, + { "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, }, + { "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, }, + { "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, }, + { "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, }, + { "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, }, + { "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, }, + { "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, }, }; static struct flash_info *__devinit jedec_probe(struct spi_device *spi) @@ -547,6 +550,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) u8 code = OPCODE_RDID; u8 id[3]; u32 jedec; + u16 ext_jedec; struct flash_info *info; /* JEDEC also defines an optional "extended device information" @@ -565,10 +569,14 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) jedec = jedec << 8; jedec |= 
id[2]; + ext_jedec = id[3] << 8 | id[4]; + for (tmp = 0, info = m25p_data; tmp < ARRAY_SIZE(m25p_data); tmp++, info++) { if (info->jedec_id == jedec) + if (ext_jedec != 0 && info->ext_id != ext_jedec) + continue; return info; } dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); -- cgit v0.10.2 From bb0eb217c980d50c45f3e793b4dcc70ab9ee820d Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 Aug 2008 12:40:50 +0300 Subject: [MTD] Define and use MTD_FAIL_ADDR_UNKNOWN instead of 0xffffffff Signed-off-by: Adrian Hunter Signed-off-by: David Woodhouse diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 2972a5e..789842d 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -444,7 +444,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) return -EINVAL; } - instr->fail_addr = 0xffffffff; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; /* make a local copy of instr to avoid modifying the caller's struct */ erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); @@ -493,7 +493,7 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) /* sanity check: should never happen since * block alignment has been checked above */ BUG_ON(err == -EINVAL); - if (erase->fail_addr != 0xffffffff) + if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN) instr->fail_addr = erase->fail_addr + offset; break; } diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index edb90b5..8e77e36 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -214,7 +214,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr) instr->addr += part->offset; ret = part->master->erase(part->master, instr); if (ret) { - if (instr->fail_addr != 0xffffffff) + if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) instr->fail_addr -= part->offset; instr->addr -= part->offset; } @@ -226,7 +226,7 @@ void mtd_erase_callback(struct erase_info *instr) if (instr->mtd->erase == part_erase) { struct mtd_part *part = PART(instr->mtd); - if (instr->fail_addr != 0xffffffff) + if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) instr->fail_addr -= part->offset; instr->addr -= part->offset; } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d1129ba..5822805 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2042,7 +2042,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, return -EINVAL; } - instr->fail_addr = 0xffffffff; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; /* Grab the lock and see if the device is available */ nand_get_device(chip, mtd, FL_ERASING); diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 926cf3a..90ed319 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -1794,7 +1794,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) return -EINVAL; } - instr->fail_addr = 0xffffffff; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; /* Grab the lock and see if the device is available */ onenand_get_device(mtd, FL_ERASING); diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index dddb2a6..259461b 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -68,7 +68,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, instr->len = c->sector_size; instr->callback = jffs2_erase_callback; instr->priv = (unsigned long)(&instr[1]); - instr->fail_addr = 0xffffffff; + instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; ((struct erase_priv_struct *)instr->priv)->jeb = jeb; ((struct 
erase_priv_struct *)instr->priv)->c = c; @@ -175,7 +175,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock { /* For NAND, if the failure did not occur at the device level for a specific physical page, don't bother updating the bad block table. */ - if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) { + if (jffs2_cleanmarker_oob(c) && (bad_offset != MTD_FAIL_ADDR_UNKNOWN)) { /* We had a device-level failure to erase. Let's see if we've failed too many times. */ if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 9226365..eae26bb 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -25,8 +25,10 @@ #define MTD_ERASE_DONE 0x08 #define MTD_ERASE_FAILED 0x10 +#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff + /* If the erase fails, fail_addr might indicate exactly which block failed. If - fail_addr = 0xffffffff, the failure was not at the device level or was not + fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not specific to any particular block. */ struct erase_info { struct mtd_info *mtd; -- cgit v0.10.2 From 36cd4fb5d277f34fe9e4db0deac2d4efd7dff735 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 6 Aug 2008 10:08:46 +0300 Subject: [MTD] [OneNAND] Add OMAP2 / OMAP3 OneNAND driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This driver had resided in the OMAP tree but is now to be in MTD. Original authors were: Jarkko Lavinen and Juha Yrjölä IRQ and DMA support written by Timo Teras Signed-off-by: Adrian Hunter Signed-off-by: David Woodhouse diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index b94a61b..79fa79e 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -27,6 +27,13 @@ config MTD_ONENAND_GENERIC help Support for OneNAND flash via platform device driver. +config MTD_ONENAND_OMAP2 + tristate "OneNAND on OMAP2/OMAP3 support" + depends on MTD_ONENAND && (ARCH_OMAP2 || ARCH_OMAP3) + help + Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU + via the GPMC memory controller. + config MTD_ONENAND_OTP bool "OneNAND OTP Support" select HAVE_MTD_OTP diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile index 4d2eacf..64b6cc6 100644 --- a/drivers/mtd/onenand/Makefile +++ b/drivers/mtd/onenand/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o # Board specific. obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o +obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o # Simulator obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c new file mode 100644 index 0000000..40153ac --- /dev/null +++ b/drivers/mtd/onenand/omap2.c @@ -0,0 +1,777 @@ +/* + * linux/drivers/mtd/onenand/omap2.c + * + * OneNAND driver for OMAP2 / OMAP3 + * + * Copyright © 2005-2006 Nokia Corporation + * + * Author: Jarkko Lavinen and Juha Yrjölä + * IRQ and DMA support written by Timo Teras + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; see the file COPYING. If not, write to the Free Software + * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#define DRIVER_NAME "omap2-onenand" + +#define ONENAND_IO_SIZE SZ_128K +#define ONENAND_BUFRAM_SIZE (1024 * 5) + +struct omap2_onenand { + struct platform_device *pdev; + int gpmc_cs; + unsigned long phys_base; + int gpio_irq; + struct mtd_info mtd; + struct mtd_partition *parts; + struct onenand_chip onenand; + struct completion irq_done; + struct completion dma_done; + int dma_channel; + int freq; + int (*setup)(void __iomem *base, int freq); +}; + +static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) +{ + struct omap2_onenand *c = data; + + complete(&c->dma_done); +} + +static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id) +{ + struct omap2_onenand *c = dev_id; + + complete(&c->irq_done); + + return IRQ_HANDLED; +} + +static inline unsigned short read_reg(struct omap2_onenand *c, int reg) +{ + return readw(c->onenand.base + reg); +} + +static inline void write_reg(struct omap2_onenand *c, unsigned short value, + int reg) +{ + writew(value, c->onenand.base + reg); +} + +static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr) +{ + printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n", + msg, state, ctrl, intr); +} + +static void wait_warn(char *msg, int state, unsigned int ctrl, + unsigned int intr) +{ + printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x " + "intr 0x%04x\n", msg, state, ctrl, intr); +} + +static int omap2_onenand_wait(struct mtd_info *mtd, int state) +{ + struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); + unsigned int intr = 0; + unsigned int ctrl; + unsigned long timeout; + u32 syscfg; + + if (state == FL_RESETING) { + int i; + + for (i = 0; i < 20; i++) { + udelay(1); + intr = read_reg(c, ONENAND_REG_INTERRUPT); + if (intr & ONENAND_INT_MASTER) + break; + } + ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); + if (ctrl & ONENAND_CTRL_ERROR) { + wait_err("controller error", state, ctrl, intr); + return -EIO; + } + if (!(intr & ONENAND_INT_RESET)) { + wait_err("timeout", state, ctrl, intr); + return -EIO; + } + return 0; + } + + if (state != FL_READING) { + int result; + + /* Turn interrupts on */ + syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); + syscfg |= ONENAND_SYS_CFG1_IOBE; + write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); + + INIT_COMPLETION(c->irq_done); + if (c->gpio_irq) { + result = omap_get_gpio_datain(c->gpio_irq); + if (result == -1) { + ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); + intr = read_reg(c, ONENAND_REG_INTERRUPT); + wait_err("gpio error", state, ctrl, intr); + return -EIO; + } + } else + result = 0; + if (result == 0) { + int retry_cnt = 0; +retry: + result = wait_for_completion_timeout(&c->irq_done, + msecs_to_jiffies(20)); + if (result == 0) { + /* Timeout after 20ms */ + ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); + if (ctrl & ONENAND_CTRL_ONGO) { + /* + * The operation seems to be still going + * so give it some more time. 
+ */ + retry_cnt += 1; + if (retry_cnt < 3) + goto retry; + intr = read_reg(c, + ONENAND_REG_INTERRUPT); + wait_err("timeout", state, ctrl, intr); + return -EIO; + } + intr = read_reg(c, ONENAND_REG_INTERRUPT); + if ((intr & ONENAND_INT_MASTER) == 0) + wait_warn("timeout", state, ctrl, intr); + } + } + } else { + /* Turn interrupts off */ + syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); + syscfg &= ~ONENAND_SYS_CFG1_IOBE; + write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); + + timeout = jiffies + msecs_to_jiffies(20); + while (time_before(jiffies, timeout)) { + intr = read_reg(c, ONENAND_REG_INTERRUPT); + if (intr & ONENAND_INT_MASTER) + break; + } + } + + intr = read_reg(c, ONENAND_REG_INTERRUPT); + ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); + + if (intr & ONENAND_INT_READ) { + int ecc = read_reg(c, ONENAND_REG_ECC_STATUS); + + if (ecc) { + unsigned int addr1, addr8; + + addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1); + addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8); + if (ecc & ONENAND_ECC_2BIT_ALL) { + printk(KERN_ERR "onenand_wait: ECC error = " + "0x%04x, addr1 %#x, addr8 %#x\n", + ecc, addr1, addr8); + mtd->ecc_stats.failed++; + return -EBADMSG; + } else if (ecc & ONENAND_ECC_1BIT_ALL) { + printk(KERN_NOTICE "onenand_wait: correctable " + "ECC error = 0x%04x, addr1 %#x, " + "addr8 %#x\n", ecc, addr1, addr8); + mtd->ecc_stats.corrected++; + } + } + } else if (state == FL_READING) { + wait_err("timeout", state, ctrl, intr); + return -EIO; + } + + if (ctrl & ONENAND_CTRL_ERROR) { + wait_err("controller error", state, ctrl, intr); + if (ctrl & ONENAND_CTRL_LOCK) + printk(KERN_ERR "onenand_wait: " + "Device is write protected!!!\n"); + return -EIO; + } + + if (ctrl & 0xFE9F) + wait_warn("unexpected controller status", state, ctrl, intr); + + return 0; +} + +static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area) +{ + struct onenand_chip *this = mtd->priv; + + if (ONENAND_CURRENT_BUFFERRAM(this)) { + if (area == ONENAND_DATARAM) + return mtd->writesize; + if (area == ONENAND_SPARERAM) + return mtd->oobsize; + } + + return 0; +} + +#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2) + +static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, + size_t count) +{ + struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); + struct onenand_chip *this = mtd->priv; + dma_addr_t dma_src, dma_dst; + int bram_offset; + unsigned long timeout; + void *buf = (void *)buffer; + size_t xtra; + volatile unsigned *done; + + bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; + if (bram_offset & 3 || (size_t)buf & 3 || count < 384) + goto out_copy; + + if (buf >= high_memory) { + struct page *p1; + + if (((size_t)buf & PAGE_MASK) != + ((size_t)(buf + count - 1) & PAGE_MASK)) + goto out_copy; + p1 = vmalloc_to_page(buf); + if (!p1) + goto out_copy; + buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK); + } + + xtra = count & 3; + if (xtra) { + count -= xtra; + memcpy(buf + count, this->base + bram_offset + count, xtra); + } + + dma_src = c->phys_base + bram_offset; + dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE); + if (dma_mapping_error(&c->pdev->dev, dma_dst)) { + dev_err(&c->pdev->dev, + "Couldn't DMA map a %d byte buffer\n", + count); + goto out_copy; + } + + omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32, + count >> 2, 1, 0, 0, 0); + omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_src, 0, 0); + 
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_dst, 0, 0); + + INIT_COMPLETION(c->dma_done); + omap_start_dma(c->dma_channel); + + timeout = jiffies + msecs_to_jiffies(20); + done = &c->dma_done.done; + while (time_before(jiffies, timeout)) + if (*done) + break; + + dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); + + if (!*done) { + dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); + goto out_copy; + } + + return 0; + +out_copy: + memcpy(buf, this->base + bram_offset, count); + return 0; +} + +static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area, + const unsigned char *buffer, + int offset, size_t count) +{ + struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); + struct onenand_chip *this = mtd->priv; + dma_addr_t dma_src, dma_dst; + int bram_offset; + unsigned long timeout; + void *buf = (void *)buffer; + volatile unsigned *done; + + bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; + if (bram_offset & 3 || (size_t)buf & 3 || count < 384) + goto out_copy; + + /* panic_write() may be in an interrupt context */ + if (in_interrupt()) + goto out_copy; + + if (buf >= high_memory) { + struct page *p1; + + if (((size_t)buf & PAGE_MASK) != + ((size_t)(buf + count - 1) & PAGE_MASK)) + goto out_copy; + p1 = vmalloc_to_page(buf); + if (!p1) + goto out_copy; + buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK); + } + + dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); + dma_dst = c->phys_base + bram_offset; + if (dma_mapping_error(&c->pdev->dev, dma_dst)) { + dev_err(&c->pdev->dev, + "Couldn't DMA map a %d byte buffer\n", + count); + return -1; + } + + omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32, + count >> 2, 1, 0, 0, 0); + omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_src, 0, 0); + omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_dst, 0, 0); + + INIT_COMPLETION(c->dma_done); + omap_start_dma(c->dma_channel); + + timeout = jiffies + msecs_to_jiffies(20); + done = &c->dma_done.done; + while (time_before(jiffies, timeout)) + if (*done) + break; + + dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); + + if (!*done) { + dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); + goto out_copy; + } + + return 0; + +out_copy: + memcpy(this->base + bram_offset, buf, count); + return 0; +} + +#else + +int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, + size_t count); + +int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area, + const unsigned char *buffer, + int offset, size_t count); + +#endif + +#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2) + +static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, + size_t count) +{ + struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); + struct onenand_chip *this = mtd->priv; + dma_addr_t dma_src, dma_dst; + int bram_offset; + + bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; + /* DMA is not used. Revisit PM requirements before enabling it. 
*/ + if (1 || (c->dma_channel < 0) || + ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) || + (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) { + memcpy(buffer, (__force void *)(this->base + bram_offset), + count); + return 0; + } + + dma_src = c->phys_base + bram_offset; + dma_dst = dma_map_single(&c->pdev->dev, buffer, count, + DMA_FROM_DEVICE); + if (dma_mapping_error(&c->pdev->dev, dma_dst)) { + dev_err(&c->pdev->dev, + "Couldn't DMA map a %d byte buffer\n", + count); + return -1; + } + + omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32, + count / 4, 1, 0, 0, 0); + omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_src, 0, 0); + omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_dst, 0, 0); + + INIT_COMPLETION(c->dma_done); + omap_start_dma(c->dma_channel); + wait_for_completion(&c->dma_done); + + dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); + + return 0; +} + +static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, + const unsigned char *buffer, + int offset, size_t count) +{ + struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); + struct onenand_chip *this = mtd->priv; + dma_addr_t dma_src, dma_dst; + int bram_offset; + + bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; + /* DMA is not used. Revisit PM requirements before enabling it. */ + if (1 || (c->dma_channel < 0) || + ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) || + (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) { + memcpy((__force void *)(this->base + bram_offset), buffer, + count); + return 0; + } + + dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count, + DMA_TO_DEVICE); + dma_dst = c->phys_base + bram_offset; + if (dma_mapping_error(&c->pdev->dev, dma_dst)) { + dev_err(&c->pdev->dev, + "Couldn't DMA map a %d byte buffer\n", + count); + return -1; + } + + omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16, + count / 2, 1, 0, 0, 0); + omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_src, 0, 0); + omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC, + dma_dst, 0, 0); + + INIT_COMPLETION(c->dma_done); + omap_start_dma(c->dma_channel); + wait_for_completion(&c->dma_done); + + dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); + + return 0; +} + +#else + +int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, + unsigned char *buffer, int offset, + size_t count); + +int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, + const unsigned char *buffer, + int offset, size_t count); + +#endif + +static struct platform_driver omap2_onenand_driver; + +static int __adjust_timing(struct device *dev, void *data) +{ + int ret = 0; + struct omap2_onenand *c; + + c = dev_get_drvdata(dev); + + BUG_ON(c->setup == NULL); + + /* DMA is not in use so this is all that is needed */ + /* Revisit for OMAP3! */ + ret = c->setup(c->onenand.base, c->freq); + + return ret; +} + +int omap2_onenand_rephase(void) +{ + return driver_for_each_device(&omap2_onenand_driver.driver, NULL, + NULL, __adjust_timing); +} + +static void __devexit omap2_onenand_shutdown(struct platform_device *pdev) +{ + struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); + + /* With certain content in the buffer RAM, the OMAP boot ROM code + * can recognize the flash chip incorrectly. Zero it out before + * soft reset. 
+ */ + memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE); +} + +static int __devinit omap2_onenand_probe(struct platform_device *pdev) +{ + struct omap_onenand_platform_data *pdata; + struct omap2_onenand *c; + int r; + + pdata = pdev->dev.platform_data; + if (pdata == NULL) { + dev_err(&pdev->dev, "platform data missing\n"); + return -ENODEV; + } + + c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL); + if (!c) + return -ENOMEM; + + init_completion(&c->irq_done); + init_completion(&c->dma_done); + c->gpmc_cs = pdata->cs; + c->gpio_irq = pdata->gpio_irq; + c->dma_channel = pdata->dma_channel; + if (c->dma_channel < 0) { + /* if -1, don't use DMA */ + c->gpio_irq = 0; + } + + r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base); + if (r < 0) { + dev_err(&pdev->dev, "Cannot request GPMC CS\n"); + goto err_kfree; + } + + if (request_mem_region(c->phys_base, ONENAND_IO_SIZE, + pdev->dev.driver->name) == NULL) { + dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, " + "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE); + r = -EBUSY; + goto err_free_cs; + } + c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE); + if (c->onenand.base == NULL) { + r = -ENOMEM; + goto err_release_mem_region; + } + + if (pdata->onenand_setup != NULL) { + r = pdata->onenand_setup(c->onenand.base, c->freq); + if (r < 0) { + dev_err(&pdev->dev, "Onenand platform setup failed: " + "%d\n", r); + goto err_iounmap; + } + c->setup = pdata->onenand_setup; + } + + if (c->gpio_irq) { + if ((r = omap_request_gpio(c->gpio_irq)) < 0) { + dev_err(&pdev->dev, "Failed to request GPIO%d for " + "OneNAND\n", c->gpio_irq); + goto err_iounmap; + } + omap_set_gpio_direction(c->gpio_irq, 1); + + if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), + omap2_onenand_interrupt, IRQF_TRIGGER_RISING, + pdev->dev.driver->name, c)) < 0) + goto err_release_gpio; + } + + if (c->dma_channel >= 0) { + r = omap_request_dma(0, pdev->dev.driver->name, + omap2_onenand_dma_cb, (void *) c, + &c->dma_channel); + if (r == 0) { + omap_set_dma_write_mode(c->dma_channel, + OMAP_DMA_WRITE_NON_POSTED); + omap_set_dma_src_data_pack(c->dma_channel, 1); + omap_set_dma_src_burst_mode(c->dma_channel, + OMAP_DMA_DATA_BURST_8); + omap_set_dma_dest_data_pack(c->dma_channel, 1); + omap_set_dma_dest_burst_mode(c->dma_channel, + OMAP_DMA_DATA_BURST_8); + } else { + dev_info(&pdev->dev, + "failed to allocate DMA for OneNAND, " + "using PIO instead\n"); + c->dma_channel = -1; + } + } + + dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " + "base %p\n", c->gpmc_cs, c->phys_base, + c->onenand.base); + + c->pdev = pdev; + c->mtd.name = pdev->dev.bus_id; + c->mtd.priv = &c->onenand; + c->mtd.owner = THIS_MODULE; + + if (c->dma_channel >= 0) { + struct onenand_chip *this = &c->onenand; + + this->wait = omap2_onenand_wait; + if (cpu_is_omap34xx()) { + this->read_bufferram = omap3_onenand_read_bufferram; + this->write_bufferram = omap3_onenand_write_bufferram; + } else { + this->read_bufferram = omap2_onenand_read_bufferram; + this->write_bufferram = omap2_onenand_write_bufferram; + } + } + + if ((r = onenand_scan(&c->mtd, 1)) < 0) + goto err_release_dma; + + switch ((c->onenand.version_id >> 4) & 0xf) { + case 0: + c->freq = 40; + break; + case 1: + c->freq = 54; + break; + case 2: + c->freq = 66; + break; + case 3: + c->freq = 83; + break; + } + +#ifdef CONFIG_MTD_PARTITIONS + if (pdata->parts != NULL) + r = add_mtd_partitions(&c->mtd, pdata->parts, + pdata->nr_parts); + else +#endif + r = add_mtd_device(&c->mtd); + if (r < 
0) + goto err_release_onenand; + + platform_set_drvdata(pdev, c); + + return 0; + +err_release_onenand: + onenand_release(&c->mtd); +err_release_dma: + if (c->dma_channel != -1) + omap_free_dma(c->dma_channel); + if (c->gpio_irq) + free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); +err_release_gpio: + if (c->gpio_irq) + omap_free_gpio(c->gpio_irq); +err_iounmap: + iounmap(c->onenand.base); +err_release_mem_region: + release_mem_region(c->phys_base, ONENAND_IO_SIZE); +err_free_cs: + gpmc_cs_free(c->gpmc_cs); +err_kfree: + kfree(c); + + return r; +} + +static int __devexit omap2_onenand_remove(struct platform_device *pdev) +{ + struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); + + BUG_ON(c == NULL); + +#ifdef CONFIG_MTD_PARTITIONS + if (c->parts) + del_mtd_partitions(&c->mtd); + else + del_mtd_device(&c->mtd); +#else + del_mtd_device(&c->mtd); +#endif + + onenand_release(&c->mtd); + if (c->dma_channel != -1) + omap_free_dma(c->dma_channel); + omap2_onenand_shutdown(pdev); + platform_set_drvdata(pdev, NULL); + if (c->gpio_irq) { + free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); + omap_free_gpio(c->gpio_irq); + } + iounmap(c->onenand.base); + release_mem_region(c->phys_base, ONENAND_IO_SIZE); + kfree(c); + + return 0; +} + +static struct platform_driver omap2_onenand_driver = { + .probe = omap2_onenand_probe, + .remove = omap2_onenand_remove, + .shutdown = omap2_onenand_shutdown, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init omap2_onenand_init(void) +{ + printk(KERN_INFO "OneNAND driver initializing\n"); + return platform_driver_register(&omap2_onenand_driver); +} + +static void __exit omap2_onenand_exit(void) +{ + platform_driver_unregister(&omap2_onenand_driver); +} + +module_init(omap2_onenand_init); +module_exit(omap2_onenand_exit); + +MODULE_ALIAS(DRIVER_NAME); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jarkko Lavinen "); +MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); diff --git a/include/asm-arm/arch-omap/onenand.h b/include/asm-arm/arch-omap/onenand.h index 6c959d0ce..e302371 100644 --- a/include/asm-arm/arch-omap/onenand.h +++ b/include/asm-arm/arch-omap/onenand.h @@ -16,6 +16,11 @@ struct omap_onenand_platform_data { int gpio_irq; struct mtd_partition *parts; int nr_parts; - int (*onenand_setup)(void __iomem *); + int (*onenand_setup)(void __iomem *, int freq); int dma_channel; }; + +int omap2_onenand_rephase(void); + +#define ONENAND_MAX_PARTITIONS 8 + -- cgit v0.10.2 From bde86fec7c822b6009d3cfefc20b76b8d34716af Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 14 Aug 2008 11:57:45 +0300 Subject: [JFFS2] Correct symlink name too long error code Signed-off-by: Adrian Hunter Signed-off-by: David Woodhouse diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index cd219ef..b1aaae8 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -311,7 +311,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char /* FIXME: If you care. We'd need to use frags for the target if it grows much more than this */ if (targetlen > 254) - return -EINVAL; + return -ENAMETOOLONG; ri = jffs2_alloc_raw_inode(); -- cgit v0.10.2 From 782b7a367d81da005d93b28cb00f9ae086773c24 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 14 Aug 2008 14:00:12 +0300 Subject: [MTD] [OneNAND] OMAP3: add delay for GPIO On OMAP3, the driver was occasionally not seeing the GPIO interrupt. Adding a small delay of one register read eliminates the problem. 
Signed-off-by: Adrian Hunter Signed-off-by: David Woodhouse diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 40153ac..34b4253 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -141,8 +141,13 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) /* Turn interrupts on */ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); - syscfg |= ONENAND_SYS_CFG1_IOBE; - write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); + if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) { + syscfg |= ONENAND_SYS_CFG1_IOBE; + write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); + if (cpu_is_omap34xx()) + /* Add a delay to let GPIO settle */ + syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); + } INIT_COMPLETION(c->irq_done); if (c->gpio_irq) { -- cgit v0.10.2 From e6cf5df1838c28bb060ac45b5585e48e71bbc740 Mon Sep 17 00:00:00 2001 From: frans Date: Fri, 15 Aug 2008 23:14:31 +0200 Subject: [MTD] [NAND] nand_ecc.c: rewrite for improved performance This patch improves the performance of the ecc generation code by a factor of 18 on an INTEL D920 CPU, a factor of 7 on MIPS and a factor of 5 on ARM (NSLU2) Signed-off-by: Frans Meulenbroeks Signed-off-by: David Woodhouse diff --git a/Documentation/mtd/nand_ecc.txt b/Documentation/mtd/nand_ecc.txt new file mode 100644 index 0000000..bdf93b7 --- /dev/null +++ b/Documentation/mtd/nand_ecc.txt @@ -0,0 +1,714 @@ +Introduction +============ + +Having looked at the linux mtd/nand driver and more specific at nand_ecc.c +I felt there was room for optimisation. I bashed the code for a few hours +performing tricks like table lookup removing superfluous code etc. +After that the speed was increased by 35-40%. +Still I was not too happy as I felt there was additional room for improvement. + +Bad! I was hooked. +I decided to annotate my steps in this file. Perhaps it is useful to someone +or someone learns something from it. + + +The problem +=========== + +NAND flash (at least SLC one) typically has sectors of 256 bytes. +However NAND flash is not extremely reliable so some error detection +(and sometimes correction) is needed. + +This is done by means of a Hamming code. I'll try to explain it in +laymans terms (and apologies to all the pro's in the field in case I do +not use the right terminology, my coding theory class was almost 30 +years ago, and I must admit it was not one of my favourites). + +As I said before the ecc calculation is performed on sectors of 256 +bytes. This is done by calculating several parity bits over the rows and +columns. The parity used is even parity which means that the parity bit = 1 +if the data over which the parity is calculated is 1 and the parity bit = 0 +if the data over which the parity is calculated is 0. So the total +number of bits over the data over which the parity is calculated + the +parity bit is even. (see wikipedia if you can't follow this). +Parity is often calculated by means of an exclusive or operation, +sometimes also referred to as xor. In C the operator for xor is ^ + +Back to ecc. +Let's give a small figure: + +byte 0: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp2 rp4 ... rp14 +byte 1: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp2 rp4 ... rp14 +byte 2: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp3 rp4 ... rp14 +byte 3: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp3 rp4 ... rp14 +byte 4: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp2 rp5 ... rp14 +.... +byte 254: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp0 rp3 rp5 ... rp15 +byte 255: bit7 bit6 bit5 bit4 bit3 bit2 bit1 bit0 rp1 rp3 rp5 ... 
rp15
+ cp1 cp0 cp1 cp0 cp1 cp0 cp1 cp0
+ cp3 cp3 cp2 cp2 cp3 cp3 cp2 cp2
+ cp5 cp5 cp5 cp5 cp4 cp4 cp4 cp4
+
+This figure represents a sector of 256 bytes.
+cp is my abbreviation for column parity, rp for row parity.
+
+Let's start to explain column parity.
+cp0 is the parity that belongs to all bit0, bit2, bit4, bit6.
+So the sum of all bit0, bit2, bit4 and bit6 values + cp0 itself is even.
+Similarly cp1 is the sum of all bit1, bit3, bit5 and bit7.
+cp2 is the parity over bit0, bit1, bit4 and bit5.
+cp3 is the parity over bit2, bit3, bit6 and bit7.
+cp4 is the parity over bit0, bit1, bit2 and bit3.
+cp5 is the parity over bit4, bit5, bit6 and bit7.
+Note that each of cp0 .. cp5 is exactly one bit.
+
+Row parity actually works almost the same.
+rp0 is the parity of all even bytes (0, 2, 4, 6, ... 252, 254)
+rp1 is the parity of all odd bytes (1, 3, 5, 7, ..., 253, 255)
+rp2 is the parity of all bytes 0, 1, 4, 5, 8, 9, ...
+(so handle two bytes, then skip 2 bytes).
+rp3 covers the half rp2 does not cover (bytes 2, 3, 6, 7, 10, 11, ...)
+for rp4 the rule is cover 4 bytes, skip 4 bytes, cover 4 bytes, skip 4 etc.
+so rp4 calculates parity over bytes 0, 1, 2, 3, 8, 9, 10, 11, 16, ...
+and rp5 covers the other half, so bytes 4, 5, 6, 7, 12, 13, 14, 15, 20, ...
+The story now becomes quite boring. I guess you get the idea.
+rp6 covers 8 bytes then skips 8 etc
+rp7 skips 8 bytes then covers 8 etc
+rp8 covers 16 bytes then skips 16 etc
+rp9 skips 16 bytes then covers 16 etc
+rp10 covers 32 bytes then skips 32 etc
+rp11 skips 32 bytes then covers 32 etc
+rp12 covers 64 bytes then skips 64 etc
+rp13 skips 64 bytes then covers 64 etc
+rp14 covers 128 bytes then skips 128
+rp15 skips 128 bytes then covers 128
+
+In the end the parity bits are grouped together in three bytes as
+follows:
+ECC Bit 7 Bit 6 Bit 5 Bit 4 Bit 3 Bit 2 Bit 1 Bit 0
+ECC 0 rp07 rp06 rp05 rp04 rp03 rp02 rp01 rp00
+ECC 1 rp15 rp14 rp13 rp12 rp11 rp10 rp09 rp08
+ECC 2 cp5 cp4 cp3 cp2 cp1 cp0 1 1
+
+I detected after writing this that ST application note AN1823
+(http://www.st.com/stonline/books/pdf/docs/10123.pdf) gives a much
+nicer picture (but they use line parity as the term where I use row parity).
+Oh well, I'm graphically challenged, so suffer with me for a moment :-)
+And I could not reuse the ST picture anyway for copyright reasons.
+
+
+Attempt 0
+=========
+
+Implementing the parity calculation is pretty simple.
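A compact way to see the row-parity scheme above: byte offset i feeds rp(2k+1) when bit k of i is set and rp(2k) when it is clear, so the pattern of rp pairs spells out the binary byte address, which is what later makes single-bit error location possible. A minimal sketch of that view in plain C (an illustration with made-up names, not code from the patch):

/* Illustration only: the row parities a byte feeds are selected
 * directly by the bits of its offset within the 256 byte sector. */
static void row_parity_bytes(const unsigned char *buf, unsigned char rp[16])
{
	int i, k;

	for (k = 0; k < 16; k++)
		rp[k] = 0;
	for (i = 0; i < 256; i++)
		for (k = 0; k < 8; k++) {
			if (i & (1 << k))
				rp[2 * k + 1] ^= buf[i];	/* rp1, rp3, ... rp15 */
			else
				rp[2 * k] ^= buf[i];		/* rp0, rp2, ... rp14 */
		}
	/* each rp[k] still has to be folded down to a single parity bit */
}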
+In C pseudocode: +for (i = 0; i < 256; i++) +{ + if (i & 0x01) + rp1 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp1; + else + rp0 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp1; + if (i & 0x02) + rp3 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp3; + else + rp2 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp2; + if (i & 0x04) + rp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp5; + else + rp4 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp4; + if (i & 0x08) + rp7 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp7; + else + rp6 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp6; + if (i & 0x10) + rp9 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp9; + else + rp8 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp8; + if (i & 0x20) + rp11 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp11; + else + rp10 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp10; + if (i & 0x40) + rp13 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp13; + else + rp12 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp12; + if (i & 0x80) + rp15 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp15; + else + rp14 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ bit3 ^ bit2 ^ bit1 ^ bit0 ^ rp14; + cp0 = bit6 ^ bit4 ^ bit2 ^ bit0 ^ cp0; + cp1 = bit7 ^ bit5 ^ bit3 ^ bit1 ^ cp1; + cp2 = bit5 ^ bit4 ^ bit1 ^ bit0 ^ cp2; + cp3 = bit7 ^ bit6 ^ bit3 ^ bit2 ^ cp3 + cp4 = bit3 ^ bit2 ^ bit1 ^ bit0 ^ cp4 + cp5 = bit7 ^ bit6 ^ bit5 ^ bit4 ^ cp5 +} + + +Analysis 0 +========== + +C does have bitwise operators but not really operators to do the above +efficiently (and most hardware has no such instructions either). +Therefore without implementing this it was clear that the code above was +not going to bring me a Nobel prize :-) + +Fortunately the exclusive or operation is commutative, so we can combine +the values in any order. So instead of calculating all the bits +individually, let us try to rearrange things. +For the column parity this is easy. We can just xor the bytes and in the +end filter out the relevant bits. This is pretty nice as it will bring +all cp calculation out of the if loop. + +Similarly we can first xor the bytes for the various rows. 
+This leads to: + + +Attempt 1 +========= + +const char parity[256] = { + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 +}; + +void ecc1(const unsigned char *buf, unsigned char *code) +{ + int i; + const unsigned char *bp = buf; + unsigned char cur; + unsigned char rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; + unsigned char rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15; + unsigned char par; + + par = 0; + rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0; + rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0; + rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0; + rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0; + + for (i = 0; i < 256; i++) + { + cur = *bp++; + par ^= cur; + if (i & 0x01) rp1 ^= cur; else rp0 ^= cur; + if (i & 0x02) rp3 ^= cur; else rp2 ^= cur; + if (i & 0x04) rp5 ^= cur; else rp4 ^= cur; + if (i & 0x08) rp7 ^= cur; else rp6 ^= cur; + if (i & 0x10) rp9 ^= cur; else rp8 ^= cur; + if (i & 0x20) rp11 ^= cur; else rp10 ^= cur; + if (i & 0x40) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x80) rp15 ^= cur; else rp14 ^= cur; + } + code[0] = + (parity[rp7] << 7) | + (parity[rp6] << 6) | + (parity[rp5] << 5) | + (parity[rp4] << 4) | + (parity[rp3] << 3) | + (parity[rp2] << 2) | + (parity[rp1] << 1) | + (parity[rp0]); + code[1] = + (parity[rp15] << 7) | + (parity[rp14] << 6) | + (parity[rp13] << 5) | + (parity[rp12] << 4) | + (parity[rp11] << 3) | + (parity[rp10] << 2) | + (parity[rp9] << 1) | + (parity[rp8]); + code[2] = + (parity[par & 0xf0] << 7) | + (parity[par & 0x0f] << 6) | + (parity[par & 0xcc] << 5) | + (parity[par & 0x33] << 4) | + (parity[par & 0xaa] << 3) | + (parity[par & 0x55] << 2); + code[0] = ~code[0]; + code[1] = ~code[1]; + code[2] = ~code[2]; +} + +Still pretty straightforward. The last three invert statements are there to +give a checksum of 0xff 0xff 0xff for an empty flash. In an empty flash +all data is 0xff, so the checksum then matches. + +I also introduced the parity lookup. I expected this to be the fastest +way to calculate the parity, but I will investigate alternatives later +on. + + +Analysis 1 +========== + +The code works, but is not terribly efficient. On my system it took +almost 4 times as much time as the linux driver code. But hey, if it was +*that* easy this would have been done long before. +No pain. no gain. + +Fortunately there is plenty of room for improvement. + +In step 1 we moved from bit-wise calculation to byte-wise calculation. +However in C we can also use the unsigned long data type and virtually +every modern microprocessor supports 32 bit operations, so why not try +to write our code in such a way that we process data in 32 bit chunks. + +Of course this means some modification as the row parity is byte by +byte. A quick analysis: +for the column parity we use the par variable. 
When extending to 32 bits +we can in the end easily calculate p0 and p1 from it. +(because par now consists of 4 bytes, contributing to rp1, rp0, rp1, rp0 +respectively) +also rp2 and rp3 can be easily retrieved from par as rp3 covers the +first two bytes and rp2 the last two bytes. + +Note that of course now the loop is executed only 64 times (256/4). +And note that care must taken wrt byte ordering. The way bytes are +ordered in a long is machine dependent, and might affect us. +Anyway, if there is an issue: this code is developed on x86 (to be +precise: a DELL PC with a D920 Intel CPU) + +And of course the performance might depend on alignment, but I expect +that the I/O buffers in the nand driver are aligned properly (and +otherwise that should be fixed to get maximum performance). + +Let's give it a try... + + +Attempt 2 +========= + +extern const char parity[256]; + +void ecc2(const unsigned char *buf, unsigned char *code) +{ + int i; + const unsigned long *bp = (unsigned long *)buf; + unsigned long cur; + unsigned long rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; + unsigned long rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15; + unsigned long par; + + par = 0; + rp0 = 0; rp1 = 0; rp2 = 0; rp3 = 0; + rp4 = 0; rp5 = 0; rp6 = 0; rp7 = 0; + rp8 = 0; rp9 = 0; rp10 = 0; rp11 = 0; + rp12 = 0; rp13 = 0; rp14 = 0; rp15 = 0; + + for (i = 0; i < 64; i++) + { + cur = *bp++; + par ^= cur; + if (i & 0x01) rp5 ^= cur; else rp4 ^= cur; + if (i & 0x02) rp7 ^= cur; else rp6 ^= cur; + if (i & 0x04) rp9 ^= cur; else rp8 ^= cur; + if (i & 0x08) rp11 ^= cur; else rp10 ^= cur; + if (i & 0x10) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x20) rp15 ^= cur; else rp14 ^= cur; + } + /* + we need to adapt the code generation for the fact that rp vars are now + long; also the column parity calculation needs to be changed. + we'll bring rp4 to 15 back to single byte entities by shifting and + xoring + */ + rp4 ^= (rp4 >> 16); rp4 ^= (rp4 >> 8); rp4 &= 0xff; + rp5 ^= (rp5 >> 16); rp5 ^= (rp5 >> 8); rp5 &= 0xff; + rp6 ^= (rp6 >> 16); rp6 ^= (rp6 >> 8); rp6 &= 0xff; + rp7 ^= (rp7 >> 16); rp7 ^= (rp7 >> 8); rp7 &= 0xff; + rp8 ^= (rp8 >> 16); rp8 ^= (rp8 >> 8); rp8 &= 0xff; + rp9 ^= (rp9 >> 16); rp9 ^= (rp9 >> 8); rp9 &= 0xff; + rp10 ^= (rp10 >> 16); rp10 ^= (rp10 >> 8); rp10 &= 0xff; + rp11 ^= (rp11 >> 16); rp11 ^= (rp11 >> 8); rp11 &= 0xff; + rp12 ^= (rp12 >> 16); rp12 ^= (rp12 >> 8); rp12 &= 0xff; + rp13 ^= (rp13 >> 16); rp13 ^= (rp13 >> 8); rp13 &= 0xff; + rp14 ^= (rp14 >> 16); rp14 ^= (rp14 >> 8); rp14 &= 0xff; + rp15 ^= (rp15 >> 16); rp15 ^= (rp15 >> 8); rp15 &= 0xff; + rp3 = (par >> 16); rp3 ^= (rp3 >> 8); rp3 &= 0xff; + rp2 = par & 0xffff; rp2 ^= (rp2 >> 8); rp2 &= 0xff; + par ^= (par >> 16); + rp1 = (par >> 8); rp1 &= 0xff; + rp0 = (par & 0xff); + par ^= (par >> 8); par &= 0xff; + + code[0] = + (parity[rp7] << 7) | + (parity[rp6] << 6) | + (parity[rp5] << 5) | + (parity[rp4] << 4) | + (parity[rp3] << 3) | + (parity[rp2] << 2) | + (parity[rp1] << 1) | + (parity[rp0]); + code[1] = + (parity[rp15] << 7) | + (parity[rp14] << 6) | + (parity[rp13] << 5) | + (parity[rp12] << 4) | + (parity[rp11] << 3) | + (parity[rp10] << 2) | + (parity[rp9] << 1) | + (parity[rp8]); + code[2] = + (parity[par & 0xf0] << 7) | + (parity[par & 0x0f] << 6) | + (parity[par & 0xcc] << 5) | + (parity[par & 0x33] << 4) | + (parity[par & 0xaa] << 3) | + (parity[par & 0x55] << 2); + code[0] = ~code[0]; + code[1] = ~code[1]; + code[2] = ~code[2]; +} + +The parity array is not shown any more. 
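For reference, each entry of that parity table is simply the xor of the bits of its index folded down to one bit, so a table like it could be generated with a small helper along these lines (a sketch, not part of the original note):

/* sketch: returns 1 when b has an odd number of set bits, else 0 */
static unsigned char parity_of(unsigned char b)
{
	b ^= b >> 4;
	b ^= b >> 2;
	b ^= b >> 1;
	return b & 1;
}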
Note also that for these +examples I kinda deviated from my regular programming style by allowing +multiple statements on a line, not using { } in then and else blocks +with only a single statement and by using operators like ^= + + +Analysis 2 +========== + +The code (of course) works, and hurray: we are a little bit faster than +the linux driver code (about 15%). But wait, don't cheer too quickly. +THere is more to be gained. +If we look at e.g. rp14 and rp15 we see that we either xor our data with +rp14 or with rp15. However we also have par which goes over all data. +This means there is no need to calculate rp14 as it can be calculated from +rp15 through rp14 = par ^ rp15; +(or if desired we can avoid calculating rp15 and calculate it from +rp14). That is why some places refer to inverse parity. +Of course the same thing holds for rp4/5, rp6/7, rp8/9, rp10/11 and rp12/13. +Effectively this means we can eliminate the else clause from the if +statements. Also we can optimise the calculation in the end a little bit +by going from long to byte first. Actually we can even avoid the table +lookups + +Attempt 3 +========= + +Odd replaced: + if (i & 0x01) rp5 ^= cur; else rp4 ^= cur; + if (i & 0x02) rp7 ^= cur; else rp6 ^= cur; + if (i & 0x04) rp9 ^= cur; else rp8 ^= cur; + if (i & 0x08) rp11 ^= cur; else rp10 ^= cur; + if (i & 0x10) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x20) rp15 ^= cur; else rp14 ^= cur; +with + if (i & 0x01) rp5 ^= cur; + if (i & 0x02) rp7 ^= cur; + if (i & 0x04) rp9 ^= cur; + if (i & 0x08) rp11 ^= cur; + if (i & 0x10) rp13 ^= cur; + if (i & 0x20) rp15 ^= cur; + + and outside the loop added: + rp4 = par ^ rp5; + rp6 = par ^ rp7; + rp8 = par ^ rp9; + rp10 = par ^ rp11; + rp12 = par ^ rp13; + rp14 = par ^ rp15; + +And after that the code takes about 30% more time, although the number of +statements is reduced. This is also reflected in the assembly code. + + +Analysis 3 +========== + +Very weird. Guess it has to do with caching or instruction parallellism +or so. I also tried on an eeePC (Celeron, clocked at 900 Mhz). Interesting +observation was that this one is only 30% slower (according to time) +executing the code as my 3Ghz D920 processor. + +Well, it was expected not to be easy so maybe instead move to a +different track: let's move back to the code from attempt2 and do some +loop unrolling. This will eliminate a few if statements. I'll try +different amounts of unrolling to see what works best. + + +Attempt 4 +========= + +Unrolled the loop 1, 2, 3 and 4 times. +For 4 the code starts with: + + for (i = 0; i < 4; i++) + { + cur = *bp++; + par ^= cur; + rp4 ^= cur; + rp6 ^= cur; + rp8 ^= cur; + rp10 ^= cur; + if (i & 0x1) rp13 ^= cur; else rp12 ^= cur; + if (i & 0x2) rp15 ^= cur; else rp14 ^= cur; + cur = *bp++; + par ^= cur; + rp5 ^= cur; + rp6 ^= cur; + ... + + +Analysis 4 +========== + +Unrolling once gains about 15% +Unrolling twice keeps the gain at about 15% +Unrolling three times gives a gain of 30% compared to attempt 2. +Unrolling four times gives a marginal improvement compared to unrolling +three times. + +I decided to proceed with a four time unrolled loop anyway. It was my gut +feeling that in the next steps I would obtain additional gain from it. + +The next step was triggered by the fact that par contains the xor of all +bytes and rp4 and rp5 each contain the xor of half of the bytes. +So in effect par = rp4 ^ rp5. But as xor is commutative we can also say +that rp5 = par ^ rp4. So no need to keep both rp4 and rp5 around. 
We can
+eliminate rp5 (or rp4, but I already foresaw another optimisation).
+The same holds for rp6/7, rp8/9, rp10/11, rp12/13 and rp14/15.
+
+
+Attempt 5
+=========
+
+Effectively, all odd-digit rp assignments in the loop were removed.
+This included the else clause of the if statements.
+Of course after the loop we need to correct things by adding code like:
+ rp5 = par ^ rp4;
+Also the initial assignments (rp5 = 0; etc) could be removed.
+Along the way I also removed the initialisation of rp0/1/2/3.
+
+
+Analysis 5
+==========
+
+Measurements showed this was a good move. The run-time roughly halved
+compared with attempt 4 with the loop unrolled 4 times, and we only
+require 1/3rd of the processor time compared to the current code in the
+linux kernel.
+
+However, still I thought there was more. I didn't like all the if
+statements. Why not keep a running parity and only keep the last if
+statement? Time for yet another version!
+
+
+Attempt 6
+=========
+
+The code within the for loop was changed to:
+
+ for (i = 0; i < 4; i++)
+ {
+ cur = *bp++; tmppar = cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= tmppar;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp8 ^= tmppar;
+
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp10 ^= tmppar;
+
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur; rp8 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur; rp8 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp8 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp8 ^= cur;
+
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0) rp12 ^= tmppar;
+ if ((i & 0x2) == 0) rp14 ^= tmppar;
+ }
+
+As you can see tmppar is used to accumulate the parity within a for
+iteration. In the last 3 statements it is added to par and, if needed,
+to rp12 and rp14.
+
+While making the changes I also found that I could exploit that tmppar
+contains the running parity for this iteration. So instead of having:
+rp4 ^= cur; rp6 ^= cur;
+I removed the rp6 ^= cur; statement and did rp6 ^= tmppar; on the next
+statement. A similar change was done for rp8 and rp10.
+
+
+Analysis 6
+==========
+
+Measuring this code again showed big gain. When executing the original
+linux code 1 million times, this took about 1 second on my system
+(using time to measure the performance). After this iteration I was back
+to 0.075 sec. Actually I had to decide to start measuring over 10
+million iterations in order not to lose too much accuracy. This one
+definitely seemed to be the jackpot!
+
+There is a little bit more room for improvement though. There are three
+places with statements:
+rp4 ^= cur; rp6 ^= cur;
+It seems more efficient to also maintain a variable rp4_6 in the while
+loop; this eliminates 3 statements per loop. Of course after the loop we
+need to correct by adding:
+ rp4 ^= rp4_6;
+ rp6 ^= rp4_6;
+Furthermore there are 4 sequential assignments to rp8. This can be
+encoded slightly more efficiently by saving tmppar before those 4 lines
+and later doing rp8 = rp8 ^ tmppar ^ notrp8;
+(where notrp8 is the value of rp8 before those 4 lines).
+Again a use of the commutative property of xor.
+Time for a new test!
+
+
+Attempt 7
+=========
+
+The new code now looks like:
+
+ for (i = 0; i < 4; i++)
+ {
+ cur = *bp++; tmppar = cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= tmppar;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp8 ^= tmppar;
+
+ cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp10 ^= tmppar;
+
+ notrp8 = tmppar;
+ cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur;
+ rp8 = rp8 ^ tmppar ^ notrp8;
+
+ cur = *bp++; tmppar ^= cur; rp4_6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp6 ^= cur;
+ cur = *bp++; tmppar ^= cur; rp4 ^= cur;
+ cur = *bp++; tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0) rp12 ^= tmppar;
+ if ((i & 0x2) == 0) rp14 ^= tmppar;
+ }
+ rp4 ^= rp4_6;
+ rp6 ^= rp4_6;
+
+
+Not a big change, but every penny counts :-)
+
+
+Analysis 7
+==========
+
+Actually this made things worse. Not very much, but I don't want to move
+in the wrong direction. Maybe something to investigate later. Could
+have to do with caching again.
+
+Guess that is what there is to win within the loop. Maybe unrolling one
+more time will help. I'll keep the optimisations from 7 for now.
+
+
+Attempt 8
+=========
+
+Unrolled the loop one more time.
+
+
+Analysis 8
+==========
+
+This makes things worse. Let's stick with attempt 6 and continue from there.
+Although it seems that the code within the loop cannot be optimised
+further, there is still room to optimise the generation of the ecc codes.
+We can simply calculate the total parity. If this is 0 then rp4 = rp5
+etc. If the parity is 1, then rp4 = !rp5;
+But if rp4 = rp5 we do not need rp5 etc. We can just write the even bits
+in the result byte and then do something like
+ code[0] |= (code[0] << 1);
+Let's test this.
+
+
+Attempt 9
+=========
+
+Changed the code but again this slightly degrades performance. Tried all
+kinds of other things, like having dedicated parity arrays to avoid the
+shift after parity[rp7] << 7; No gain.
+I also changed the lookup using the parity array into shift operators,
+e.g. replaced parity[rp7] << 7 with:
+rp7 ^= (rp7 << 4);
+rp7 ^= (rp7 << 2);
+rp7 ^= (rp7 << 1);
+rp7 &= 0x80;
+No gain.
+
+The only marginal change was inverting the parity bits, so we can remove
+the last three invert statements.
+
+Ah well, pity this does not deliver more. Then again 10 million
+iterations using the linux driver code takes between 13 and 13.5
+seconds, whereas my code now takes about 0.73 seconds for those 10
+million iterations. So basically I've improved the performance by a
+factor of 18 on my system. Not that bad. Of course on different hardware
+you will get different results. No warranties!
+
+But of course there is no such thing as a free lunch. The codesize almost
+tripled (from 562 bytes to 1434 bytes). Then again, it is not that much.
+
+
+Correcting errors
+=================
+
+For correcting errors I again used the ST application note as a starter,
+but I also peeked at the existing code.
+The algorithm itself is pretty straightforward. Just xor the given and
+the calculated ecc. If all bytes are 0 there is no problem. If 11 bits
+are 1 we have one correctable bit error. If exactly 1 bit is 1, we have an
+error in the given ecc code.
+It proved to be fastest to do some table lookups.
Performance gain +introduced by this is about a factor 2 on my system when a repair had to +be done, and 1% or so if no repair had to be done. +Code size increased from 330 bytes to 686 bytes for this function. +(gcc 4.2, -O3) + + +Conclusion +========== + +The gain when calculating the ecc is tremendous. Om my development hardware +a speedup of a factor of 18 for ecc calculation was achieved. On a test on an +embedded system with a MIPS core a factor 7 was obtained. +On a test with a Linksys NSLU2 (ARMv5TE processor) the speedup was a factor +5 (big endian mode, gcc 4.1.2, -O3) +For correction not much gain could be obtained (as bitflips are rare). Then +again there are also much less cycles spent there. + +It seems there is not much more gain possible in this, at least when +programmed in C. Of course it might be possible to squeeze something more +out of it with an assembler program, but due to pipeline behaviour etc +this is very tricky (at least for intel hw). + +Author: Frans Meulenbroeks +Copyright (C) 2008 Koninklijke Philips Electronics NV. diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index 918a806..7129da5 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -1,13 +1,18 @@ /* - * This file contains an ECC algorithm from Toshiba that detects and - * corrects 1 bit errors in a 256 byte block of data. + * This file contains an ECC algorithm that detects and corrects 1 bit + * errors in a 256 byte block of data. * * drivers/mtd/nand/nand_ecc.c * - * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com) - * Toshiba America Electronics Components, Inc. + * Copyright (C) 2008 Koninklijke Philips Electronics NV. + * Author: Frans Meulenbroeks * - * Copyright (C) 2006 Thomas Gleixner + * Completely replaces the previous ECC implementation which was written by: + * Steven J. Hill (sjhill@realitydiluted.com) + * Thomas Gleixner (tglx@linutronix.de) + * + * Information on how this algorithm works and how it was developed + * can be found in Documentation/nand/ecc.txt * * This file is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -23,174 +28,417 @@ * with this file; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * - * As a special exception, if other files instantiate templates or use - * macros or inline functions from these files, or you compile these - * files and link them with other works to produce a work based on these - * files, these files do not by themselves cause the resulting work to be - * covered by the GNU General Public License. However the source code for - * these files must still be made available in accordance with section (3) - * of the GNU General Public License. - * - * This exception does not invalidate any other reasons why a work based on - * this file might be covered by the GNU General Public License. */ +/* + * The STANDALONE macro is useful when running the code outside the kernel + * e.g. when running the code in a testbed or a benchmark program. + * When STANDALONE is used, the module related macros are commented out + * as well as the linux include files. 
+ * Instead a private definition of mtd_into is given to satisfy the compiler + * (the code does not use mtd_info, so the code does not care) + */ +#ifndef STANDALONE #include #include #include #include +#else +typedef uint32_t unsigned long +struct mtd_info { + int dummy; +}; +#define EXPORT_SYMBOL(x) /* x */ + +#define MODULE_LICENSE(x) /* x */ +#define MODULE_AUTHOR(x) /* x */ +#define MODULE_DESCRIPTION(x) /* x */ +#endif + +/* + * invparity is a 256 byte table that contains the odd parity + * for each byte. So if the number of bits in a byte is even, + * the array element is 1, and when the number of bits is odd + * the array eleemnt is 0. + */ +static const char invparity[256] = { + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1 +}; /* - * Pre-calculated 256-way 1 byte column parity + * bitsperbyte contains the number of bits per byte + * this is only used for testing and repairing parity + * (a precalculated value slightly improves performance) */ -static const u_char nand_ecc_precalc_table[] = { - 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00, - 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, - 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, - 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, - 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, - 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, - 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, - 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, - 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, - 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, - 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, - 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, - 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, - 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, - 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, - 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00 +static const char bitsperbyte[256] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 
4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8, +}; + +/* + * addressbits is a lookup table to filter out the bits from the xor-ed + * ecc data that identify the faulty location. + * this is only used for repairing parity + * see the comments in nand_correct_data for more details + */ +static const char addressbits[256] = { + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, + 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, + 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05, + 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07, + 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05, + 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07, + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, + 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, + 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03, + 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05, + 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07, + 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05, + 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07, + 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09, + 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b, + 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09, + 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b, + 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d, + 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f, + 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d, + 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f, + 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09, + 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b, + 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09, + 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b, + 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d, + 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f, + 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d, + 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f }; /** * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block - * @mtd: MTD block structure + * @mtd: MTD block structure (unused) * @dat: raw data * @ecc_code: buffer for ECC */ -int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, - u_char *ecc_code) +int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, + unsigned char *code) { - uint8_t idx, reg1, reg2, reg3, tmp1, tmp2; int i; + const uint32_t *bp = (uint32_t *)buf; + uint32_t cur; /* current value in buffer */ + /* rp0..rp15 are the various accumulated parities (per byte) */ + uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; + uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15; + uint32_t par; /* the cumulative parity for all data */ + uint32_t tmppar; /* the cumulative parity for this iteration; + for rp12 and rp14 at the end of the loop */ + + par = 0; + rp4 = 0; + rp6 = 0; + rp8 = 0; + rp10 = 0; + rp12 = 0; + rp14 = 0; + + /* + * The loop is unrolled a number of times; + * This avoids if statements to decide on which rp value 
to update + * Also we process the data by longwords. + * Note: passing unaligned data might give a performance penalty. + * It is assumed that the buffers are aligned. + * tmppar is the cumulative sum of this iteration. + * needed for calculating rp12, rp14 and par + * also used as a performance improvement for rp6, rp8 and rp10 + */ + for (i = 0; i < 4; i++) { + cur = *bp++; + tmppar = cur; + rp4 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp6 ^= tmppar; + cur = *bp++; + tmppar ^= cur; + rp4 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp8 ^= tmppar; - /* Initialize variables */ - reg1 = reg2 = reg3 = 0; + cur = *bp++; + tmppar ^= cur; + rp4 ^= cur; + rp6 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp6 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp4 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp10 ^= tmppar; - /* Build up column parity */ - for(i = 0; i < 256; i++) { - /* Get CP0 - CP5 from table */ - idx = nand_ecc_precalc_table[*dat++]; - reg1 ^= (idx & 0x3f); + cur = *bp++; + tmppar ^= cur; + rp4 ^= cur; + rp6 ^= cur; + rp8 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp6 ^= cur; + rp8 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp4 ^= cur; + rp8 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp8 ^= cur; - /* All bit XOR = 1 ? */ - if (idx & 0x40) { - reg3 ^= (uint8_t) i; - reg2 ^= ~((uint8_t) i); - } + cur = *bp++; + tmppar ^= cur; + rp4 ^= cur; + rp6 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp6 ^= cur; + cur = *bp++; + tmppar ^= cur; + rp4 ^= cur; + cur = *bp++; + tmppar ^= cur; + + par ^= tmppar; + if ((i & 0x1) == 0) + rp12 ^= tmppar; + if ((i & 0x2) == 0) + rp14 ^= tmppar; } - /* Create non-inverted ECC code from line parity */ - tmp1 = (reg3 & 0x80) >> 0; /* B7 -> B7 */ - tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */ - tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */ - tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */ - tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */ - tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */ - tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */ - tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */ - - tmp2 = (reg3 & 0x08) << 4; /* B3 -> B7 */ - tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */ - tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */ - tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */ - tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */ - tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */ - tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */ - tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */ - - /* Calculate final ECC code */ + /* + * handle the fact that we use longword operations + * we'll bring rp4..rp14 back to single byte entities by shifting and + * xoring first fold the upper and lower 16 bits, + * then the upper and lower 8 bits. 
+ */ + rp4 ^= (rp4 >> 16); + rp4 ^= (rp4 >> 8); + rp4 &= 0xff; + rp6 ^= (rp6 >> 16); + rp6 ^= (rp6 >> 8); + rp6 &= 0xff; + rp8 ^= (rp8 >> 16); + rp8 ^= (rp8 >> 8); + rp8 &= 0xff; + rp10 ^= (rp10 >> 16); + rp10 ^= (rp10 >> 8); + rp10 &= 0xff; + rp12 ^= (rp12 >> 16); + rp12 ^= (rp12 >> 8); + rp12 &= 0xff; + rp14 ^= (rp14 >> 16); + rp14 ^= (rp14 >> 8); + rp14 &= 0xff; + + /* + * we also need to calculate the row parity for rp0..rp3 + * This is present in par, because par is now + * rp3 rp3 rp2 rp2 + * as well as + * rp1 rp0 rp1 rp0 + * First calculate rp2 and rp3 + * (and yes: rp2 = (par ^ rp3) & 0xff; but doing that did not + * give a performance improvement) + */ + rp3 = (par >> 16); + rp3 ^= (rp3 >> 8); + rp3 &= 0xff; + rp2 = par & 0xffff; + rp2 ^= (rp2 >> 8); + rp2 &= 0xff; + + /* reduce par to 16 bits then calculate rp1 and rp0 */ + par ^= (par >> 16); + rp1 = (par >> 8) & 0xff; + rp0 = (par & 0xff); + + /* finally reduce par to 8 bits */ + par ^= (par >> 8); + par &= 0xff; + + /* + * and calculate rp5..rp15 + * note that par = rp4 ^ rp5 and due to the commutative property + * of the ^ operator we can say: + * rp5 = (par ^ rp4); + * The & 0xff seems superfluous, but benchmarking learned that + * leaving it out gives slightly worse results. No idea why, probably + * it has to do with the way the pipeline in pentium is organized. + */ + rp5 = (par ^ rp4) & 0xff; + rp7 = (par ^ rp6) & 0xff; + rp9 = (par ^ rp8) & 0xff; + rp11 = (par ^ rp10) & 0xff; + rp13 = (par ^ rp12) & 0xff; + rp15 = (par ^ rp14) & 0xff; + + /* + * Finally calculate the ecc bits. + * Again here it might seem that there are performance optimisations + * possible, but benchmarks showed that on the system this is developed + * the code below is the fastest + */ #ifdef CONFIG_MTD_NAND_ECC_SMC - ecc_code[0] = ~tmp2; - ecc_code[1] = ~tmp1; + code[0] = + (invparity[rp7] << 7) | + (invparity[rp6] << 6) | + (invparity[rp5] << 5) | + (invparity[rp4] << 4) | + (invparity[rp3] << 3) | + (invparity[rp2] << 2) | + (invparity[rp1] << 1) | + (invparity[rp0]); + code[1] = + (invparity[rp15] << 7) | + (invparity[rp14] << 6) | + (invparity[rp13] << 5) | + (invparity[rp12] << 4) | + (invparity[rp11] << 3) | + (invparity[rp10] << 2) | + (invparity[rp9] << 1) | + (invparity[rp8]); #else - ecc_code[0] = ~tmp1; - ecc_code[1] = ~tmp2; + code[1] = + (invparity[rp7] << 7) | + (invparity[rp6] << 6) | + (invparity[rp5] << 5) | + (invparity[rp4] << 4) | + (invparity[rp3] << 3) | + (invparity[rp2] << 2) | + (invparity[rp1] << 1) | + (invparity[rp0]); + code[0] = + (invparity[rp15] << 7) | + (invparity[rp14] << 6) | + (invparity[rp13] << 5) | + (invparity[rp12] << 4) | + (invparity[rp11] << 3) | + (invparity[rp10] << 2) | + (invparity[rp9] << 1) | + (invparity[rp8]); #endif - ecc_code[2] = ((~reg1) << 2) | 0x03; - + code[2] = + (invparity[par & 0xf0] << 7) | + (invparity[par & 0x0f] << 6) | + (invparity[par & 0xcc] << 5) | + (invparity[par & 0x33] << 4) | + (invparity[par & 0xaa] << 3) | + (invparity[par & 0x55] << 2) | + 3; return 0; } EXPORT_SYMBOL(nand_calculate_ecc); -static inline int countbits(uint32_t byte) -{ - int res = 0; - - for (;byte; byte >>= 1) - res += byte & 0x01; - return res; -} - /** * nand_correct_data - [NAND Interface] Detect and correct bit error(s) - * @mtd: MTD block structure + * @mtd: MTD block structure (unused) * @dat: raw data read from the chip * @read_ecc: ECC from the chip * @calc_ecc: the ECC calculated from raw data * * Detect and correct a 1 bit error for 256 byte block */ -int nand_correct_data(struct mtd_info *mtd, 
u_char *dat, - u_char *read_ecc, u_char *calc_ecc) +int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) { - uint8_t s0, s1, s2; + int nr_bits; + unsigned char b0, b1, b2; + unsigned char byte_addr, bit_addr; + /* + * b0 to b2 indicate which bit is faulty (if any) + * we might need the xor result more than once, + * so keep them in a local var + */ #ifdef CONFIG_MTD_NAND_ECC_SMC - s0 = calc_ecc[0] ^ read_ecc[0]; - s1 = calc_ecc[1] ^ read_ecc[1]; - s2 = calc_ecc[2] ^ read_ecc[2]; + b0 = read_ecc[0] ^ calc_ecc[0]; + b1 = read_ecc[1] ^ calc_ecc[1]; #else - s1 = calc_ecc[0] ^ read_ecc[0]; - s0 = calc_ecc[1] ^ read_ecc[1]; - s2 = calc_ecc[2] ^ read_ecc[2]; + b0 = read_ecc[1] ^ calc_ecc[1]; + b1 = read_ecc[0] ^ calc_ecc[0]; #endif - if ((s0 | s1 | s2) == 0) - return 0; - - /* Check for a single bit error */ - if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 && - ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 && - ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) { - - uint32_t byteoffs, bitnum; + b2 = read_ecc[2] ^ calc_ecc[2]; - byteoffs = (s1 << 0) & 0x80; - byteoffs |= (s1 << 1) & 0x40; - byteoffs |= (s1 << 2) & 0x20; - byteoffs |= (s1 << 3) & 0x10; + /* check if there are any bitfaults */ - byteoffs |= (s0 >> 4) & 0x08; - byteoffs |= (s0 >> 3) & 0x04; - byteoffs |= (s0 >> 2) & 0x02; - byteoffs |= (s0 >> 1) & 0x01; + /* count nr of bits; use table lookup, faster than calculating it */ + nr_bits = bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]; - bitnum = (s2 >> 5) & 0x04; - bitnum |= (s2 >> 4) & 0x02; - bitnum |= (s2 >> 3) & 0x01; - - dat[byteoffs] ^= (1 << bitnum); - - return 1; + /* repeated if statements are slightly more efficient than switch ... */ + /* ordered in order of likelihood */ + if (nr_bits == 0) + return (0); /* no error */ + if (nr_bits == 11) { /* correctable error */ + /* + * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte + * cp 5/3/1 indicate the faulty bit. + * A lookup table (called addressbits) is used to filter + * the bits from the byte they are in. + * A marginal optimisation is possible by having three + * different lookup tables. + * One as we have now (for b0), one for b2 + * (that would avoid the >> 1), and one for b1 (with all values + * << 4). However it was felt that introducing two more tables + * hardly justify the gain. + * + * The b2 shift is there to get rid of the lowest two bits. + * We could also do addressbits[b2] >> 1 but for the + * performace it does not make any difference + */ + byte_addr = (addressbits[b1] << 4) + addressbits[b0]; + bit_addr = addressbits[b2 >> 2]; + /* flip the bit */ + buf[byte_addr] ^= (1 << bit_addr); + return (1); } - - if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1) - return 1; - - return -EBADMSG; + if (nr_bits == 1) + return (1); /* error in ecc data; no action needed */ + return -1; } EXPORT_SYMBOL(nand_correct_data); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Steven J. Hill "); +MODULE_AUTHOR("Frans Meulenbroeks "); MODULE_DESCRIPTION("Generic NAND ECC support"); -- cgit v0.10.2 From ccbcd6cba5ef6e071deb072188ad044921f6b91e Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 16 Aug 2008 11:01:31 +0100 Subject: [MTD] [NAND] Minor cleanup of nand_ecc.c Make the standalone stuff a little cleaner, fix some checkpatch warnings. 
Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index 7129da5..a8e8413 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -4,15 +4,15 @@ * * drivers/mtd/nand/nand_ecc.c * - * Copyright (C) 2008 Koninklijke Philips Electronics NV. - * Author: Frans Meulenbroeks + * Copyright © 2008 Koninklijke Philips Electronics NV. + * Author: Frans Meulenbroeks * * Completely replaces the previous ECC implementation which was written by: * Steven J. Hill (sjhill@realitydiluted.com) * Thomas Gleixner (tglx@linutronix.de) * * Information on how this algorithm works and how it was developed - * can be found in Documentation/nand/ecc.txt + * can be found in Documentation/mtd/nand_ecc.txt * * This file is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -35,7 +35,7 @@ * e.g. when running the code in a testbed or a benchmark program. * When STANDALONE is used, the module related macros are commented out * as well as the linux include files. - * Instead a private definition of mtd_into is given to satisfy the compiler + * Instead a private definition of mtd_info is given to satisfy the compiler * (the code does not use mtd_info, so the code does not care) */ #ifndef STANDALONE @@ -44,10 +44,8 @@ #include #include #else -typedef uint32_t unsigned long -struct mtd_info { - int dummy; -}; +#include +struct mtd_info; #define EXPORT_SYMBOL(x) /* x */ #define MODULE_LICENSE(x) /* x */ @@ -409,7 +407,7 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, /* repeated if statements are slightly more efficient than switch ... */ /* ordered in order of likelihood */ if (nr_bits == 0) - return (0); /* no error */ + return 0; /* no error */ if (nr_bits == 11) { /* correctable error */ /* * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte @@ -431,10 +429,10 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, bit_addr = addressbits[b2 >> 2]; /* flip the bit */ buf[byte_addr] ^= (1 << bit_addr); - return (1); + return 1; } if (nr_bits == 1) - return (1); /* error in ecc data; no action needed */ + return 1; /* error in ecc data; no action needed */ return -1; } EXPORT_SYMBOL(nand_correct_data); -- cgit v0.10.2 From 8ee991dd343df57910ff6947696afada9f02bf7e Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Sun, 17 Aug 2008 07:50:44 +0800 Subject: [MTD] removed unused #include The drivers below do not use LINUX_VERSION_CODE nor KERNEL_VERSION. drivers/mtd/maps/amd76xrom.c drivers/mtd/maps/ck804xrom.c drivers/mtd/maps/esb2rom.c This patch removes the said #include . 
Signed-off-by: Huang Weiyi Signed-off-by: David Woodhouse diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index 948b86f3..d1eec7d 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c @@ -6,7 +6,6 @@ #include #include -#include #include #include #include diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index effaf7c..1a6feb4 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c @@ -9,7 +9,6 @@ #include #include -#include #include #include #include diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c index aa64a47..bbbcdd4 100644 --- a/drivers/mtd/maps/esb2rom.c +++ b/drivers/mtd/maps/esb2rom.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include -- cgit v0.10.2 From 75caf6b5acc6b895df9bdd36db631220e1096e9f Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 18 Aug 2008 16:23:53 +0100 Subject: [JFFS2] Fill in f_fsid field in jffs2_statfs() Signed-off-by: David Woodhouse diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 086c438..89e9b73 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -207,6 +207,8 @@ int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_files = 0; buf->f_ffree = 0; buf->f_namelen = JFFS2_MAX_NAME_LEN; + buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC; + buf->f_fsid.val[1] = c->mtd->index; spin_lock(&c->erase_completion_lock); avail = c->dirty_size + c->free_size; -- cgit v0.10.2 From 1077be58ad7baadd86e47e8b4f6209fa5b6364a5 Mon Sep 17 00:00:00 2001 From: frans Date: Wed, 20 Aug 2008 21:11:50 +0200 Subject: [MTD] [NAND] nand_ecc.c: fix big endian, strengthen test, add printk This patch for nand_ecc.c fixes three issues - fix code so it also works on big endian architectures - added a printk in case of an uncorrectable ecc error - strengthen the test for correctable errors (decreasing the chance that multiple bit faults by accident will be seen as correctable) Note: the big endian code is only tested in a testbed (running on big endian hardware) as I cannot rebuild and test a big endian kernel at the moment. However the only thing that can go wrong is if does not give __BIG_ENDIAN in that case. In my eyes very unlikely. 
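For illustration only (not part of the patch), a tiny stand-alone sketch of why the word-wide accumulator needs the swapped extraction on big endian: it just shows where the same four data bytes land inside the 32-bit value that nand_ecc.c reads via "*bp++".

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const unsigned char buf[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t par;

	memcpy(&par, buf, sizeof(par));	/* same view of memory as "*bp++" */

	/*
	 * Little endian: par == 0x44332211, so "par >> 16" isolates the
	 * bytes at addresses 2 and 3, i.e. the rp3 half.
	 * Big endian:    par == 0x11223344, and the very same shift now
	 * isolates addresses 0 and 1, i.e. the rp2 half, hence the swapped
	 * #ifdef __BIG_ENDIAN branches in the hunks below.
	 */
	printf("par = 0x%08x, par >> 16 = 0x%04x\n",
	       (unsigned int)par, (unsigned int)(par >> 16));
	return 0;
}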
Signed-off-by: Frans Meulenbroeks Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index a8e8413..d99e569 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -43,6 +43,7 @@ #include #include #include +#include #else #include struct mtd_info; @@ -51,6 +52,9 @@ struct mtd_info; #define MODULE_LICENSE(x) /* x */ #define MODULE_AUTHOR(x) /* x */ #define MODULE_DESCRIPTION(x) /* x */ + +#define printk printf +#define KERN_ERR "" #endif /* @@ -273,24 +277,38 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, /* * we also need to calculate the row parity for rp0..rp3 * This is present in par, because par is now - * rp3 rp3 rp2 rp2 + * rp3 rp3 rp2 rp2 in little endian and + * rp2 rp2 rp3 rp3 in big endian * as well as - * rp1 rp0 rp1 rp0 + * rp1 rp0 rp1 rp0 in little endian and + * rp0 rp1 rp0 rp1 in big endian * First calculate rp2 and rp3 - * (and yes: rp2 = (par ^ rp3) & 0xff; but doing that did not - * give a performance improvement) */ +#ifdef __BIG_ENDIAN + rp2 = (par >> 16); + rp2 ^= (rp2 >> 8); + rp2 &= 0xff; + rp3 = par & 0xffff; + rp3 ^= (rp3 >> 8); + rp3 &= 0xff; +#else rp3 = (par >> 16); rp3 ^= (rp3 >> 8); rp3 &= 0xff; rp2 = par & 0xffff; rp2 ^= (rp2 >> 8); rp2 &= 0xff; +#endif /* reduce par to 16 bits then calculate rp1 and rp0 */ par ^= (par >> 16); +#ifdef __BIG_ENDIAN + rp0 = (par >> 8) & 0xff; + rp1 = (par & 0xff); +#else rp1 = (par >> 8) & 0xff; rp0 = (par & 0xff); +#endif /* finally reduce par to 8 bits */ par ^= (par >> 8); @@ -381,7 +399,6 @@ EXPORT_SYMBOL(nand_calculate_ecc); int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc) { - int nr_bits; unsigned char b0, b1, b2; unsigned char byte_addr, bit_addr; @@ -401,14 +418,15 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, /* check if there are any bitfaults */ - /* count nr of bits; use table lookup, faster than calculating it */ - nr_bits = bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]; - /* repeated if statements are slightly more efficient than switch ... */ /* ordered in order of likelihood */ - if (nr_bits == 0) + + if ((b0 | b1 | b2) == 0) return 0; /* no error */ - if (nr_bits == 11) { /* correctable error */ + + if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) && + (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) && + (((b2 ^ (b2 >> 1)) & 0x54) == 0x54)) { /* single bit error */ /* * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte * cp 5/3/1 indicate the faulty bit. 
@@ -430,9 +448,13 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, /* flip the bit */ buf[byte_addr] ^= (1 << bit_addr); return 1; + } - if (nr_bits == 1) + /* count nr of bits; use table lookup, faster than calculating it */ + if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) return 1; /* error in ecc data; no action needed */ + + printk(KERN_ERR "uncorrectable error : "); return -1; } EXPORT_SYMBOL(nand_correct_data); -- cgit v0.10.2 From 17c1d2be28e485c0c8b09661db39d5bf2605069d Mon Sep 17 00:00:00 2001 From: Alexey Korolev Date: Wed, 20 Aug 2008 22:32:08 +0100 Subject: [MTD] [NAND] Fix missing kernel-doc [Reported by Randy Dunlap] Signed-off-by: Alexey Korolev Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 5822805..d303db3 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -801,9 +801,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function * @mtd: mtd info structure * @chip: nand chip info structure - * @dataofs offset of requested data within the page - * @readlen data length - * @buf: buffer to store read data + * @data_offs: offset of requested data within the page + * @readlen: data length + * @bufpoi: buffer to store read data */ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) { diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index d99e569..fd19787 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -150,8 +150,8 @@ static const char addressbits[256] = { /** * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block * @mtd: MTD block structure (unused) - * @dat: raw data - * @ecc_code: buffer for ECC + * @buf: input buffer with raw data + * @code: output buffer with ECC */ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, unsigned char *code) @@ -390,7 +390,7 @@ EXPORT_SYMBOL(nand_calculate_ecc); /** * nand_correct_data - [NAND Interface] Detect and correct bit error(s) * @mtd: MTD block structure (unused) - * @dat: raw data read from the chip + * @buf: raw data read from the chip * @read_ecc: ECC from the chip * @calc_ecc: the ECC calculated from raw data * diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 81774e5..733d3f3 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -248,6 +248,7 @@ struct nand_hw_control { * @read_page_raw: function to read a raw page without ECC * @write_page_raw: function to write a raw page without ECC * @read_page: function to read a page according to the ecc generator requirements + * @read_subpage: function to read parts of the page covered by ECC. * @write_page: function to write a page according to the ecc generator requirements * @read_oob: function to read chip OOB data * @write_oob: function to write chip OOB data -- cgit v0.10.2 From ee974e01e5ef2914036f08c8e41d1a3fa8bfc9d9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 20 Aug 2008 16:37:26 -0700 Subject: clocksource: check range Check that the value being passed to parse_pmtmr() does not exceed the limits of pmtmr_ioport. 
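For illustration only, a user-space sketch of the hazard being guarded against: an unsigned long parsed on a 64-bit kernel can hold values that would be silently truncated when assigned to the 32-bit pmtmr_ioport (the check is under CONFIG_X86_64 presumably because on 32-bit, unsigned long already cannot exceed UINT_MAX). The override value below is made up.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <limits.h>

int main(void)
{
	/* a bogus command-line override, larger than any 32-bit port */
	unsigned long base = strtoul("1ffff0000", NULL, 16);
	uint32_t pmtmr_ioport;

	if (base > UINT_MAX) {
		fprintf(stderr, "rejected: 0x%lx does not fit in 32 bits\n", base);
		return 1;
	}
	pmtmr_ioport = base;	/* safe only after the range check above */
	printf("accepted: 0x%x\n", (unsigned int)pmtmr_ioport);
	return 0;
}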
Signed-off-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index 5ca1d80..3df3384 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -226,9 +226,12 @@ static int __init parse_pmtmr(char *arg) if (strict_strtoul(arg, 16, &base)) return -EINVAL; - +#ifdef CONFIG_X86_64 + if (base > UINT_MAX) + return -ERANGE; +#endif printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n", - (unsigned int)pmtmr_ioport, base); + pmtmr_ioport, base); pmtmr_ioport = base; return 1; -- cgit v0.10.2 From 1aa5dfb751d275ae7117d3b73ac423b4a46f2a73 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 20 Aug 2008 16:37:28 -0700 Subject: clocksource: keep track of original clocksource frequency The clocksource frequency is represented by clocksource->mult/2^(clocksource->shift). Currently, when NTP makes adjustments to the clock frequency, they are made directly to the mult value. This has the drawback that once changed, we cannot know what the orignal mult value was, or how much adjustment has been applied. This property causes problems in calculating proper ntp intervals when switching back and forth between clocksources. This patch separates the current mult value into a mult and mult_orig pair. The mult_orig value stays constant, while the ntp clocksource adjustments are done only to the mult value. This allows for correct ntp interval calculation and additionally lays the groundwork for a new notion of time, what I'm calling the monotonic-raw time, which is introduced in a following patch. Signed-off-by: John Stultz Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 55e434f..f0a7fb9 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -45,7 +45,8 @@ struct clocksource; * @read: returns a cycle value * @mask: bitmask for two's complement * subtraction of non 64 bit counters - * @mult: cycle to nanosecond multiplier + * @mult: cycle to nanosecond multiplier (adjusted by NTP) + * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP) * @shift: cycle to nanosecond divisor (power of two) * @flags: flags describing special properties * @vread: vsyscall based read @@ -63,6 +64,7 @@ struct clocksource { cycle_t (*read)(void); cycle_t mask; u32 mult; + u32 mult_orig; u32 shift; unsigned long flags; cycle_t (*vread)(void); @@ -201,16 +203,17 @@ static inline void clocksource_calculate_interval(struct clocksource *c, { u64 tmp; - /* XXX - All of this could use a whole lot of optimization */ + /* Do the ns -> cycle conversion first, using original mult */ tmp = length_nsec; tmp <<= c->shift; - tmp += c->mult/2; - do_div(tmp, c->mult); + tmp += c->mult_orig/2; + do_div(tmp, c->mult_orig); c->cycle_interval = (cycle_t)tmp; if (c->cycle_interval == 0) c->cycle_interval = 1; + /* Go back from cycles -> shifted ns, this time use ntp adjused mult */ c->xtime_interval = (u64)c->cycle_interval * c->mult; } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 093d4ac..9ed2eec 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c) unsigned long flags; int ret; + /* save mult_orig on registration */ + c->mult_orig = c->mult; + spin_lock_irqsave(&clocksource_lock, flags); ret = clocksource_enqueue(c); if (!ret) diff --git a/kernel/time/jiffies.c 
b/kernel/time/jiffies.c index 4c256fd..1ca9955 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = { .read = jiffies_read, .mask = 0xffffffff, /*32bits*/ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ + .mult_orig = NSEC_PER_JIFFY << JIFFIES_SHIFT, .shift = JIFFIES_SHIFT, }; -- cgit v0.10.2 From 9a055117d3d9cb562f83f8d4cd88772761f4cab0 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Wed, 20 Aug 2008 16:37:28 -0700 Subject: clocksource: introduce clocksource_forward_now() To keep the raw monotonic patch simple first introduce clocksource_forward_now(), which takes care of the offset since the last update_wall_time() call and adds it to the clock, so there is no need anymore to deal with it explicitly at various places, which need to make significant changes to the clock. This is also gets rid of the timekeeping_suspend_nsecs, instead of waiting until resume, the value is accumulated during suspend. In the end there is only a single user of __get_nsec_offset() left, so I integrated it back to getnstimeofday(). Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e91c29f..83d3555 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -58,27 +58,23 @@ struct clocksource *clock; #ifdef CONFIG_GENERIC_TIME /** - * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook + * clocksource_forward_now - update clock to the current time * - * private function, must hold xtime_lock lock when being - * called. Returns the number of nanoseconds since the - * last call to update_wall_time() (adjusted by NTP scaling) + * Forward the current clock to update its state since the last call to + * update_wall_time(). This is useful before significant clock changes, + * as it avoids having to deal with this time offset explicitly. 
*/ -static inline s64 __get_nsec_offset(void) +static void clocksource_forward_now(void) { cycle_t cycle_now, cycle_delta; - s64 ns_offset; + s64 nsec; - /* read clocksource: */ cycle_now = clocksource_read(clock); - - /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; + clock->cycle_last = cycle_now; - /* convert to nanoseconds: */ - ns_offset = cyc2ns(clock, cycle_delta); - - return ns_offset; + nsec = cyc2ns(clock, cycle_delta); + timespec_add_ns(&xtime, nsec); } /** @@ -89,6 +85,7 @@ static inline s64 __get_nsec_offset(void) */ void getnstimeofday(struct timespec *ts) { + cycle_t cycle_now, cycle_delta; unsigned long seq; s64 nsecs; @@ -96,7 +93,15 @@ void getnstimeofday(struct timespec *ts) seq = read_seqbegin(&xtime_lock); *ts = xtime; - nsecs = __get_nsec_offset(); + + /* read clocksource: */ + cycle_now = clocksource_read(clock); + + /* calculate the delta since the last update_wall_time: */ + cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; + + /* convert to nanoseconds: */ + nsecs = cyc2ns(clock, cycle_delta); } while (read_seqretry(&xtime_lock, seq)); @@ -129,22 +134,22 @@ EXPORT_SYMBOL(do_gettimeofday); */ int do_settimeofday(struct timespec *tv) { + struct timespec ts_delta; unsigned long flags; - time_t wtm_sec, sec = tv->tv_sec; - long wtm_nsec, nsec = tv->tv_nsec; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; write_seqlock_irqsave(&xtime_lock, flags); - nsec -= __get_nsec_offset(); + clocksource_forward_now(); + + ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec; + ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec; + wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta); - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); + xtime = *tv; - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); update_xtime_cache(0); clock->error = 0; @@ -170,22 +175,17 @@ EXPORT_SYMBOL(do_settimeofday); static void change_clocksource(void) { struct clocksource *new; - cycle_t now; - u64 nsec; new = clocksource_get_next(); if (clock == new) return; - new->cycle_last = 0; - now = clocksource_read(new); - nsec = __get_nsec_offset(); - timespec_add_ns(&xtime, nsec); + clocksource_forward_now(); clock = new; - clock->cycle_last = now; - + clock->cycle_last = 0; + clock->cycle_last = clocksource_read(new); clock->error = 0; clock->xtime_nsec = 0; clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); @@ -200,8 +200,8 @@ static void change_clocksource(void) */ } #else +static inline void clocksource_forward_now(void) { } static inline void change_clocksource(void) { } -static inline s64 __get_nsec_offset(void) { return 0; } #endif /** @@ -265,8 +265,6 @@ void __init timekeeping_init(void) static int timekeeping_suspended; /* time in seconds when suspend began */ static unsigned long timekeeping_suspend_time; -/* xtime offset when we went into suspend */ -static s64 timekeeping_suspend_nsecs; /** * timekeeping_resume - Resumes the generic timekeeping subsystem. 
@@ -292,8 +290,6 @@ static int timekeeping_resume(struct sys_device *dev) wall_to_monotonic.tv_sec -= sleep_length; total_sleep_time += sleep_length; } - /* Make sure that we have the correct xtime reference */ - timespec_add_ns(&xtime, timekeeping_suspend_nsecs); update_xtime_cache(0); /* re-base the last cycle value */ clock->cycle_last = 0; @@ -319,8 +315,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) timekeeping_suspend_time = read_persistent_clock(); write_seqlock_irqsave(&xtime_lock, flags); - /* Get the current xtime offset */ - timekeeping_suspend_nsecs = __get_nsec_offset(); + clocksource_forward_now(); timekeeping_suspended = 1; write_sequnlock_irqrestore(&xtime_lock, flags); @@ -461,10 +456,10 @@ void update_wall_time(void) */ while (offset >= clock->cycle_interval) { /* accumulate one interval */ - clock->xtime_nsec += clock->xtime_interval; - clock->cycle_last += clock->cycle_interval; offset -= clock->cycle_interval; + clock->cycle_last += clock->cycle_interval; + clock->xtime_nsec += clock->xtime_interval; if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) { clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift; xtime.tv_sec++; -- cgit v0.10.2 From 2d42244ae71d6c7b0884b5664cf2eda30fb2ae68 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 20 Aug 2008 16:37:30 -0700 Subject: clocksource: introduce CLOCK_MONOTONIC_RAW In talking with Josip Loncaric, and his work on clock synchronization (see btime.sf.net), he mentioned that for really close synchronization, it is useful to have access to "hardware time", that is a notion of time that is not in any way adjusted by the clock slewing done to keep close time sync. Part of the issue is if we are using the kernel's ntp adjusted representation of time in order to measure how we should correct time, we can run into what Paul McKenney aptly described as "Painting a road using the lines we're painting as the guide". I had been thinking of a similar problem, and was trying to come up with a way to give users access to a purely hardware based time representation that avoided users having to know the underlying frequency and mask values needed to deal with the wide variety of possible underlying hardware counters. My solution is to introduce CLOCK_MONOTONIC_RAW. This exposes a nanosecond based time value, that increments starting at bootup and has no frequency adjustments made to it what so ever. The time is accessed from userspace via the posix_clock_gettime() syscall, passing CLOCK_MONOTONIC_RAW as the clock_id. 
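For illustration only, a minimal user-space sketch of reading the new clock id through clock_gettime() (assumes a libc that exposes the wrapper; older glibc needs -lrt, and the fallback #define mirrors the value added to include/linux/time.h below).

/* raw_clock.c, illustrative only */
#include <stdio.h>
#include <time.h>

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4	/* value introduced by this patch */
#endif

int main(void)
{
	struct timespec raw, mono;

	if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) {
		perror("clock_gettime(CLOCK_MONOTONIC_RAW)");
		return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &mono);

	/* raw ticks at the unadjusted hardware rate, mono is NTP-slewed */
	printf("raw:  %ld.%09ld\n", (long)raw.tv_sec, raw.tv_nsec);
	printf("mono: %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
	return 0;
}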
Signed-off-by: John Stultz Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index f0a7fb9..f88d32f 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -79,6 +79,7 @@ struct clocksource { /* timekeeping specific data, ignore */ cycle_t cycle_interval; u64 xtime_interval; + u32 raw_interval; /* * Second part is written at each timer interrupt * Keep it in a different cache line to dirty no @@ -87,6 +88,7 @@ struct clocksource { cycle_t cycle_last ____cacheline_aligned_in_smp; u64 xtime_nsec; s64 error; + struct timespec raw_time; #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ @@ -215,6 +217,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c, /* Go back from cycles -> shifted ns, this time use ntp adjused mult */ c->xtime_interval = (u64)c->cycle_interval * c->mult; + c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift; } diff --git a/include/linux/time.h b/include/linux/time.h index e15206a..205f974 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -117,6 +117,7 @@ extern int do_setitimer(int which, struct itimerval *value, extern unsigned int alarm_setitimer(unsigned int seconds); extern int do_getitimer(int which, struct itimerval *value); extern void getnstimeofday(struct timespec *tv); +extern void getrawmonotonic(struct timespec *ts); extern void getboottime(struct timespec *ts); extern void monotonic_to_bootbased(struct timespec *ts); @@ -214,6 +215,7 @@ struct itimerval { #define CLOCK_MONOTONIC 1 #define CLOCK_PROCESS_CPUTIME_ID 2 #define CLOCK_THREAD_CPUTIME_ID 3 +#define CLOCK_MONOTONIC_RAW 4 /* * The IDs of various hardware clocks: diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index e36d579..d3c66b5 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -223,6 +223,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp) } /* + * Get monotonic time for posix timers + */ +static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp) +{ + getrawmonotonic(tp); + return 0; +} + +/* * Initialize everything, well, just everything in Posix clocks/timers ;) */ static __init int init_posix_timers(void) @@ -235,9 +244,15 @@ static __init int init_posix_timers(void) .clock_get = posix_ktime_get_ts, .clock_set = do_posix_clock_nosettime, }; + struct k_clock clock_monotonic_raw = { + .clock_getres = hrtimer_get_res, + .clock_get = posix_get_monotonic_raw, + .clock_set = do_posix_clock_nosettime, + }; register_posix_clock(CLOCK_REALTIME, &clock_realtime); register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); + register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw); posix_timers_cache = kmem_cache_create("posix_timers_cache", sizeof (struct k_itimer), 0, SLAB_PANIC, diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 83d3555..5099c95 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -75,6 +75,9 @@ static void clocksource_forward_now(void) nsec = cyc2ns(clock, cycle_delta); timespec_add_ns(&xtime, nsec); + + nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift; + clock->raw_time.tv_nsec += nsec; } /** @@ -183,6 +186,8 @@ static void change_clocksource(void) clocksource_forward_now(); + new->raw_time = clock->raw_time; + clock = new; clock->cycle_last = 0; clock->cycle_last = clocksource_read(new); @@ -205,6 +210,39 @@ static inline void 
change_clocksource(void) { } #endif /** + * getrawmonotonic - Returns the raw monotonic time in a timespec + * @ts: pointer to the timespec to be set + * + * Returns the raw monotonic time (completely un-modified by ntp) + */ +void getrawmonotonic(struct timespec *ts) +{ + unsigned long seq; + s64 nsecs; + cycle_t cycle_now, cycle_delta; + + do { + seq = read_seqbegin(&xtime_lock); + + /* read clocksource: */ + cycle_now = clocksource_read(clock); + + /* calculate the delta since the last update_wall_time: */ + cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; + + /* convert to nanoseconds: */ + nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift; + + *ts = clock->raw_time; + + } while (read_seqretry(&xtime_lock, seq)); + + timespec_add_ns(ts, nsecs); +} +EXPORT_SYMBOL(getrawmonotonic); + + +/** * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres */ int timekeeping_valid_for_hres(void) @@ -466,6 +504,12 @@ void update_wall_time(void) second_overflow(); } + clock->raw_time.tv_nsec += clock->raw_interval; + if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) { + clock->raw_time.tv_nsec -= NSEC_PER_SEC; + clock->raw_time.tv_sec++; + } + /* accumulate error between NTP and clock interval */ clock->error += tick_length; clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift); -- cgit v0.10.2 From d82f0b0f6f1a0a25afc288fb7135b1601fe6df18 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 20 Aug 2008 16:46:04 -0700 Subject: migrate_timers: add comment, use spinlock_irq() Add the comment to explain why the double lock in migrate_timers() can't deadlock. Change the code to use spinlock_irq() instead of local_irq_disable() + spin_lock(). Signed-off-by: Oleg Nesterov Acked-by: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b8e4dce..03ea137 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1620,9 +1620,11 @@ static void migrate_hrtimers(int cpu) new_base = &get_cpu_var(hrtimer_bases); tick_cancel_sched_timer(cpu); - - local_irq_disable(); - spin_lock(&new_base->lock); + /* + * The caller is globally serialized and nobody else + * takes two locks at once, deadlock is not possible. + */ + spin_lock_irq(&new_base->lock); spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { @@ -1631,8 +1633,7 @@ static void migrate_hrtimers(int cpu) } spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); - local_irq_enable(); + spin_unlock_irq(&new_base->lock); put_cpu_var(hrtimer_bases); } #endif /* CONFIG_HOTPLUG_CPU */ diff --git a/kernel/timer.c b/kernel/timer.c index 03bc7f1..e8019cc 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1435,9 +1435,11 @@ static void __cpuinit migrate_timers(int cpu) BUG_ON(cpu_online(cpu)); old_base = per_cpu(tvec_bases, cpu); new_base = get_cpu_var(tvec_bases); - - local_irq_disable(); - spin_lock(&new_base->lock); + /* + * The caller is globally serialized and nobody else + * takes two locks at once, deadlock is not possible. 
+ */ + spin_lock_irq(&new_base->lock); spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); BUG_ON(old_base->running_timer); @@ -1452,8 +1454,7 @@ static void __cpuinit migrate_timers(int cpu) } spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); - local_irq_enable(); + spin_unlock_irq(&new_base->lock); put_cpu_var(tvec_bases); } #endif /* CONFIG_HOTPLUG_CPU */ -- cgit v0.10.2 From 916c7a855174e3b53d182b97a26b2e27a29726a1 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Wed, 20 Aug 2008 16:46:08 -0700 Subject: ntp: fix ADJ_OFFSET_SS_READ bug and do_adjtimex() cleanup Thanks to the review by Michael Kerrisk a bug in the recent ADJ_OFFSET_SS_READ option was discovered, where the ntp time_offset was inadvertently set by it. This fixes this by making the adjtime code more separate from the ntp_adjtime code (both of which really want to be separate syscalls). Signed-off-by: Roman Zippel Signed-off-by: Andrew Morton Acked-by: John Stultz Signed-off-by: Ingo Molnar diff --git a/include/linux/timex.h b/include/linux/timex.h index fc6035d..c00bcdd 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -141,8 +141,15 @@ struct timex { #define ADJ_MICRO 0x1000 /* select microsecond resolution */ #define ADJ_NANO 0x2000 /* select nanosecond resolution */ #define ADJ_TICK 0x4000 /* tick value */ + +#ifdef __KERNEL__ +#define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */ +#define ADJ_OFFSET_SINGLESHOT 0x0001 /* old-fashioned adjtime */ +#define ADJ_OFFSET_READONLY 0x2000 /* read-only adjtime */ +#else #define ADJ_OFFSET_SINGLESHOT 0x8001 /* old-fashioned adjtime */ -#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */ +#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */ +#endif /* xntp 3.4 compatibility names */ #define MOD_OFFSET ADJ_OFFSET diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 5125ddd..c6921aa1 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -277,38 +277,50 @@ static inline void notify_cmos_timer(void) { } int do_adjtimex(struct timex *txc) { struct timespec ts; - long save_adjust, sec; int result; - /* In order to modify anything, you gotta be super-user! */ - if (txc->modes && !capable(CAP_SYS_TIME)) - return -EPERM; - - /* Now we validate the data before disabling interrupts */ - - if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) { + /* Validate the data before disabling interrupts */ + if (txc->modes & ADJ_ADJTIME) { /* singleshot must not be used with any other mode bits */ - if (txc->modes & ~ADJ_OFFSET_SS_READ) + if (!(txc->modes & ADJ_OFFSET_SINGLESHOT)) return -EINVAL; + if (!(txc->modes & ADJ_OFFSET_READONLY) && + !capable(CAP_SYS_TIME)) + return -EPERM; + } else { + /* In order to modify anything, you gotta be super-user! */ + if (txc->modes && !capable(CAP_SYS_TIME)) + return -EPERM; + + /* if the quartz is off by more than 10% something is VERY wrong! */ + if (txc->modes & ADJ_TICK && + (txc->tick < 900000/USER_HZ || + txc->tick > 1100000/USER_HZ)) + return -EINVAL; + + if (txc->modes & ADJ_STATUS && time_state != TIME_OK) + hrtimer_cancel(&leap_timer); } - /* if the quartz is off by more than 10% something is VERY wrong ! 
*/ - if (txc->modes & ADJ_TICK) - if (txc->tick < 900000/USER_HZ || - txc->tick > 1100000/USER_HZ) - return -EINVAL; - - if (time_state != TIME_OK && txc->modes & ADJ_STATUS) - hrtimer_cancel(&leap_timer); getnstimeofday(&ts); write_seqlock_irq(&xtime_lock); - /* Save for later - semantics of adjtime is to return old value */ - save_adjust = time_adjust; - /* If there are input parameters, then process them */ + if (txc->modes & ADJ_ADJTIME) { + long save_adjust = time_adjust; + + if (!(txc->modes & ADJ_OFFSET_READONLY)) { + /* adjtime() is independent from ntp_adjtime() */ + time_adjust = txc->offset; + ntp_update_frequency(); + } + txc->offset = save_adjust; + goto adj_done; + } if (txc->modes) { + long sec; + if (txc->modes & ADJ_STATUS) { if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { @@ -375,13 +387,8 @@ int do_adjtimex(struct timex *txc) if (txc->modes & ADJ_TAI && txc->constant > 0) time_tai = txc->constant; - if (txc->modes & ADJ_OFFSET) { - if (txc->modes == ADJ_OFFSET_SINGLESHOT) - /* adjtime() is independent from ntp_adjtime() */ - time_adjust = txc->offset; - else - ntp_update_offset(txc->offset); - } + if (txc->modes & ADJ_OFFSET) + ntp_update_offset(txc->offset); if (txc->modes & ADJ_TICK) tick_usec = txc->tick; @@ -389,19 +396,16 @@ int do_adjtimex(struct timex *txc) ntp_update_frequency(); } + txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, + NTP_SCALE_SHIFT); + if (!(time_status & STA_NANO)) + txc->offset /= NSEC_PER_USEC; + +adj_done: result = time_state; /* mostly `TIME_OK' */ if (time_status & (STA_UNSYNC|STA_CLOCKERR)) result = TIME_ERROR; - if ((txc->modes == ADJ_OFFSET_SINGLESHOT) || - (txc->modes == ADJ_OFFSET_SS_READ)) - txc->offset = save_adjust; - else { - txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, - NTP_SCALE_SHIFT); - if (!(time_status & STA_NANO)) - txc->offset /= NSEC_PER_USEC; - } txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) * (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT); -- cgit v0.10.2 From 377bf1e4ac2d894791733270604594c7c851ef83 Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Thu, 21 Aug 2008 22:58:28 +0400 Subject: genirq: fix irq_desc->depth handling with DEBUG_SHIRQ When DEBUG_SHIRQ is selected, a spurious IRQ is issued before the setup_irq() initializes the desc->depth. 
An IRQ handler may call disable_irq_nosync(), but then setup_irq() will overwrite desc->depth, and upon enable_irq() we'll catch this WARN: ------------[ cut here ]------------ Badness at kernel/irq/manage.c:180 NIP: c0061ab8 LR: c0061f10 CTR: 00000000 REGS: cf83be50 TRAP: 0700 Not tainted (2.6.27-rc3-23450-g74919b0) MSR: 00021032 CR: 22042022 XER: 20000000 TASK = cf829100[5] 'events/0' THREAD: cf83a000 GPR00: c0061f10 cf83bf00 cf829100 c038e674 00000016 00000000 cf83bef8 00000038 GPR08: c0298910 00000000 c0310d28 cf83a000 00000c9c 1001a1a8 0fffe000 00800000 GPR16: ffffffff 00000000 007fff00 00000000 007ffeb0 c03320a0 c031095c c0310924 GPR24: cf8292ec cf807190 cf83a000 00009032 c038e6a4 c038e674 cf99b1cc c038e674 NIP [c0061ab8] __enable_irq+0x20/0x80 LR [c0061f10] enable_irq+0x50/0x70 Call Trace: [cf83bf00] [c038e674] irq_desc+0x630/0x9000 (unreliable) [cf83bf10] [c0061f10] enable_irq+0x50/0x70 [cf83bf30] [c01abe94] phy_change+0x68/0x108 [cf83bf50] [c0046394] run_workqueue+0xc4/0x16c [cf83bf90] [c0046834] worker_thread+0x74/0xd4 [cf83bfd0] [c004ab7c] kthread+0x48/0x84 [cf83bff0] [c00135e0] kernel_thread+0x44/0x60 Instruction dump: 4e800020 3d20c031 38a94214 4bffffcc 9421fff0 7c0802a6 93e1000c 7c7f1b78 90010014 8123001c 2f890000 409e001c <0fe00000> 80010014 83e1000c 38210010 That trace corresponds to this line: WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); The patch fixes the problem by moving the SHIRQ code below the setup_irq(). Unfortunately we can't easily move the SHIRQ code inside the setup_irq(), since it grabs a spinlock, so to prvent a 'real' IRQ from interfere us we should disable that IRQ. p.s. The driver in question is drivers/net/phy/phy.c. Signed-off-by: Anton Vorontsov Cc: David Woodhouse Signed-off-by: Ingo Molnar diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 77a51be..ae1b684 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -596,26 +596,29 @@ int request_irq(unsigned int irq, irq_handler_t handler, action->next = NULL; action->dev_id = dev_id; + retval = setup_irq(irq, action); + if (retval) + kfree(action); + #ifdef CONFIG_DEBUG_SHIRQ if (irqflags & IRQF_SHARED) { /* * It's a shared IRQ -- the driver ought to be prepared for it * to happen immediately, so let's make sure.... - * We do this before actually registering it, to make sure that - * a 'real' IRQ doesn't run in parallel with our fake + * We disable the irq to make sure that a 'real' IRQ doesn't + * run in parallel with our fake. */ unsigned long flags; + disable_irq(irq); local_irq_save(flags); + handler(irq, dev_id); + local_irq_restore(flags); + enable_irq(irq); } #endif - - retval = setup_irq(irq, action); - if (retval) - kfree(action); - return retval; } EXPORT_SYMBOL(request_irq); -- cgit v0.10.2 From dffc8d66544563fe00f176f230d5d8a5b45847bb Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Sat, 23 Aug 2008 13:56:21 +0800 Subject: [MTD] [NAND] au1550nd.c: remove unused #include It doesn't use LINUX_VERSION_CODE nor KERNEL_VERSION. This patch removes the said #include . 
Signed-off-by: Huang Weiyi Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 761946e..92c334f 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include -- cgit v0.10.2 From d68156cfad0fe09201dd049fff167a8a881427ad Mon Sep 17 00:00:00 2001 From: "Singh, Vimal" Date: Sat, 23 Aug 2008 18:18:34 +0200 Subject: [MTD] [NAND] nand_ecc.c: adding support for 512 byte ecc Support 512 byte ECC calculation [FM: updated two comments] Signed-off-by: Vimal Singh Signed-off-by: Frans Meulenbroeks Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index fd19787..868147a 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -42,6 +42,8 @@ #include #include #include +#include +#include #include #include #else @@ -148,8 +150,9 @@ static const char addressbits[256] = { }; /** - * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block - * @mtd: MTD block structure (unused) + * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte + * block + * @mtd: MTD block structure * @buf: input buffer with raw data * @code: output buffer with ECC */ @@ -158,13 +161,18 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, { int i; const uint32_t *bp = (uint32_t *)buf; + /* 256 or 512 bytes/ecc */ + const uint32_t eccsize_mult = + (((struct nand_chip *)mtd->priv)->ecc.size) >> 8; uint32_t cur; /* current value in buffer */ - /* rp0..rp15 are the various accumulated parities (per byte) */ + /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; - uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15; + uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16; + uint32_t uninitialized_var(rp17); /* to make compiler happy */ uint32_t par; /* the cumulative parity for all data */ uint32_t tmppar; /* the cumulative parity for this iteration; - for rp12 and rp14 at the end of the loop */ + for rp12, rp14 and rp16 at the end of the + loop */ par = 0; rp4 = 0; @@ -173,6 +181,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, rp10 = 0; rp12 = 0; rp14 = 0; + rp16 = 0; /* * The loop is unrolled a number of times; @@ -181,10 +190,10 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, * Note: passing unaligned data might give a performance penalty. * It is assumed that the buffers are aligned. * tmppar is the cumulative sum of this iteration. - * needed for calculating rp12, rp14 and par + * needed for calculating rp12, rp14, rp16 and par * also used as a performance improvement for rp6, rp8 and rp10 */ - for (i = 0; i < 4; i++) { + for (i = 0; i < eccsize_mult << 2; i++) { cur = *bp++; tmppar = cur; rp4 ^= cur; @@ -247,12 +256,14 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, rp12 ^= tmppar; if ((i & 0x2) == 0) rp14 ^= tmppar; + if (eccsize_mult == 2 && (i & 0x4) == 0) + rp16 ^= tmppar; } /* * handle the fact that we use longword operations - * we'll bring rp4..rp14 back to single byte entities by shifting and - * xoring first fold the upper and lower 16 bits, + * we'll bring rp4..rp14..rp16 back to single byte entities by + * shifting and xoring first fold the upper and lower 16 bits, * then the upper and lower 8 bits. 
*/ rp4 ^= (rp4 >> 16); @@ -273,6 +284,11 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, rp14 ^= (rp14 >> 16); rp14 ^= (rp14 >> 8); rp14 &= 0xff; + if (eccsize_mult == 2) { + rp16 ^= (rp16 >> 16); + rp16 ^= (rp16 >> 8); + rp16 &= 0xff; + } /* * we also need to calculate the row parity for rp0..rp3 @@ -315,7 +331,7 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, par &= 0xff; /* - * and calculate rp5..rp15 + * and calculate rp5..rp15..rp17 * note that par = rp4 ^ rp5 and due to the commutative property * of the ^ operator we can say: * rp5 = (par ^ rp4); @@ -329,6 +345,8 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, rp11 = (par ^ rp10) & 0xff; rp13 = (par ^ rp12) & 0xff; rp15 = (par ^ rp14) & 0xff; + if (eccsize_mult == 2) + rp17 = (par ^ rp16) & 0xff; /* * Finally calculate the ecc bits. @@ -375,32 +393,46 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, (invparity[rp9] << 1) | (invparity[rp8]); #endif - code[2] = - (invparity[par & 0xf0] << 7) | - (invparity[par & 0x0f] << 6) | - (invparity[par & 0xcc] << 5) | - (invparity[par & 0x33] << 4) | - (invparity[par & 0xaa] << 3) | - (invparity[par & 0x55] << 2) | - 3; + if (eccsize_mult == 1) + code[2] = + (invparity[par & 0xf0] << 7) | + (invparity[par & 0x0f] << 6) | + (invparity[par & 0xcc] << 5) | + (invparity[par & 0x33] << 4) | + (invparity[par & 0xaa] << 3) | + (invparity[par & 0x55] << 2) | + 3; + else + code[2] = + (invparity[par & 0xf0] << 7) | + (invparity[par & 0x0f] << 6) | + (invparity[par & 0xcc] << 5) | + (invparity[par & 0x33] << 4) | + (invparity[par & 0xaa] << 3) | + (invparity[par & 0x55] << 2) | + (invparity[rp17] << 1) | + (invparity[rp16] << 0); return 0; } EXPORT_SYMBOL(nand_calculate_ecc); /** * nand_correct_data - [NAND Interface] Detect and correct bit error(s) - * @mtd: MTD block structure (unused) + * @mtd: MTD block structure * @buf: raw data read from the chip * @read_ecc: ECC from the chip * @calc_ecc: the ECC calculated from raw data * - * Detect and correct a 1 bit error for 256 byte block + * Detect and correct a 1 bit error for 256/512 byte block */ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc) { unsigned char b0, b1, b2; unsigned char byte_addr, bit_addr; + /* 256 or 512 bytes/ecc */ + const uint32_t eccsize_mult = + (((struct nand_chip *)mtd->priv)->ecc.size) >> 8; /* * b0 to b2 indicate which bit is faulty (if any) @@ -426,10 +458,12 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) && (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) && - (((b2 ^ (b2 >> 1)) & 0x54) == 0x54)) { /* single bit error */ + ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) || + (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) { + /* single bit error */ /* - * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte - * cp 5/3/1 indicate the faulty bit. + * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty + * byte, cp 5/3/1 indicate the faulty bit. * A lookup table (called addressbits) is used to filter * the bits from the byte they are in. 
* A marginal optimisation is possible by having three @@ -443,7 +477,11 @@ int nand_correct_data(struct mtd_info *mtd, unsigned char *buf, * We could also do addressbits[b2] >> 1 but for the * performace it does not make any difference */ - byte_addr = (addressbits[b1] << 4) + addressbits[b0]; + if (eccsize_mult == 1) + byte_addr = (addressbits[b1] << 4) + addressbits[b0]; + else + byte_addr = (addressbits[b2 & 0x3] << 8) + + (addressbits[b1] << 4) + addressbits[b0]; bit_addr = addressbits[b2 >> 2]; /* flip the bit */ buf[byte_addr] ^= (1 << bit_addr); -- cgit v0.10.2 From e82374fd1a804e197fc2a54c3930e70c5d300abc Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Mon, 11 Aug 2008 22:22:27 +0200 Subject: pda_power: Check and handle return value of set_irq_wake The recent change in commit 2db873211ba47ef704c301f9ecf4a33413a0b649 forces the calls enable_irq_wake() and disable_irq_wake() to be balanced. But if in pda_power_suspend() the call to enable_irq_wake() fails (because attached gpio cannot wake up the CPU), the corresponding disable_irq_wake will WARN(). Fix it by storing success/failure of enable_irq_wake(). Signed-off-by: Robert Jarzmik Signed-off-by: Anton Vorontsov diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c index 0471ec7..d30bb76 100644 --- a/drivers/power/pda_power.c +++ b/drivers/power/pda_power.c @@ -334,13 +334,16 @@ static int pda_power_remove(struct platform_device *pdev) } #ifdef CONFIG_PM +static int ac_wakeup_enabled; +static int usb_wakeup_enabled; + static int pda_power_suspend(struct platform_device *pdev, pm_message_t state) { if (device_may_wakeup(&pdev->dev)) { if (ac_irq) - enable_irq_wake(ac_irq->start); + ac_wakeup_enabled = !enable_irq_wake(ac_irq->start); if (usb_irq) - enable_irq_wake(usb_irq->start); + usb_wakeup_enabled = !enable_irq_wake(usb_irq->start); } return 0; @@ -349,9 +352,9 @@ static int pda_power_suspend(struct platform_device *pdev, pm_message_t state) static int pda_power_resume(struct platform_device *pdev) { if (device_may_wakeup(&pdev->dev)) { - if (usb_irq) + if (usb_irq && usb_wakeup_enabled) disable_irq_wake(usb_irq->start); - if (ac_irq) + if (ac_irq && ac_wakeup_enabled) disable_irq_wake(ac_irq->start); } -- cgit v0.10.2 From 942ed161944b3476639916cf544e6975b29c985a Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 26 Aug 2008 21:09:59 +0100 Subject: power_supply: Add function to return system-wide power state Certain drivers benefit from knowing whether the system is on ac or battery, for instance when determining which backlight registers to read. This adds a simple call to determine whether there's an online power supply other than any batteries. 
Signed-off-by: Matthew Garrett Signed-off-by: Anton Vorontsov diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index cb1ccb4..f44f5b6 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -87,6 +87,30 @@ int power_supply_am_i_supplied(struct power_supply *psy) return error; } +static int __power_supply_is_system_supplied(struct device *dev, void *data) +{ + union power_supply_propval ret = {0,}; + struct power_supply *psy = dev_get_drvdata(dev); + + if (psy->type != POWER_SUPPLY_TYPE_BATTERY) { + if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &ret)) + return 0; + if (ret.intval) + return ret.intval; + } + return 0; +} + +int power_supply_is_system_supplied(void) +{ + int error; + + error = class_for_each_device(power_supply_class, NULL, NULL, + __power_supply_is_system_supplied); + + return error; +} + int power_supply_register(struct device *parent, struct power_supply *psy) { int rc = 0; @@ -148,6 +172,7 @@ static void __exit power_supply_class_exit(void) EXPORT_SYMBOL_GPL(power_supply_changed); EXPORT_SYMBOL_GPL(power_supply_am_i_supplied); +EXPORT_SYMBOL_GPL(power_supply_is_system_supplied); EXPORT_SYMBOL_GPL(power_supply_register); EXPORT_SYMBOL_GPL(power_supply_unregister); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index ea96ead..f9348cb 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -165,6 +165,12 @@ struct power_supply_info { extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); +#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE) +extern int power_supply_is_system_supplied(void); +#else +static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } +#endif + extern int power_supply_register(struct device *parent, struct power_supply *psy); extern void power_supply_unregister(struct power_supply *psy); -- cgit v0.10.2 From b996ad0e9fb15ca4acc60bcd0380912117a45d13 Mon Sep 17 00:00:00 2001 From: Rodolfo Giometti Date: Wed, 20 Aug 2008 16:52:58 -0700 Subject: power_supply: Support for Texas Instruments BQ27200 battery managers These battery managers came in two different packages: one for I2C busses (BQ27200) and one for HDQ busses (BQ27000). This driver currently supports only the I2C chip version but the code is designed in order to easily allow the HDQ chip version integration. [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: make things static, use kasprintf()] Signed-off-by: Rodolfo Giometti Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Anton Vorontsov diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 9ce5585..b2bd104 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -62,4 +62,10 @@ config BATTERY_PALMTX help Say Y to enable support for the battery in Palm T|X. +config BATTERY_BQ27x00 + tristate "BQ27200 battery driver" + depends on I2C + help + Say Y here to enable support for batteries with BQ27200(I2C) chip. 
+ endif # POWER_SUPPLY diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 4706bf8..6cb301b 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o +obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c new file mode 100644 index 0000000..62d4948 --- /dev/null +++ b/drivers/power/bq27x00_battery.c @@ -0,0 +1,382 @@ +/* + * BQ27x00 battery driver + * + * Copyright (C) 2008 Rodolfo Giometti + * Copyright (C) 2008 Eurotech S.p.A. + * + * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRIVER_VERSION "1.0.0" + +#define BQ27x00_REG_TEMP 0x06 +#define BQ27x00_REG_VOLT 0x08 +#define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */ +#define BQ27x00_REG_AI 0x14 +#define BQ27x00_REG_FLAGS 0x0A +#define HIGH_BYTE(A) ((A) << 8) + +/* If the system has several batteries we need a different name for each + * of them... + */ +static DEFINE_IDR(battery_id); +static DEFINE_MUTEX(battery_mutex); + +struct bq27x00_device_info; +struct bq27x00_access_methods { + int (*read)(u8 reg, int *rt_value, int b_single, + struct bq27x00_device_info *di); +}; + +struct bq27x00_device_info { + struct device *dev; + int id; + int voltage_uV; + int current_uA; + int temp_C; + int charge_rsoc; + struct bq27x00_access_methods *bus; + struct power_supply bat; + + struct i2c_client *client; +}; + +static enum power_supply_property bq27x00_battery_props[] = { + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_TEMP, +}; + +/* + * Common code for BQ27x00 devices + */ + +static int bq27x00_read(u8 reg, int *rt_value, int b_single, + struct bq27x00_device_info *di) +{ + int ret; + + ret = di->bus->read(reg, rt_value, b_single, di); + *rt_value = be16_to_cpu(*rt_value); + + return ret; +} + +/* + * Return the battery temperature in Celcius degrees + * Or < 0 if something fails. + */ +static int bq27x00_battery_temperature(struct bq27x00_device_info *di) +{ + int ret; + int temp = 0; + + ret = bq27x00_read(BQ27x00_REG_TEMP, &temp, 0, di); + if (ret) { + dev_err(di->dev, "error reading temperature\n"); + return ret; + } + + return (temp >> 2) - 273; +} + +/* + * Return the battery Voltage in milivolts + * Or < 0 if something fails. + */ +static int bq27x00_battery_voltage(struct bq27x00_device_info *di) +{ + int ret; + int volt = 0; + + ret = bq27x00_read(BQ27x00_REG_VOLT, &volt, 0, di); + if (ret) { + dev_err(di->dev, "error reading voltage\n"); + return ret; + } + + return volt; +} + +/* + * Return the battery average current + * Note that current can be negative signed as well + * Or 0 if something fails. 
+ */ +static int bq27x00_battery_current(struct bq27x00_device_info *di) +{ + int ret; + int curr = 0; + int flags = 0; + + ret = bq27x00_read(BQ27x00_REG_AI, &curr, 0, di); + if (ret) { + dev_err(di->dev, "error reading current\n"); + return 0; + } + ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); + if (ret < 0) { + dev_err(di->dev, "error reading flags\n"); + return 0; + } + if ((flags & (1 << 7)) != 0) { + dev_dbg(di->dev, "negative current!\n"); + return -curr; + } + return curr; +} + +/* + * Return the battery Relative State-of-Charge + * Or < 0 if something fails. + */ +static int bq27x00_battery_rsoc(struct bq27x00_device_info *di) +{ + int ret; + int rsoc = 0; + + ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di); + if (ret) { + dev_err(di->dev, "error reading relative State-of-Charge\n"); + return ret; + } + + return rsoc >> 8; +} + +#define to_bq27x00_device_info(x) container_of((x), \ + struct bq27x00_device_info, bat); + +static int bq27x00_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct bq27x00_device_info *di = to_bq27x00_device_info(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_PRESENT: + val->intval = bq27x00_battery_voltage(di); + if (psp == POWER_SUPPLY_PROP_PRESENT) + val->intval = val->intval <= 0 ? 0 : 1; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = bq27x00_battery_current(di); + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = bq27x00_battery_rsoc(di); + break; + case POWER_SUPPLY_PROP_TEMP: + val->intval = bq27x00_battery_temperature(di); + break; + default: + return -EINVAL; + } + + return 0; +} + +static void bq27x00_powersupply_init(struct bq27x00_device_info *di) +{ + di->bat.type = POWER_SUPPLY_TYPE_BATTERY; + di->bat.properties = bq27x00_battery_props; + di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props); + di->bat.get_property = bq27x00_battery_get_property; + di->bat.external_power_changed = NULL; +} + +/* + * BQ27200 specific code + */ + +static int bq27200_read(u8 reg, int *rt_value, int b_single, + struct bq27x00_device_info *di) +{ + struct i2c_client *client = di->client; + struct i2c_msg msg[1]; + unsigned char data[2]; + int err; + + if (!client->adapter) + return -ENODEV; + + msg->addr = client->addr; + msg->flags = 0; + msg->len = 1; + msg->buf = data; + + data[0] = reg; + err = i2c_transfer(client->adapter, msg, 1); + + if (err >= 0) { + if (!b_single) + msg->len = 2; + else + msg->len = 1; + + msg->flags = I2C_M_RD; + err = i2c_transfer(client->adapter, msg, 1); + if (err >= 0) { + if (!b_single) + *rt_value = data[1] | HIGH_BYTE(data[0]); + else + *rt_value = data[0]; + + return 0; + } + } + return err; +} + +static int bq27200_battery_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + char *name; + struct bq27x00_device_info *di; + struct bq27x00_access_methods *bus; + int num; + int retval = 0; + + /* Get new ID for the new battery device */ + retval = idr_pre_get(&battery_id, GFP_KERNEL); + if (retval == 0) + return -ENOMEM; + mutex_lock(&battery_mutex); + retval = idr_get_new(&battery_id, client, &num); + mutex_unlock(&battery_mutex); + if (retval < 0) + return retval; + + name = kasprintf(GFP_KERNEL, "bq27200-%d", num); + if (!name) { + dev_err(&client->dev, "failed to allocate device name\n"); + retval = -ENOMEM; + goto batt_failed_1; + } + + di = kzalloc(sizeof(*di), GFP_KERNEL); + if (!di) { + dev_err(&client->dev, "failed to allocate device info data\n"); + 
retval = -ENOMEM; + goto batt_failed_2; + } + di->id = num; + + bus = kzalloc(sizeof(*bus), GFP_KERNEL); + if (!bus) { + dev_err(&client->dev, "failed to allocate access method " + "data\n"); + retval = -ENOMEM; + goto batt_failed_3; + } + + i2c_set_clientdata(client, di); + di->dev = &client->dev; + di->bat.name = name; + bus->read = &bq27200_read; + di->bus = bus; + di->client = client; + + bq27x00_powersupply_init(di); + + retval = power_supply_register(&client->dev, &di->bat); + if (retval) { + dev_err(&client->dev, "failed to register battery\n"); + goto batt_failed_4; + } + + dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION); + + return 0; + +batt_failed_4: + kfree(bus); +batt_failed_3: + kfree(di); +batt_failed_2: + kfree(name); +batt_failed_1: + mutex_lock(&battery_mutex); + idr_remove(&battery_id, num); + mutex_unlock(&battery_mutex); + + return retval; +} + +static int bq27200_battery_remove(struct i2c_client *client) +{ + struct bq27x00_device_info *di = i2c_get_clientdata(client); + + power_supply_unregister(&di->bat); + + kfree(di->bat.name); + + mutex_lock(&battery_mutex); + idr_remove(&battery_id, di->id); + mutex_unlock(&battery_mutex); + + kfree(di); + + return 0; +} + +/* + * Module stuff + */ + +static const struct i2c_device_id bq27200_id[] = { + { "bq27200", 0 }, + {}, +}; + +static struct i2c_driver bq27200_battery_driver = { + .driver = { + .name = "bq27200-battery", + }, + .probe = bq27200_battery_probe, + .remove = bq27200_battery_remove, + .id_table = bq27200_id, +}; + +static int __init bq27x00_battery_init(void) +{ + int ret; + + ret = i2c_add_driver(&bq27200_battery_driver); + if (ret) + printk(KERN_ERR "Unable to register BQ27200 driver\n"); + + return ret; +} +module_init(bq27x00_battery_init); + +static void __exit bq27x00_battery_exit(void) +{ + i2c_del_driver(&bq27200_battery_driver); +} +module_exit(bq27x00_battery_exit); + +MODULE_AUTHOR("Rodolfo Giometti "); +MODULE_DESCRIPTION("BQ27x00 battery monitor driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 31db6e9ea1dbdcf66b8227b4f7035dee1b1dd8c0 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 29 Aug 2008 07:19:50 +0400 Subject: [JFFS2] Move JFFS2 config options out of fs/Kconfig Signed-off-by: Alexey Dobriyan Signed-off-by: David Woodhouse diff --git a/fs/Kconfig b/fs/Kconfig index d387358..5831f9c 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -1136,195 +1136,7 @@ config EFS_FS To compile the EFS file system support as a module, choose M here: the module will be called efs. -config JFFS2_FS - tristate "Journalling Flash File System v2 (JFFS2) support" - select CRC32 - depends on MTD - help - JFFS2 is the second generation of the Journalling Flash File System - for use on diskless embedded devices. It provides improved wear - levelling, compression and support for hard links. You cannot use - this on normal block devices, only on 'MTD' devices. - - Further information on the design and implementation of JFFS2 is - available at . - -config JFFS2_FS_DEBUG - int "JFFS2 debugging verbosity (0 = quiet, 2 = noisy)" - depends on JFFS2_FS - default "0" - help - This controls the amount of debugging messages produced by the JFFS2 - code. Set it to zero for use in production systems. For evaluation, - testing and debugging, it's advisable to set it to one. This will - enable a few assertions and will print debugging messages at the - KERN_DEBUG loglevel, where they won't normally be visible. 
Level 2 - is unlikely to be useful - it enables extra debugging in certain - areas which at one point needed debugging, but when the bugs were - located and fixed, the detailed messages were relegated to level 2. - - If reporting bugs, please try to have available a full dump of the - messages at debug level 1 while the misbehaviour was occurring. - -config JFFS2_FS_WRITEBUFFER - bool "JFFS2 write-buffering support" - depends on JFFS2_FS - default y - help - This enables the write-buffering support in JFFS2. - - This functionality is required to support JFFS2 on the following - types of flash devices: - - NAND flash - - NOR flash with transparent ECC - - DataFlash - -config JFFS2_FS_WBUF_VERIFY - bool "Verify JFFS2 write-buffer reads" - depends on JFFS2_FS_WRITEBUFFER - default n - help - This causes JFFS2 to read back every page written through the - write-buffer, and check for errors. - -config JFFS2_SUMMARY - bool "JFFS2 summary support (EXPERIMENTAL)" - depends on JFFS2_FS && EXPERIMENTAL - default n - help - This feature makes it possible to use summary information - for faster filesystem mount. - - The summary information can be inserted into a filesystem image - by the utility 'sumtool'. - - If unsure, say 'N'. - -config JFFS2_FS_XATTR - bool "JFFS2 XATTR support (EXPERIMENTAL)" - depends on JFFS2_FS && EXPERIMENTAL - default n - help - Extended attributes are name:value pairs associated with inodes by - the kernel or by users (see the attr(5) manual page, or visit - for details). - - If unsure, say N. - -config JFFS2_FS_POSIX_ACL - bool "JFFS2 POSIX Access Control Lists" - depends on JFFS2_FS_XATTR - default y - select FS_POSIX_ACL - help - Posix Access Control Lists (ACLs) support permissions for users and - groups beyond the owner/group/world scheme. - - To learn more about Access Control Lists, visit the Posix ACLs for - Linux website . - - If you don't know what Access Control Lists are, say N - -config JFFS2_FS_SECURITY - bool "JFFS2 Security Labels" - depends on JFFS2_FS_XATTR - default y - help - Security labels support alternative access control models - implemented by security modules like SELinux. This option - enables an extended attribute handler for file security - labels in the jffs2 filesystem. - - If you are not using a security module that requires using - extended attributes for file security labels, say N. - -config JFFS2_COMPRESSION_OPTIONS - bool "Advanced compression options for JFFS2" - depends on JFFS2_FS - default n - help - Enabling this option allows you to explicitly choose which - compression modules, if any, are enabled in JFFS2. Removing - compressors can mean you cannot read existing file systems, - and enabling experimental compressors can mean that you - write a file system which cannot be read by a standard kernel. - - If unsure, you should _definitely_ say 'N'. - -config JFFS2_ZLIB - bool "JFFS2 ZLIB compression support" if JFFS2_COMPRESSION_OPTIONS - select ZLIB_INFLATE - select ZLIB_DEFLATE - depends on JFFS2_FS - default y - help - Zlib is designed to be a free, general-purpose, legally unencumbered, - lossless data-compression library for use on virtually any computer - hardware and operating system. See for - further information. - - Say 'Y' if unsure. - -config JFFS2_LZO - bool "JFFS2 LZO compression support" if JFFS2_COMPRESSION_OPTIONS - select LZO_COMPRESS - select LZO_DECOMPRESS - depends on JFFS2_FS - default n - help - minilzo-based compression. Generally works better than Zlib. - - This feature was added in July, 2007. 
Say 'N' if you need - compatibility with older bootloaders or kernels. - -config JFFS2_RTIME - bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS - depends on JFFS2_FS - default y - help - Rtime does manage to recompress already-compressed data. Say 'Y' if unsure. - -config JFFS2_RUBIN - bool "JFFS2 RUBIN compression support" if JFFS2_COMPRESSION_OPTIONS - depends on JFFS2_FS - default n - help - RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure. - -choice - prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS - default JFFS2_CMODE_PRIORITY - depends on JFFS2_FS - help - You can set here the default compression mode of JFFS2 from - the available compression modes. Don't touch if unsure. - -config JFFS2_CMODE_NONE - bool "no compression" - help - Uses no compression. - -config JFFS2_CMODE_PRIORITY - bool "priority" - help - Tries the compressors in a predefined order and chooses the first - successful one. - -config JFFS2_CMODE_SIZE - bool "size (EXPERIMENTAL)" - help - Tries all compressors and chooses the one which has the smallest - result. - -config JFFS2_CMODE_FAVOURLZO - bool "Favour LZO" - help - Tries all compressors and chooses the one which has the smallest - result but gives some preference to LZO (which has faster - decompression) at the expense of size. - -endchoice - +source "fs/jffs2/Kconfig" # UBIFS File system configuration source "fs/ubifs/Kconfig" diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig new file mode 100644 index 0000000..6ae169c --- /dev/null +++ b/fs/jffs2/Kconfig @@ -0,0 +1,188 @@ +config JFFS2_FS + tristate "Journalling Flash File System v2 (JFFS2) support" + select CRC32 + depends on MTD + help + JFFS2 is the second generation of the Journalling Flash File System + for use on diskless embedded devices. It provides improved wear + levelling, compression and support for hard links. You cannot use + this on normal block devices, only on 'MTD' devices. + + Further information on the design and implementation of JFFS2 is + available at . + +config JFFS2_FS_DEBUG + int "JFFS2 debugging verbosity (0 = quiet, 2 = noisy)" + depends on JFFS2_FS + default "0" + help + This controls the amount of debugging messages produced by the JFFS2 + code. Set it to zero for use in production systems. For evaluation, + testing and debugging, it's advisable to set it to one. This will + enable a few assertions and will print debugging messages at the + KERN_DEBUG loglevel, where they won't normally be visible. Level 2 + is unlikely to be useful - it enables extra debugging in certain + areas which at one point needed debugging, but when the bugs were + located and fixed, the detailed messages were relegated to level 2. + + If reporting bugs, please try to have available a full dump of the + messages at debug level 1 while the misbehaviour was occurring. + +config JFFS2_FS_WRITEBUFFER + bool "JFFS2 write-buffering support" + depends on JFFS2_FS + default y + help + This enables the write-buffering support in JFFS2. + + This functionality is required to support JFFS2 on the following + types of flash devices: + - NAND flash + - NOR flash with transparent ECC + - DataFlash + +config JFFS2_FS_WBUF_VERIFY + bool "Verify JFFS2 write-buffer reads" + depends on JFFS2_FS_WRITEBUFFER + default n + help + This causes JFFS2 to read back every page written through the + write-buffer, and check for errors. 
+ +config JFFS2_SUMMARY + bool "JFFS2 summary support (EXPERIMENTAL)" + depends on JFFS2_FS && EXPERIMENTAL + default n + help + This feature makes it possible to use summary information + for faster filesystem mount. + + The summary information can be inserted into a filesystem image + by the utility 'sumtool'. + + If unsure, say 'N'. + +config JFFS2_FS_XATTR + bool "JFFS2 XATTR support (EXPERIMENTAL)" + depends on JFFS2_FS && EXPERIMENTAL + default n + help + Extended attributes are name:value pairs associated with inodes by + the kernel or by users (see the attr(5) manual page, or visit + for details). + + If unsure, say N. + +config JFFS2_FS_POSIX_ACL + bool "JFFS2 POSIX Access Control Lists" + depends on JFFS2_FS_XATTR + default y + select FS_POSIX_ACL + help + Posix Access Control Lists (ACLs) support permissions for users and + groups beyond the owner/group/world scheme. + + To learn more about Access Control Lists, visit the Posix ACLs for + Linux website . + + If you don't know what Access Control Lists are, say N + +config JFFS2_FS_SECURITY + bool "JFFS2 Security Labels" + depends on JFFS2_FS_XATTR + default y + help + Security labels support alternative access control models + implemented by security modules like SELinux. This option + enables an extended attribute handler for file security + labels in the jffs2 filesystem. + + If you are not using a security module that requires using + extended attributes for file security labels, say N. + +config JFFS2_COMPRESSION_OPTIONS + bool "Advanced compression options for JFFS2" + depends on JFFS2_FS + default n + help + Enabling this option allows you to explicitly choose which + compression modules, if any, are enabled in JFFS2. Removing + compressors can mean you cannot read existing file systems, + and enabling experimental compressors can mean that you + write a file system which cannot be read by a standard kernel. + + If unsure, you should _definitely_ say 'N'. + +config JFFS2_ZLIB + bool "JFFS2 ZLIB compression support" if JFFS2_COMPRESSION_OPTIONS + select ZLIB_INFLATE + select ZLIB_DEFLATE + depends on JFFS2_FS + default y + help + Zlib is designed to be a free, general-purpose, legally unencumbered, + lossless data-compression library for use on virtually any computer + hardware and operating system. See for + further information. + + Say 'Y' if unsure. + +config JFFS2_LZO + bool "JFFS2 LZO compression support" if JFFS2_COMPRESSION_OPTIONS + select LZO_COMPRESS + select LZO_DECOMPRESS + depends on JFFS2_FS + default n + help + minilzo-based compression. Generally works better than Zlib. + + This feature was added in July, 2007. Say 'N' if you need + compatibility with older bootloaders or kernels. + +config JFFS2_RTIME + bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS + depends on JFFS2_FS + default y + help + Rtime does manage to recompress already-compressed data. Say 'Y' if unsure. + +config JFFS2_RUBIN + bool "JFFS2 RUBIN compression support" if JFFS2_COMPRESSION_OPTIONS + depends on JFFS2_FS + default n + help + RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure. + +choice + prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS + default JFFS2_CMODE_PRIORITY + depends on JFFS2_FS + help + You can set here the default compression mode of JFFS2 from + the available compression modes. Don't touch if unsure. + +config JFFS2_CMODE_NONE + bool "no compression" + help + Uses no compression. 
+ +config JFFS2_CMODE_PRIORITY + bool "priority" + help + Tries the compressors in a predefined order and chooses the first + successful one. + +config JFFS2_CMODE_SIZE + bool "size (EXPERIMENTAL)" + help + Tries all compressors and chooses the one which has the smallest + result. + +config JFFS2_CMODE_FAVOURLZO + bool "Favour LZO" + help + Tries all compressors and chooses the one which has the smallest + result but gives some preference to LZO (which has faster + decompression) at the expense of size. + +endchoice -- cgit v0.10.2 From 3fc678a0e63138f56109ea31850f19b2e29c45b8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 27 Aug 2008 14:48:32 +0100 Subject: CRED: Wrap task credential accesses in the JFFS2 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Signed-off-by: David Woodhouse diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 89e9b73..249305d 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -442,14 +442,14 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i memset(ri, 0, sizeof(*ri)); /* Set OS-specific defaults for new inodes */ - ri->uid = cpu_to_je16(current->fsuid); + ri->uid = cpu_to_je16(current_fsuid()); if (dir_i->i_mode & S_ISGID) { ri->gid = cpu_to_je16(dir_i->i_gid); if (S_ISDIR(mode)) mode |= S_ISGID; } else { - ri->gid = cpu_to_je16(current->fsgid); + ri->gid = cpu_to_je16(current_fsgid()); } /* POSIX ACLs have to be processed now, at least partly. 
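The pattern the credential patch applies is mechanical: read the filesystem UID/GID through accessor wrappers instead of dereferencing task_struct fields directly, so the fields can later be moved into a copy-on-write cred structure without touching every filesystem again. A minimal sketch of the before/after shape, using a hypothetical inode-info structure rather than JFFS2's real one (only current_fsuid()/current_fsgid() are taken from the series itself):

/* Sketch only -- example_inode_info and example_new_inode() are
 * hypothetical; current_fsuid()/current_fsgid() are the wrappers this
 * series relies on (declared in <linux/cred.h>). */
#include <linux/cred.h>
#include <linux/fs.h>

struct example_inode_info {
	uid_t	uid;
	gid_t	gid;
};

static void example_new_inode(struct example_inode_info *ei,
			      struct inode *dir, int *mode)
{
	ei->uid = current_fsuid();		/* was: current->fsuid */

	if (dir->i_mode & S_ISGID) {
		/* setgid directory: inherit group from the parent,
		 * mirroring what jffs2_new_inode() does above */
		ei->gid = dir->i_gid;
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
	} else {
		ei->gid = current_fsgid();	/* was: current->fsgid */
	}
}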
-- cgit v0.10.2 From 4262bd2981307258b31e15f1a526d2b3884e77b5 Mon Sep 17 00:00:00 2001 From: Semun Lee Date: Mon, 1 Sep 2008 11:49:27 +0100 Subject: [MTD] [NAND] pxa3xx_nand_flash: Add definition of STM2GbX16 NAND flashes Signed-off-by: Semun Lee Acked-by: Eric Miao Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index a64ad15..0cd213c 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -291,10 +291,33 @@ static struct pxa3xx_nand_flash micron1GbX16 = { .chip_id = 0xb12c, }; +static struct pxa3xx_nand_timing stm2GbX16_timing = { + .tCH = 10, + .tCS = 35, + .tWH = 15, + .tWP = 25, + .tRH = 15, + .tRP = 25, + .tR = 25000, + .tWHR = 60, + .tAR = 10, +}; + +static struct pxa3xx_nand_flash stm2GbX16 = { + .timing = &stm2GbX16_timing, + .page_per_block = 64, + .page_size = 2048, + .flash_width = 16, + .dfc_width = 16, + .num_blocks = 2048, + .chip_id = 0xba20, +}; + static struct pxa3xx_nand_flash *builtin_flash_types[] = { &samsung512MbX16, µn1GbX8, µn1GbX16, + &stm2GbX16, }; #define NDTR0_tCH(c) (min((c), 7) << 19) -- cgit v0.10.2 From 5e706469a0518ec640a122aa5da22035e2af003a Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 1 Sep 2008 12:21:05 +0100 Subject: [MTD] [NOR] Select MTD_CFI_UTIL when MTD_CFI probe routine is enabled It requires cfi_qry_mode_on(), which is in cfi_util.c Reported by Russell King Signed-off-by: David Woodhouse diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index 9401bfe..9408099 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -6,6 +6,7 @@ menu "RAM/ROM/Flash chip drivers" config MTD_CFI tristate "Detect flash chips by Common Flash Interface (CFI) probe" select MTD_GEN_PROBE + select MTD_CFI_UTIL help The Common Flash Interface specification was developed by Intel, AMD and other flash manufactures that provides a universal method -- cgit v0.10.2 From 43035338ad772b6a4097b2ac530b75390bee87c1 Mon Sep 17 00:00:00 2001 From: Enrico Scholz Date: Fri, 29 Aug 2008 12:57:28 +0200 Subject: [MTD] [NAND] pxa3xx_nand: moved nand definitions into shared platform header This patch moves the exported datastructures from the pxa3xx_nand.c driver into the header. This is a plain movement without any modification of the attributes. This is the first one of a set of patches which: * allows to specify used NAND flash in the platform code and allows to turn off the old way to specify NAND characteristics in the driver. This way did not worked well as these characteristics depend on the platform and can not be derived from NAND id alone. E.g. some NAND chips share the same ID (e.g. K9K8G08U0A and K9NBG08U5A) but have different timings (which are written in the common driver currently and must be modified there). * adds 'const' annotations at various places Further patches will be sent to the mtd-list. 
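To make the intent of the series concrete: once these structures live in the shared header, a board file can describe its own NAND part and hand it to the driver through the platform data. A rough sketch under stated assumptions -- the timing numbers and chip ID below are placeholders rather than datasheet values, the command-set pointer is omitted for brevity, and the .flash/.num_flash hookup only exists after the follow-up "allow to define flash types in the platform data" patch later in this series:

/* Hypothetical board-file sketch, not part of the patch itself. */
#include <mach/pxa3xx_nand.h>

static struct pxa3xx_nand_timing board_nand_timing = {
	.tCH	= 10,
	.tCS	= 35,
	.tWH	= 15,
	.tWP	= 25,
	.tRH	= 15,
	.tRP	= 25,
	.tR	= 25000,
	.tWHR	= 60,
	.tAR	= 10,
};

static struct pxa3xx_nand_flash board_nand_flash = {
	.timing		= &board_nand_timing,
	/* .cmdset must also be supplied; omitted here for brevity */
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 4096,	/* placeholder geometry */
	.chip_id	= 0xd3ec,	/* placeholder ID */
};

static struct pxa3xx_nand_platform_data board_nand_info = {
	.enable_arbiter	= 1,
	/* fields added by the follow-up patch in this series: */
	.flash		= &board_nand_flash,
	.num_flash	= 1,
};

static void __init board_init_nand(void)
{
	/* helper exported by the shared platform header */
	pxa3xx_set_nand_info(&board_nand_info);
}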
Signed-off-by: Enrico Scholz Signed-off-by: David Woodhouse diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h index eb4b190..0dc50d8 100644 --- a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h +++ b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h @@ -4,6 +4,50 @@ #include #include +struct pxa3xx_nand_timing { + unsigned int tCH; /* Enable signal hold time */ + unsigned int tCS; /* Enable signal setup time */ + unsigned int tWH; /* ND_nWE high duration */ + unsigned int tWP; /* ND_nWE pulse time */ + unsigned int tRH; /* ND_nRE high duration */ + unsigned int tRP; /* ND_nRE pulse width */ + unsigned int tR; /* ND_nWE high to ND_nRE low for read */ + unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */ + unsigned int tAR; /* ND_ALE low to ND_nRE low delay */ +}; + +struct pxa3xx_nand_cmdset { + uint16_t read1; + uint16_t read2; + uint16_t program; + uint16_t read_status; + uint16_t read_id; + uint16_t erase; + uint16_t reset; + uint16_t lock; + uint16_t unlock; + uint16_t lock_status; +}; + +struct pxa3xx_nand_flash { + struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ + struct pxa3xx_nand_cmdset *cmdset; + + uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */ + uint32_t page_size; /* Page size in bytes (PAGE_SZ) */ + uint32_t flash_width; /* Width of Flash memory (DWIDTH_M) */ + uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */ + uint32_t num_blocks; /* Number of physical blocks in Flash */ + uint32_t chip_id; + + /* NOTE: these are automatically calculated, do not define */ + size_t oob_size; + size_t read_id_bytes; + + unsigned int col_addr_cycles; + unsigned int row_addr_cycles; +}; + struct pxa3xx_nand_platform_data { /* the data flash bus is shared between the Static Memory diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 0cd213c..203e8ef 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -115,50 +115,6 @@ enum { STATE_PIO_WRITING, }; -struct pxa3xx_nand_timing { - unsigned int tCH; /* Enable signal hold time */ - unsigned int tCS; /* Enable signal setup time */ - unsigned int tWH; /* ND_nWE high duration */ - unsigned int tWP; /* ND_nWE pulse time */ - unsigned int tRH; /* ND_nRE high duration */ - unsigned int tRP; /* ND_nRE pulse width */ - unsigned int tR; /* ND_nWE high to ND_nRE low for read */ - unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */ - unsigned int tAR; /* ND_ALE low to ND_nRE low delay */ -}; - -struct pxa3xx_nand_cmdset { - uint16_t read1; - uint16_t read2; - uint16_t program; - uint16_t read_status; - uint16_t read_id; - uint16_t erase; - uint16_t reset; - uint16_t lock; - uint16_t unlock; - uint16_t lock_status; -}; - -struct pxa3xx_nand_flash { - struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ - struct pxa3xx_nand_cmdset *cmdset; - - uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */ - uint32_t page_size; /* Page size in bytes (PAGE_SZ) */ - uint32_t flash_width; /* Width of Flash memory (DWIDTH_M) */ - uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */ - uint32_t num_blocks; /* Number of physical blocks in Flash */ - uint32_t chip_id; - - /* NOTE: these are automatically calculated, do not define */ - size_t oob_size; - size_t read_id_bytes; - - unsigned int col_addr_cycles; - unsigned int row_addr_cycles; -}; - struct pxa3xx_nand_info { struct nand_chip nand_chip; -- cgit v0.10.2 From c8ac3f818e1183eab8d08a41b01b6078c5df4b43 Mon Sep 17 00:00:00 2001 From: 
Enrico Scholz Date: Fri, 29 Aug 2008 12:59:48 +0200 Subject: [MTD] [NAND] pxa3xx_nand: allow to define flash types in the platform data This patch adds 'flash' and 'num_flash' attributes to the platform data. There was added code in the driver to iterate across these attributes in the detect-flash routine. This is done similarly to the existing method which uses a 'builtin_flash_types' field. Signed-off-by: Enrico Scholz Signed-off-by: David Woodhouse diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h index 0dc50d8..6ac9aea 100644 --- a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h +++ b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h @@ -58,6 +58,9 @@ struct pxa3xx_nand_platform_data { struct mtd_partition *parts; unsigned int nr_parts; + + struct pxa3xx_nand_flash * const flash; + size_t num_flash; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 203e8ef..1906aba 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -911,12 +911,26 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, return 0; } -static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info) +static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, + const struct pxa3xx_nand_platform_data *pdata) { struct pxa3xx_nand_flash *f; uint32_t id; int i; + for (i = 0; inum_flash; ++i) { + f = pdata->flash + i; + + if (pxa3xx_nand_config_flash(info, f)) + continue; + + if (__readid(info, &id)) + continue; + + if (id == f->chip_id) + return 0; + } + for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) { f = builtin_flash_types[i]; @@ -1114,7 +1128,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) goto fail_free_buf; } - ret = pxa3xx_nand_detect_flash(info); + ret = pxa3xx_nand_detect_flash(info, pdata); if (ret) { dev_err(&pdev->dev, "failed to detect flash\n"); ret = -ENODEV; -- cgit v0.10.2 From 80ebf20f34c30760cfba7b5e0a418241181d2cd9 Mon Sep 17 00:00:00 2001 From: Enrico Scholz Date: Fri, 29 Aug 2008 12:59:49 +0200 Subject: [MTD] [NAND] pxa3xx_nand: allow to disable builtin flash-type table This patch adds a MTD_NAND_PXA3xx_BUILTIN configuration variables which allows to disable usage of builtin flash-type table. Not enabling this option saves some space in the generated driver. Signed-off-by: Enrico Scholz Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 8eb2b06..6eebe852 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -334,6 +334,13 @@ config MTD_NAND_PXA3xx This enables the driver for the NAND flash device found on PXA3xx processors +config MTD_NAND_PXA3xx_BUILTIN + bool "Use builtin definitions for some NAND chips (deprecated)" + depends on MTD_NAND_PXA3xx + help + This enables builtin definitions for some NAND chips. This + is deprecated in favor of platform specific data. 
+ config MTD_NAND_CM_X270 tristate "Support for NAND Flash on CM-X270 modules" depends on MTD_NAND && MACH_ARMCORE diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 1906aba..e492804 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -164,6 +164,7 @@ static int use_dma = 1; module_param(use_dma, bool, 0444); MODULE_PARM_DESC(use_dma, "enable DMA for data transfering to/from NAND HW"); +#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN static struct pxa3xx_nand_cmdset smallpage_cmdset = { .read1 = 0x0000, .read2 = 0x0050, @@ -275,6 +276,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = { µn1GbX16, &stm2GbX16, }; +#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */ #define NDTR0_tCH(c) (min((c), 7) << 19) #define NDTR0_tCS(c) (min((c), 7) << 16) @@ -931,6 +933,7 @@ static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, return 0; } +#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) { f = builtin_flash_types[i]; @@ -944,6 +947,7 @@ static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, if (id == f->chip_id) return 0; } +#endif return -ENODEV; } -- cgit v0.10.2 From 7dad482ed0648a40e403d1ed44e0ea92248632f1 Mon Sep 17 00:00:00 2001 From: Enrico Scholz Date: Fri, 29 Aug 2008 12:59:50 +0200 Subject: [MTD] [NAND] pxa3xx_nand: added some 'const' annotations to the exported API This patch marks some attributes as 'const' which are set only once and never be modified by the driver. There are some changes in parameter list and variable declarations too which mark them as 'const'. Signed-off-by: Enrico Scholz Signed-off-by: David Woodhouse diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h index 6ac9aea..cfcb2e2 100644 --- a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h +++ b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h @@ -30,8 +30,8 @@ struct pxa3xx_nand_cmdset { }; struct pxa3xx_nand_flash { - struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ - struct pxa3xx_nand_cmdset *cmdset; + const struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ + const struct pxa3xx_nand_cmdset *cmdset; uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */ uint32_t page_size; /* Page size in bytes (PAGE_SZ) */ @@ -56,8 +56,8 @@ struct pxa3xx_nand_platform_data { */ int enable_arbiter; - struct mtd_partition *parts; - unsigned int nr_parts; + const struct mtd_partition *parts; + unsigned int nr_parts; struct pxa3xx_nand_flash * const flash; size_t num_flash; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index e492804..af17405 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -293,7 +293,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = { #define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) + 1) static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, - struct pxa3xx_nand_timing *t) + const struct pxa3xx_nand_timing *t) { unsigned long nand_clk = clk_get_rate(info->clk); uint32_t ndtr0, ndtr1; @@ -336,7 +336,7 @@ static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, uint16_t cmd, int column, int page_addr) { struct pxa3xx_nand_flash *f = info->flash_info; - struct pxa3xx_nand_cmdset *cmdset = f->cmdset; + const struct pxa3xx_nand_cmdset *cmdset = f->cmdset; /* calculate data size */ switch (f->page_size) { @@ -387,7 +387,7 @@ static int prepare_erase_cmd(struct pxa3xx_nand_info *info, static int prepare_other_cmd(struct 
pxa3xx_nand_info *info, uint16_t cmd) { - struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset; + const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset; info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); info->ndcb1 = 0; @@ -623,7 +623,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, { struct pxa3xx_nand_info *info = mtd->priv; struct pxa3xx_nand_flash *flash_info = info->flash_info; - struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset; + const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset; int ret; info->use_dma = (use_dma) ? 1 : 0; @@ -843,7 +843,7 @@ static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd, static int __readid(struct pxa3xx_nand_info *info, uint32_t *id) { struct pxa3xx_nand_flash *f = info->flash_info; - struct pxa3xx_nand_cmdset *cmdset = f->cmdset; + const struct pxa3xx_nand_cmdset *cmdset = f->cmdset; uint32_t ndcr; uint8_t id_buff[8]; -- cgit v0.10.2 From c8c17c888d936c58ceb28b084a6272d67e10ea28 Mon Sep 17 00:00:00 2001 From: Enrico Scholz Date: Fri, 29 Aug 2008 12:59:51 +0200 Subject: [MTD] [NAND] pxa3xx_nand: moved some helper variables out from platform data This patch moves some attributes out from the platform data into the dynamically created nand device. This results into a cleaner interface and allows to use constant pxa3xx_nand_flash definitions. Signed-off-by: Enrico Scholz Signed-off-by: David Woodhouse diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h index cfcb2e2..eb35fca 100644 --- a/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h +++ b/arch/arm/mach-pxa/include/mach/pxa3xx_nand.h @@ -39,13 +39,6 @@ struct pxa3xx_nand_flash { uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */ uint32_t num_blocks; /* Number of physical blocks in Flash */ uint32_t chip_id; - - /* NOTE: these are automatically calculated, do not define */ - size_t oob_size; - size_t read_id_bytes; - - unsigned int col_addr_cycles; - unsigned int row_addr_cycles; }; struct pxa3xx_nand_platform_data { @@ -59,7 +52,7 @@ struct pxa3xx_nand_platform_data { const struct mtd_partition *parts; unsigned int nr_parts; - struct pxa3xx_nand_flash * const flash; + const struct pxa3xx_nand_flash * flash; size_t num_flash; }; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index af17405..bc37f55 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -119,7 +119,7 @@ struct pxa3xx_nand_info { struct nand_chip nand_chip; struct platform_device *pdev; - struct pxa3xx_nand_flash *flash_info; + const struct pxa3xx_nand_flash *flash_info; struct clk *clk; void __iomem *mmio_base; @@ -158,6 +158,13 @@ struct pxa3xx_nand_info { uint32_t ndcb0; uint32_t ndcb1; uint32_t ndcb2; + + /* calculated from pxa3xx_nand_flash data */ + size_t oob_size; + size_t read_id_bytes; + + unsigned int col_addr_cycles; + unsigned int row_addr_cycles; }; static int use_dma = 1; @@ -335,7 +342,7 @@ static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event) static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, uint16_t cmd, int column, int page_addr) { - struct pxa3xx_nand_flash *f = info->flash_info; + const struct pxa3xx_nand_flash *f = info->flash_info; const struct pxa3xx_nand_cmdset *cmdset = f->cmdset; /* calculate data size */ @@ -354,14 +361,14 @@ static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, info->ndcb0 = cmd | ((cmd & 0xff00) ? 
NDCB0_DBC : 0); info->ndcb1 = 0; info->ndcb2 = 0; - info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles); + info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles); - if (f->col_addr_cycles == 2) { + if (info->col_addr_cycles == 2) { /* large block, 2 cycles for column address * row address starts from 3rd cycle */ info->ndcb1 |= (page_addr << 16) | (column & 0xffff); - if (f->row_addr_cycles == 3) + if (info->row_addr_cycles == 3) info->ndcb2 = (page_addr >> 16) & 0xff; } else /* small block, 1 cycles for column address @@ -622,7 +629,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, int column, int page_addr) { struct pxa3xx_nand_info *info = mtd->priv; - struct pxa3xx_nand_flash *flash_info = info->flash_info; + const struct pxa3xx_nand_flash *flash_info = info->flash_info; const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset; int ret; @@ -701,7 +708,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, info->use_dma = 0; /* force PIO read */ info->buf_start = 0; info->buf_count = (command == NAND_CMD_READID) ? - flash_info->read_id_bytes : 1; + info->read_id_bytes : 1; if (prepare_other_cmd(info, (command == NAND_CMD_READID) ? cmdset->read_id : cmdset->read_status)) @@ -842,7 +849,7 @@ static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd, static int __readid(struct pxa3xx_nand_info *info, uint32_t *id) { - struct pxa3xx_nand_flash *f = info->flash_info; + const struct pxa3xx_nand_flash *f = info->flash_info; const struct pxa3xx_nand_cmdset *cmdset = f->cmdset; uint32_t ndcr; uint8_t id_buff[8]; @@ -872,7 +879,7 @@ fail_timeout: } static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, - struct pxa3xx_nand_flash *f) + const struct pxa3xx_nand_flash *f) { struct platform_device *pdev = info->pdev; struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; @@ -885,25 +892,25 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, return -EINVAL; /* calculate flash information */ - f->oob_size = (f->page_size == 2048) ? 64 : 16; - f->read_id_bytes = (f->page_size == 2048) ? 4 : 2; + info->oob_size = (f->page_size == 2048) ? 64 : 16; + info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; /* calculate addressing information */ - f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; + info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; if (f->num_blocks * f->page_per_block > 65536) - f->row_addr_cycles = 3; + info->row_addr_cycles = 3; else - f->row_addr_cycles = 2; + info->row_addr_cycles = 2; ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; - ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0; + ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0; ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; ndcr |= (f->dfc_width == 16) ? 
NDCR_DWIDTH_C : 0; - ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes); + ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes); ndcr |= NDCR_SPARE_EN; /* enable spare by default */ info->reg_ndcr = ndcr; @@ -916,7 +923,7 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, const struct pxa3xx_nand_platform_data *pdata) { - struct pxa3xx_nand_flash *f; + const struct pxa3xx_nand_flash *f; uint32_t id; int i; @@ -1011,7 +1018,7 @@ static struct nand_ecclayout hw_largepage_ecclayout = { static void pxa3xx_nand_init_mtd(struct mtd_info *mtd, struct pxa3xx_nand_info *info) { - struct pxa3xx_nand_flash *f = info->flash_info; + const struct pxa3xx_nand_flash *f = info->flash_info; struct nand_chip *this = &info->nand_chip; this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0; -- cgit v0.10.2 From 2675e9447bb5c861dbd29c5fe55b7ce2ad3ff0f5 Mon Sep 17 00:00:00 2001 From: Enrico Scholz Date: Fri, 29 Aug 2008 12:59:52 +0200 Subject: [MTD] [NAND] pxa3xx_nand: added warning which tells id of detected NAND Minor patch to help debugging of NAND detection. Signed-off-by: Enrico Scholz Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index bc37f55..c0fa9c9 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -924,7 +924,7 @@ static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, const struct pxa3xx_nand_platform_data *pdata) { const struct pxa3xx_nand_flash *f; - uint32_t id; + uint32_t id = -1; int i; for (i = 0; inum_flash; ++i) { @@ -956,6 +956,9 @@ static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, } #endif + dev_warn(&info->pdev->dev, + "failed to detect configured nand flash; found %04x instead of\n", + id); return -ENODEV; } -- cgit v0.10.2 From 34f6e15786293e8d6ed05f9c19ed784ff15d2702 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 2 Sep 2008 17:16:59 +0200 Subject: [MTD] [NAND] Freescale i.MX2 NAND driver This patch adds support for the integrated NAND flash controller of the i.MX2 and i.MX3 family. It is tested on MX27 but should work on MX3 aswell. Signed-off-by: Sascha Hauer Acked-by: Juergen Beisert Signed-off-by: David Woodhouse diff --git a/arch/arm/plat-mxc/include/mach/mxc_nand.h b/arch/arm/plat-mxc/include/mach/mxc_nand.h new file mode 100644 index 0000000..2b972df --- /dev/null +++ b/arch/arm/plat-mxc/include/mach/mxc_nand.h @@ -0,0 +1,27 @@ +/* + * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Sascha Hauer, kernel@pengutronix.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
+ */ + +#ifndef __ASM_ARCH_NAND_H +#define __ASM_ARCH_NAND_H + +struct mxc_nand_platform_data { + int width; /* data bus width in bytes */ + int hw_ecc; /* 0 if supress hardware ECC */ +}; +#endif /* __ASM_ARCH_NAND_H */ diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 6eebe852..7153854 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -407,4 +407,11 @@ config MTD_NAND_FSL_UPM Enables support for NAND Flash chips wired onto Freescale PowerPC processor localbus with User-Programmable Machine support. +config MTD_NAND_MXC + tristate "MXC NAND support" + depends on ARCH_MX2 + help + This enables the driver for the NAND flash controller on the + MXC processors. + endif # MTD_NAND diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 8540c46..e0fee04 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -33,5 +33,6 @@ obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o +obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o nand-objs := nand_base.o nand_bbt.o diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c new file mode 100644 index 0000000..21fd4f1 --- /dev/null +++ b/drivers/mtd/nand/mxc_nand.c @@ -0,0 +1,1077 @@ +/* + * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Sascha Hauer, kernel@pengutronix.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRIVER_NAME "mxc_nand" + +/* Addresses for NFC registers */ +#define NFC_BUF_SIZE 0xE00 +#define NFC_BUF_ADDR 0xE04 +#define NFC_FLASH_ADDR 0xE06 +#define NFC_FLASH_CMD 0xE08 +#define NFC_CONFIG 0xE0A +#define NFC_ECC_STATUS_RESULT 0xE0C +#define NFC_RSLTMAIN_AREA 0xE0E +#define NFC_RSLTSPARE_AREA 0xE10 +#define NFC_WRPROT 0xE12 +#define NFC_UNLOCKSTART_BLKADDR 0xE14 +#define NFC_UNLOCKEND_BLKADDR 0xE16 +#define NFC_NF_WRPRST 0xE18 +#define NFC_CONFIG1 0xE1A +#define NFC_CONFIG2 0xE1C + +/* Addresses for NFC RAM BUFFER Main area 0 */ +#define MAIN_AREA0 0x000 +#define MAIN_AREA1 0x200 +#define MAIN_AREA2 0x400 +#define MAIN_AREA3 0x600 + +/* Addresses for NFC SPARE BUFFER Spare area 0 */ +#define SPARE_AREA0 0x800 +#define SPARE_AREA1 0x810 +#define SPARE_AREA2 0x820 +#define SPARE_AREA3 0x830 + +/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register + * for Command operation */ +#define NFC_CMD 0x1 + +/* Set INT to 0, FADD to 1, rest to 0 in NFC_CONFIG2 Register + * for Address operation */ +#define NFC_ADDR 0x2 + +/* Set INT to 0, FDI to 1, rest to 0 in NFC_CONFIG2 Register + * for Input operation */ +#define NFC_INPUT 0x4 + +/* Set INT to 0, FDO to 001, rest to 0 in NFC_CONFIG2 Register + * for Data Output operation */ +#define NFC_OUTPUT 0x8 + +/* Set INT to 0, FD0 to 010, rest to 0 in NFC_CONFIG2 Register + * for Read ID operation */ +#define NFC_ID 0x10 + +/* Set INT to 0, FDO to 100, rest to 0 in NFC_CONFIG2 Register + * for Read Status operation */ +#define NFC_STATUS 0x20 + +/* Set INT to 1, rest to 0 in NFC_CONFIG2 Register for Read + * Status operation */ +#define NFC_INT 0x8000 + +#define NFC_SP_EN (1 << 2) +#define NFC_ECC_EN (1 << 3) +#define NFC_INT_MSK (1 << 4) +#define NFC_BIG (1 << 5) +#define NFC_RST (1 << 6) +#define NFC_CE (1 << 7) +#define NFC_ONE_CYCLE (1 << 8) + +struct mxc_nand_host { + struct mtd_info mtd; + struct nand_chip nand; + struct mtd_partition *parts; + struct device *dev; + + void __iomem *regs; + int spare_only; + int status_request; + int pagesize_2k; + uint16_t col_addr; + struct clk *clk; + int clk_act; + int irq; + + wait_queue_head_t irq_waitq; +}; + +/* Define delays in microsec for NAND device operations */ +#define TROP_US_DELAY 2000 +/* Macros to get byte and bit positions of ECC */ +#define COLPOS(x) ((x) >> 3) +#define BITPOS(x) ((x) & 0xf) + +/* Define single bit Error positions in Main & Spare area */ +#define MAIN_SINGLEBIT_ERROR 0x4 +#define SPARE_SINGLEBIT_ERROR 0x1 + +/* OOB placement block for use with hardware ecc generation */ +static struct nand_ecclayout nand_hw_eccoob_8 = { + .eccbytes = 5, + .eccpos = {6, 7, 8, 9, 10}, + .oobfree = {{0, 5}, {11, 5}, } +}; + +static struct nand_ecclayout nand_hw_eccoob_16 = { + .eccbytes = 5, + .eccpos = {6, 7, 8, 9, 10}, + .oobfree = {{0, 6}, {12, 4}, } +}; + +#ifdef CONFIG_MTD_PARTITIONS +static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; +#endif + +static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) +{ + struct mxc_nand_host *host = dev_id; + + uint16_t tmp; + + tmp = readw(host->regs + NFC_CONFIG1); + tmp |= NFC_INT_MSK; /* Disable interrupt */ + writew(tmp, host->regs + NFC_CONFIG1); + + wake_up(&host->irq_waitq); + + return IRQ_HANDLED; +} + +/* This function polls the NANDFC to wait for the basic operation to + * complete by checking the INT bit of config2 register. 
+ */ +static void wait_op_done(struct mxc_nand_host *host, int max_retries, + uint16_t param, int useirq) +{ + uint32_t tmp; + + if (useirq) { + if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { + + tmp = readw(host->regs + NFC_CONFIG1); + tmp &= ~NFC_INT_MSK; /* Enable interrupt */ + writew(tmp, host->regs + NFC_CONFIG1); + + wait_event(host->irq_waitq, + readw(host->regs + NFC_CONFIG2) & NFC_INT); + + tmp = readw(host->regs + NFC_CONFIG2); + tmp &= ~NFC_INT; + writew(tmp, host->regs + NFC_CONFIG2); + } + } else { + while (max_retries-- > 0) { + if (readw(host->regs + NFC_CONFIG2) & NFC_INT) { + tmp = readw(host->regs + NFC_CONFIG2); + tmp &= ~NFC_INT; + writew(tmp, host->regs + NFC_CONFIG2); + break; + } + udelay(1); + } + if (max_retries <= 0) + DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n", + __func__, param); + } +} + +/* This function issues the specified command to the NAND device and + * waits for completion. */ +static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq) +{ + DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); + + writew(cmd, host->regs + NFC_FLASH_CMD); + writew(NFC_CMD, host->regs + NFC_CONFIG2); + + /* Wait for operation to complete */ + wait_op_done(host, TROP_US_DELAY, cmd, useirq); +} + +/* This function sends an address (or partial address) to the + * NAND device. The address is used to select the source/destination for + * a NAND command. */ +static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast) +{ + DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast); + + writew(addr, host->regs + NFC_FLASH_ADDR); + writew(NFC_ADDR, host->regs + NFC_CONFIG2); + + /* Wait for operation to complete */ + wait_op_done(host, TROP_US_DELAY, addr, islast); +} + +/* This function requests the NANDFC to initate the transfer + * of data currently in the NANDFC RAM buffer to the NAND device. */ +static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id, + int spare_only) +{ + DEBUG(MTD_DEBUG_LEVEL3, "send_prog_page (%d)\n", spare_only); + + /* NANDFC buffer 0 is used for page read/write */ + writew(buf_id, host->regs + NFC_BUF_ADDR); + + /* Configure spare or page+spare access */ + if (!host->pagesize_2k) { + uint16_t config1 = readw(host->regs + NFC_CONFIG1); + if (spare_only) + config1 |= NFC_SP_EN; + else + config1 &= ~(NFC_SP_EN); + writew(config1, host->regs + NFC_CONFIG1); + } + + writew(NFC_INPUT, host->regs + NFC_CONFIG2); + + /* Wait for operation to complete */ + wait_op_done(host, TROP_US_DELAY, spare_only, true); +} + +/* Requests NANDFC to initated the transfer of data from the + * NAND device into in the NANDFC ram buffer. */ +static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id, + int spare_only) +{ + DEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only); + + /* NANDFC buffer 0 is used for page read/write */ + writew(buf_id, host->regs + NFC_BUF_ADDR); + + /* Configure spare or page+spare access */ + if (!host->pagesize_2k) { + uint32_t config1 = readw(host->regs + NFC_CONFIG1); + if (spare_only) + config1 |= NFC_SP_EN; + else + config1 &= ~NFC_SP_EN; + writew(config1, host->regs + NFC_CONFIG1); + } + + writew(NFC_OUTPUT, host->regs + NFC_CONFIG2); + + /* Wait for operation to complete */ + wait_op_done(host, TROP_US_DELAY, spare_only, true); +} + +/* Request the NANDFC to perform a read of the NAND device ID. 
*/ +static void send_read_id(struct mxc_nand_host *host) +{ + struct nand_chip *this = &host->nand; + uint16_t tmp; + + /* NANDFC buffer 0 is used for device ID output */ + writew(0x0, host->regs + NFC_BUF_ADDR); + + /* Read ID into main buffer */ + tmp = readw(host->regs + NFC_CONFIG1); + tmp &= ~NFC_SP_EN; + writew(tmp, host->regs + NFC_CONFIG1); + + writew(NFC_ID, host->regs + NFC_CONFIG2); + + /* Wait for operation to complete */ + wait_op_done(host, TROP_US_DELAY, 0, true); + + if (this->options & NAND_BUSWIDTH_16) { + void __iomem *main_buf = host->regs + MAIN_AREA0; + /* compress the ID info */ + writeb(readb(main_buf + 2), main_buf + 1); + writeb(readb(main_buf + 4), main_buf + 2); + writeb(readb(main_buf + 6), main_buf + 3); + writeb(readb(main_buf + 8), main_buf + 4); + writeb(readb(main_buf + 10), main_buf + 5); + } +} + +/* This function requests the NANDFC to perform a read of the + * NAND device status and returns the current status. */ +static uint16_t get_dev_status(struct mxc_nand_host *host) +{ + void __iomem *main_buf = host->regs + MAIN_AREA1; + uint32_t store; + uint16_t ret, tmp; + /* Issue status request to NAND device */ + + /* store the main area1 first word, later do recovery */ + store = readl(main_buf); + /* NANDFC buffer 1 is used for device status to prevent + * corruption of read/write buffer on status requests. */ + writew(1, host->regs + NFC_BUF_ADDR); + + /* Read status into main buffer */ + tmp = readw(host->regs + NFC_CONFIG1); + tmp &= ~NFC_SP_EN; + writew(tmp, host->regs + NFC_CONFIG1); + + writew(NFC_STATUS, host->regs + NFC_CONFIG2); + + /* Wait for operation to complete */ + wait_op_done(host, TROP_US_DELAY, 0, true); + + /* Status is placed in first word of main buffer */ + /* get status, then recovery area 1 data */ + ret = readw(main_buf); + writel(store, main_buf); + + return ret; +} + +/* This functions is used by upper layer to checks if device is ready */ +static int mxc_nand_dev_ready(struct mtd_info *mtd) +{ + /* + * NFC handles R/B internally. Therefore, this function + * always returns status as ready. + */ + return 1; +} + +static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode) +{ + /* + * If HW ECC is enabled, we turn it on during init. There is + * no need to enable again here. + */ +} + +static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat, + u_char *read_ecc, u_char *calc_ecc) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + + /* + * 1-Bit errors are automatically corrected in HW. No need for + * additional correction. 
2-Bit errors cannot be corrected by + * HW ECC, so we need to return failure + */ + uint16_t ecc_status = readw(host->regs + NFC_ECC_STATUS_RESULT); + + if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { + DEBUG(MTD_DEBUG_LEVEL0, + "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); + return -1; + } + + return 0; +} + +static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code) +{ + return 0; +} + +static u_char mxc_nand_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + uint8_t ret = 0; + uint16_t col, rd_word; + uint16_t __iomem *main_buf = host->regs + MAIN_AREA0; + uint16_t __iomem *spare_buf = host->regs + SPARE_AREA0; + + /* Check for status request */ + if (host->status_request) + return get_dev_status(host) & 0xFF; + + /* Get column for 16-bit access */ + col = host->col_addr >> 1; + + /* If we are accessing the spare region */ + if (host->spare_only) + rd_word = readw(&spare_buf[col]); + else + rd_word = readw(&main_buf[col]); + + /* Pick upper/lower byte of word from RAM buffer */ + if (host->col_addr & 0x1) + ret = (rd_word >> 8) & 0xFF; + else + ret = rd_word & 0xFF; + + /* Update saved column address */ + host->col_addr++; + + return ret; +} + +static uint16_t mxc_nand_read_word(struct mtd_info *mtd) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + uint16_t col, rd_word, ret; + uint16_t __iomem *p; + + DEBUG(MTD_DEBUG_LEVEL3, + "mxc_nand_read_word(col = %d)\n", host->col_addr); + + col = host->col_addr; + /* Adjust saved column address */ + if (col < mtd->writesize && host->spare_only) + col += mtd->writesize; + + if (col < mtd->writesize) + p = (host->regs + MAIN_AREA0) + (col >> 1); + else + p = (host->regs + SPARE_AREA0) + ((col - mtd->writesize) >> 1); + + if (col & 1) { + rd_word = readw(p); + ret = (rd_word >> 8) & 0xff; + rd_word = readw(&p[1]); + ret |= (rd_word << 8) & 0xff00; + + } else + ret = readw(p); + + /* Update saved column address */ + host->col_addr = col + 2; + + return ret; +} + +/* Write data of length len to buffer buf. The data to be + * written on NAND Flash is first copied to RAMbuffer. 
After the Data Input + * Operation by the NFC, the data is written to NAND Flash */ +static void mxc_nand_write_buf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + int n, col, i = 0; + + DEBUG(MTD_DEBUG_LEVEL3, + "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr, + len); + + col = host->col_addr; + + /* Adjust saved column address */ + if (col < mtd->writesize && host->spare_only) + col += mtd->writesize; + + n = mtd->writesize + mtd->oobsize - col; + n = min(len, n); + + DEBUG(MTD_DEBUG_LEVEL3, + "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n); + + while (n) { + void __iomem *p; + + if (col < mtd->writesize) + p = host->regs + MAIN_AREA0 + (col & ~3); + else + p = host->regs + SPARE_AREA0 - + mtd->writesize + (col & ~3); + + DEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__, + __LINE__, p); + + if (((col | (int)&buf[i]) & 3) || n < 16) { + uint32_t data = 0; + + if (col & 3 || n < 4) + data = readl(p); + + switch (col & 3) { + case 0: + if (n) { + data = (data & 0xffffff00) | + (buf[i++] << 0); + n--; + col++; + } + case 1: + if (n) { + data = (data & 0xffff00ff) | + (buf[i++] << 8); + n--; + col++; + } + case 2: + if (n) { + data = (data & 0xff00ffff) | + (buf[i++] << 16); + n--; + col++; + } + case 3: + if (n) { + data = (data & 0x00ffffff) | + (buf[i++] << 24); + n--; + col++; + } + } + + writel(data, p); + } else { + int m = mtd->writesize - col; + + if (col >= mtd->writesize) + m += mtd->oobsize; + + m = min(n, m) & ~3; + + DEBUG(MTD_DEBUG_LEVEL3, + "%s:%d: n = %d, m = %d, i = %d, col = %d\n", + __func__, __LINE__, n, m, i, col); + + memcpy(p, &buf[i], m); + col += m; + i += m; + n -= m; + } + } + /* Update saved column address */ + host->col_addr = col; +} + +/* Read the data buffer from the NAND Flash. To read the data from NAND + * Flash first the data output cycle is initiated by the NFC, which copies + * the data to RAMbuffer. This data of length len is then copied to buffer buf. + */ +static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + int n, col, i = 0; + + DEBUG(MTD_DEBUG_LEVEL3, + "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len); + + col = host->col_addr; + + /* Adjust saved column address */ + if (col < mtd->writesize && host->spare_only) + col += mtd->writesize; + + n = mtd->writesize + mtd->oobsize - col; + n = min(len, n); + + while (n) { + void __iomem *p; + + if (col < mtd->writesize) + p = host->regs + MAIN_AREA0 + (col & ~3); + else + p = host->regs + SPARE_AREA0 - + mtd->writesize + (col & ~3); + + if (((col | (int)&buf[i]) & 3) || n < 16) { + uint32_t data; + + data = readl(p); + switch (col & 3) { + case 0: + if (n) { + buf[i++] = (uint8_t) (data); + n--; + col++; + } + case 1: + if (n) { + buf[i++] = (uint8_t) (data >> 8); + n--; + col++; + } + case 2: + if (n) { + buf[i++] = (uint8_t) (data >> 16); + n--; + col++; + } + case 3: + if (n) { + buf[i++] = (uint8_t) (data >> 24); + n--; + col++; + } + } + } else { + int m = mtd->writesize - col; + + if (col >= mtd->writesize) + m += mtd->oobsize; + + m = min(n, m) & ~3; + memcpy(&buf[i], p, m); + col += m; + i += m; + n -= m; + } + } + /* Update saved column address */ + host->col_addr = col; + +} + +/* Used by the upper layer to verify the data in NAND Flash + * with the data in the buf. 
*/ +static int mxc_nand_verify_buf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + return -EFAULT; +} + +/* This function is used by upper layer for select and + * deselect of the NAND chip */ +static void mxc_nand_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + +#ifdef CONFIG_MTD_NAND_MXC_FORCE_CE + if (chip > 0) { + DEBUG(MTD_DEBUG_LEVEL0, + "ERROR: Illegal chip select (chip = %d)\n", chip); + return; + } + + if (chip == -1) { + writew(readw(host->regs + NFC_CONFIG1) & ~NFC_CE, + host->regs + NFC_CONFIG1); + return; + } + + writew(readw(host->regs + NFC_CONFIG1) | NFC_CE, + host->regs + NFC_CONFIG1); +#endif + + switch (chip) { + case -1: + /* Disable the NFC clock */ + if (host->clk_act) { + clk_disable(host->clk); + host->clk_act = 0; + } + break; + case 0: + /* Enable the NFC clock */ + if (!host->clk_act) { + clk_enable(host->clk); + host->clk_act = 1; + } + break; + + default: + break; + } +} + +/* Used by the upper layer to write command to NAND Flash for + * different operations to be carried out on NAND Flash */ +static void mxc_nand_command(struct mtd_info *mtd, unsigned command, + int column, int page_addr) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + int useirq = true; + + DEBUG(MTD_DEBUG_LEVEL3, + "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", + command, column, page_addr); + + /* Reset command state information */ + host->status_request = false; + + /* Command pre-processing step */ + switch (command) { + + case NAND_CMD_STATUS: + host->col_addr = 0; + host->status_request = true; + break; + + case NAND_CMD_READ0: + host->col_addr = column; + host->spare_only = false; + useirq = false; + break; + + case NAND_CMD_READOOB: + host->col_addr = column; + host->spare_only = true; + useirq = false; + if (host->pagesize_2k) + command = NAND_CMD_READ0; /* only READ0 is valid */ + break; + + case NAND_CMD_SEQIN: + if (column >= mtd->writesize) { + /* + * FIXME: before send SEQIN command for write OOB, + * We must read one page out. + * For K9F1GXX has no READ1 command to set current HW + * pointer to spare area, we must write the whole page + * including OOB together. + */ + if (host->pagesize_2k) + /* call ourself to read a page */ + mxc_nand_command(mtd, NAND_CMD_READ0, 0, + page_addr); + + host->col_addr = column - mtd->writesize; + host->spare_only = true; + + /* Set program pointer to spare region */ + if (!host->pagesize_2k) + send_cmd(host, NAND_CMD_READOOB, false); + } else { + host->spare_only = false; + host->col_addr = column; + + /* Set program pointer to page start */ + if (!host->pagesize_2k) + send_cmd(host, NAND_CMD_READ0, false); + } + useirq = false; + break; + + case NAND_CMD_PAGEPROG: + send_prog_page(host, 0, host->spare_only); + + if (host->pagesize_2k) { + /* data in 4 areas datas */ + send_prog_page(host, 1, host->spare_only); + send_prog_page(host, 2, host->spare_only); + send_prog_page(host, 3, host->spare_only); + } + + break; + + case NAND_CMD_ERASE1: + useirq = false; + break; + } + + /* Write out the command to the device. */ + send_cmd(host, command, useirq); + + /* Write out column address, if necessary */ + if (column != -1) { + /* + * MXC NANDFC can only perform full page+spare or + * spare-only read/write. When the upper layers + * layers perform a read/write buf operation, + * we will used the saved column adress to index into + * the full page. 
+ */ + send_addr(host, 0, page_addr == -1); + if (host->pagesize_2k) + /* another col addr cycle for 2k page */ + send_addr(host, 0, false); + } + + /* Write out page address, if necessary */ + if (page_addr != -1) { + /* paddr_0 - p_addr_7 */ + send_addr(host, (page_addr & 0xff), false); + + if (host->pagesize_2k) { + send_addr(host, (page_addr >> 8) & 0xFF, false); + if (mtd->size >= 0x40000000) + send_addr(host, (page_addr >> 16) & 0xff, true); + } else { + /* One more address cycle for higher density devices */ + if (mtd->size >= 0x4000000) { + /* paddr_8 - paddr_15 */ + send_addr(host, (page_addr >> 8) & 0xff, false); + send_addr(host, (page_addr >> 16) & 0xff, true); + } else + /* paddr_8 - paddr_15 */ + send_addr(host, (page_addr >> 8) & 0xff, true); + } + } + + /* Command post-processing step */ + switch (command) { + + case NAND_CMD_RESET: + break; + + case NAND_CMD_READOOB: + case NAND_CMD_READ0: + if (host->pagesize_2k) { + /* send read confirm command */ + send_cmd(host, NAND_CMD_READSTART, true); + /* read for each AREA */ + send_read_page(host, 0, host->spare_only); + send_read_page(host, 1, host->spare_only); + send_read_page(host, 2, host->spare_only); + send_read_page(host, 3, host->spare_only); + } else + send_read_page(host, 0, host->spare_only); + break; + + case NAND_CMD_READID: + send_read_id(host); + break; + + case NAND_CMD_PAGEPROG: + break; + + case NAND_CMD_STATUS: + break; + + case NAND_CMD_ERASE2: + break; + } +} + +static int __init mxcnd_probe(struct platform_device *pdev) +{ + struct nand_chip *this; + struct mtd_info *mtd; + struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; + struct mxc_nand_host *host; + struct resource *res; + uint16_t tmp; + int err = 0, nr_parts = 0; + + /* Allocate memory for MTD device structure and private data */ + host = kzalloc(sizeof(struct mxc_nand_host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->dev = &pdev->dev; + /* structures must be linked */ + this = &host->nand; + mtd = &host->mtd; + mtd->priv = this; + mtd->owner = THIS_MODULE; + + /* 50 us command delay time */ + this->chip_delay = 5; + + this->priv = host; + this->dev_ready = mxc_nand_dev_ready; + this->cmdfunc = mxc_nand_command; + this->select_chip = mxc_nand_select_chip; + this->read_byte = mxc_nand_read_byte; + this->read_word = mxc_nand_read_word; + this->write_buf = mxc_nand_write_buf; + this->read_buf = mxc_nand_read_buf; + this->verify_buf = mxc_nand_verify_buf; + + host->clk = clk_get(&pdev->dev, "nfc_clk"); + if (IS_ERR(host->clk)) + goto eclk; + + clk_enable(host->clk); + host->clk_act = 1; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + err = -ENODEV; + goto eres; + } + + host->regs = ioremap(res->start, res->end - res->start + 1); + if (!host->regs) { + err = -EIO; + goto eres; + } + + tmp = readw(host->regs + NFC_CONFIG1); + tmp |= NFC_INT_MSK; + writew(tmp, host->regs + NFC_CONFIG1); + + init_waitqueue_head(&host->irq_waitq); + + host->irq = platform_get_irq(pdev, 0); + + err = request_irq(host->irq, mxc_nfc_irq, 0, "mxc_nd", host); + if (err) + goto eirq; + + if (pdata->hw_ecc) { + this->ecc.calculate = mxc_nand_calculate_ecc; + this->ecc.hwctl = mxc_nand_enable_hwecc; + this->ecc.correct = mxc_nand_correct_data; + this->ecc.mode = NAND_ECC_HW; + this->ecc.size = 512; + this->ecc.bytes = 3; + this->ecc.layout = &nand_hw_eccoob_8; + tmp = readw(host->regs + NFC_CONFIG1); + tmp |= NFC_ECC_EN; + writew(tmp, host->regs + NFC_CONFIG1); + } else { + this->ecc.size = 512; + this->ecc.bytes = 3; + 
this->ecc.layout = &nand_hw_eccoob_8; + this->ecc.mode = NAND_ECC_SOFT; + tmp = readw(host->regs + NFC_CONFIG1); + tmp &= ~NFC_ECC_EN; + writew(tmp, host->regs + NFC_CONFIG1); + } + + /* Reset NAND */ + this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + + /* preset operation */ + /* Unlock the internal RAM Buffer */ + writew(0x2, host->regs + NFC_CONFIG); + + /* Blocks to be unlocked */ + writew(0x0, host->regs + NFC_UNLOCKSTART_BLKADDR); + writew(0x4000, host->regs + NFC_UNLOCKEND_BLKADDR); + + /* Unlock Block Command for given address range */ + writew(0x4, host->regs + NFC_WRPROT); + + /* NAND bus width determines access funtions used by upper layer */ + if (pdata->width == 2) { + this->options |= NAND_BUSWIDTH_16; + this->ecc.layout = &nand_hw_eccoob_16; + } + + host->pagesize_2k = 0; + + /* Scan to find existence of the device */ + if (nand_scan(mtd, 1)) { + DEBUG(MTD_DEBUG_LEVEL0, + "MXC_ND: Unable to find any NAND device.\n"); + err = -ENXIO; + goto escan; + } + + /* Register the partitions */ +#ifdef CONFIG_MTD_PARTITIONS + nr_parts = + parse_mtd_partitions(mtd, part_probes, &host->parts, 0); + if (nr_parts > 0) + add_mtd_partitions(mtd, host->parts, nr_parts); + else +#endif + { + pr_info("Registering %s as whole device\n", mtd->name); + add_mtd_device(mtd); + } + + platform_set_drvdata(pdev, host); + + return 0; + +escan: + free_irq(host->irq, NULL); +eirq: + iounmap(host->regs); +eres: + clk_put(host->clk); +eclk: + kfree(host); + + return err; +} + +static int __devexit mxcnd_remove(struct platform_device *pdev) +{ + struct mxc_nand_host *host = platform_get_drvdata(pdev); + + clk_put(host->clk); + + platform_set_drvdata(pdev, NULL); + + nand_release(&host->mtd); + free_irq(host->irq, NULL); + iounmap(host->regs); + kfree(host); + + return 0; +} + +#ifdef CONFIG_PM +static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct mtd_info *info = platform_get_drvdata(pdev); + int ret = 0; + + DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); + if (info) + ret = info->suspend(info); + + /* Disable the NFC clock */ + clk_disable(nfc_clk); /* FIXME */ + + return ret; +} + +static int mxcnd_resume(struct platform_device *pdev) +{ + struct mtd_info *info = platform_get_drvdata(pdev); + int ret = 0; + + DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); + /* Enable the NFC clock */ + clk_enable(nfc_clk); /* FIXME */ + + if (info) + info->resume(info); + + return ret; +} + +#else +# define mxcnd_suspend NULL +# define mxcnd_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver mxcnd_driver = { + .driver = { + .name = DRIVER_NAME, + }, + .remove = __exit_p(mxcnd_remove), + .suspend = mxcnd_suspend, + .resume = mxcnd_resume, +}; + +static int __init mxc_nd_init(void) +{ + /* Register the device driver structure. */ + pr_info("MXC MTD nand Driver\n"); + if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) { + printk(KERN_ERR "Driver register failed for mxcnd_driver\n"); + return -ENODEV; + } + return 0; +} + +static void __exit mxc_nd_cleanup(void) +{ + /* Unregister the device structure */ + platform_driver_unregister(&mxcnd_driver); +} + +module_init(mxc_nd_init); +module_exit(mxc_nd_cleanup); + +MODULE_AUTHOR("Freescale Semiconductor, Inc."); +MODULE_DESCRIPTION("MXC NAND MTD driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 7d200e88cbdff5334d23d3af8d444eb9cc041962 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 31 Aug 2008 19:32:13 +0300 Subject: UBI: remove BKL We do not need BKL in UBI because we serialize things properly. 
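The only thing the removed lock_kernel()/unlock_kernel() pair protected here was the open path itself, and UBI already serializes that with its own locking, so the open handler collapses to a straight-line function. A condensed sketch of the resulting shape is below; it is not the verbatim UBI code: the read-only mode selection and the private_data assignment are assumptions filled in for illustration (the hunk that follows elides them), and UBI_READONLY is taken from the UBI API rather than from this patch.

        /*
         * Condensed sketch of the post-patch open path, kernel context
         * assumed. The read-only branch and the private_data handling are
         * illustrative assumptions, not lines from the diff below.
         */
        static int vol_cdev_open_sketch(struct inode *inode, struct file *file)
        {
                struct ubi_volume_desc *desc;
                int vol_id = iminor(inode) - 1, mode, ubi_num;

                /* No BKL: UBI's own locks serialize these lookups/opens. */
                ubi_num = ubi_major2num(imajor(inode));
                if (ubi_num < 0)
                        return ubi_num;

                mode = (file->f_mode & FMODE_WRITE) ? UBI_READWRITE : UBI_READONLY;

                desc = ubi_open_volume(ubi_num, vol_id, mode);
                if (IS_ERR(desc))
                        return PTR_ERR(desc);

                file->private_data = desc;
                return 0;
        }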
Signed-off-by: Artem Bityutskiy diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 03c759b..b30a0b8 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -104,12 +104,9 @@ static int vol_cdev_open(struct inode *inode, struct file *file) struct ubi_volume_desc *desc; int vol_id = iminor(inode) - 1, mode, ubi_num; - lock_kernel(); ubi_num = ubi_major2num(imajor(inode)); - if (ubi_num < 0) { - unlock_kernel(); + if (ubi_num < 0) return ubi_num; - } if (file->f_mode & FMODE_WRITE) mode = UBI_READWRITE; @@ -119,7 +116,6 @@ static int vol_cdev_open(struct inode *inode, struct file *file) dbg_gen("open volume %d, mode %d", vol_id, mode); desc = ubi_open_volume(ubi_num, vol_id, mode); - unlock_kernel(); if (IS_ERR(desc)) return PTR_ERR(desc); -- cgit v0.10.2 From 7bb67439bf6bd3782f07f1d7be1e63406453d5de Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 31 Aug 2008 08:05:58 -0700 Subject: select: Introduce a hrtimeout function This patch adds a schedule_hrtimeout() function, to be used by select() and poll() in a later patch. This function works similar to schedule_timeout() in most ways, but takes a timespec rather than jiffies. With a lot of contributions/fixes from Thomas Signed-off-by: Arjan van de Ven Signed-off-by: Thomas Gleixner diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6d93dce..becd17d 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -346,6 +346,8 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *tsk); +extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); + /* Soft interrupt function to run the hrtimer queues: */ extern void hrtimer_run_queues(void); extern void hrtimer_run_pending(void); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b8e4dce..782137d 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1678,3 +1678,68 @@ void __init hrtimers_init(void) #endif } +/** + * schedule_hrtimeout - sleep until timeout + * @expires: timeout value (ktime_t) + * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL + * + * Make the current task sleep until the given expiry time has + * elapsed. The routine will return immediately unless + * the current task state has been set (see set_current_state()). + * + * You can set the task state as follows - + * + * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to + * pass before the routine returns. + * + * %TASK_INTERRUPTIBLE - the routine may return early if a signal is + * delivered to the current task. + * + * The current task state is guaranteed to be TASK_RUNNING when this + * routine returns. + * + * Returns 0 when the timer has expired otherwise -EINTR + */ +int __sched schedule_hrtimeout(ktime_t *expires, + const enum hrtimer_mode mode) +{ + struct hrtimer_sleeper t; + + /* + * Optimize when a zero timeout value is given. It does not + * matter whether this is an absolute or a relative time. 
+ */ + if (expires && !expires->tv64) { + __set_current_state(TASK_RUNNING); + return 0; + } + + /* + * A NULL parameter means "inifinte" + */ + if (!expires) { + schedule(); + __set_current_state(TASK_RUNNING); + return -EINTR; + } + + hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); + t.timer.expires = *expires; + + hrtimer_init_sleeper(&t, current); + + hrtimer_start(&t.timer, t.timer.expires, mode); + if (!hrtimer_active(&t.timer)) + t.task = NULL; + + if (likely(t.task)) + schedule(); + + hrtimer_cancel(&t.timer); + destroy_hrtimer_on_stack(&t.timer); + + __set_current_state(TASK_RUNNING); + + return !t.task ? 0 : -EINTR; +} +EXPORT_SYMBOL_GPL(schedule_hrtimeout); -- cgit v0.10.2 From df0cc0539b4127bd02f64de2c335b4af1fdb3845 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 31 Aug 2008 08:09:53 -0700 Subject: select: add a timespec_add_safe() function For the select() rework, it's important to be able to add timespec structures in an overflow-safe manner. This patch adds a timespec_add_safe() function for this which is similar in operation to ktime_add_safe(), but works on a struct timespec. Signed-off-by: Thomas Gleixner Signed-off-by: Arjan van de Ven diff --git a/include/linux/time.h b/include/linux/time.h index e15206a..7269764 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -38,6 +38,8 @@ struct timezone { #define NSEC_PER_SEC 1000000000L #define FSEC_PER_SEC 1000000000000000L +#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) + static inline int timespec_equal(const struct timespec *a, const struct timespec *b) { @@ -72,6 +74,8 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon, const unsigned int min, const unsigned int sec); extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); +extern struct timespec timespec_add_safe(const struct timespec lhs, + const struct timespec rhs); /* * sub = lhs - rhs, in normalized form diff --git a/kernel/time.c b/kernel/time.c index 6a08660..d63a433 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -669,3 +669,21 @@ EXPORT_SYMBOL(get_jiffies_64); #endif EXPORT_SYMBOL(jiffies); + +/* + * Add two timespec values and do a safety check for overflow. + * It's assumed that both values are valid (>= 0) + */ +struct timespec timespec_add_safe(const struct timespec lhs, + const struct timespec rhs) +{ + struct timespec res; + + set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + + if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec) + res.tv_sec = TIME_T_MAX; + + return res; +} -- cgit v0.10.2 From b773ad40aca5bd755ba886620842f16e8fef6d75 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 31 Aug 2008 08:16:57 -0700 Subject: select: add poll_select_set_timeout() and poll_select_copy_remaining() helpers This patch adds 2 helpers that will be used for the hrtimer based select/poll: poll_select_set_timeout() is a helper that takes a timeout (as a second, nanosecond pair) and turns that into a "struct timespec" that represents the absolute end time. This is a common operation in the many select() and poll() variants and needs various, common, sanity checks. poll_select_copy_remaining() is a helper that takes care of copying the remaining time to userspace, as select(), pselect() and ppoll() do. This function comes in both a natural and a compat implementation (due to datastructure differences). 
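Taken together, the two helpers give every select()/poll() flavour the same shape: convert the user-supplied relative timeout into an absolute struct timespec up front, run the core select/poll function, then hand the result and the end time to poll_select_copy_remaining() for the copy-out plus the STICKY_TIMEOUTS and -ERESTARTNOHAND handling. The sketch below shows that calling pattern, condensed from the sys_select() conversion later in this series; sketch_sys_select is an illustrative name, not a function added by the patch.

        /* Sketch of the common calling pattern; kernel context assumed. */
        asmlinkage long sketch_sys_select(int n, fd_set __user *inp,
                                          fd_set __user *outp, fd_set __user *exp,
                                          struct timeval __user *tvp)
        {
                struct timespec end_time, *to = NULL;
                struct timeval tv;
                int ret;

                if (tvp) {
                        if (copy_from_user(&tv, tvp, sizeof(tv)))
                                return -EFAULT;

                        /* Relative (sec, usec) from user space becomes an
                         * absolute end time; normalization is checked in
                         * one place for all variants. */
                        to = &end_time;
                        if (poll_select_set_timeout(to, tv.tv_sec,
                                                    tv.tv_usec * NSEC_PER_USEC))
                                return -EINVAL;
                }

                ret = core_sys_select(n, inp, outp, exp, to);

                /* Copy the remaining time back (timeval flavour), honouring
                 * STICKY_TIMEOUTS and mapping -ERESTARTNOHAND to -EINTR when
                 * the timeout cannot be updated. */
                return poll_select_copy_remaining(&end_time, tvp, 1, ret);
        }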
Signed-off-by: Thomas Gleixner Signed-off-by: Arjan van de Ven diff --git a/fs/compat.c b/fs/compat.c index 075d050..424767c 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1436,6 +1436,57 @@ out_ret: #define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t)) +static int poll_select_copy_remaining(struct timespec *end_time, void __user *p, + int timeval, int ret) +{ + struct timespec ts; + + if (!p) + return ret; + + if (current->personality & STICKY_TIMEOUTS) + goto sticky; + + /* No update for zero timeout */ + if (!end_time->tv_sec && !end_time->tv_nsec) + return ret; + + ktime_get_ts(&ts); + ts = timespec_sub(*end_time, ts); + if (ts.tv_sec < 0) + ts.tv_sec = ts.tv_nsec = 0; + + if (timeval) { + struct compat_timeval rtv; + + rtv.tv_sec = ts.tv_sec; + rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC; + + if (!copy_to_user(p, &rtv, sizeof(rtv))) + return ret; + } else { + struct compat_timespec rts; + + rts.tv_sec = ts.tv_sec; + rts.tv_nsec = ts.tv_nsec; + + if (!copy_to_user(p, &rts, sizeof(rts))) + return ret; + } + /* + * If an application puts its timeval in read-only memory, we + * don't want the Linux-specific update to the timeval to + * cause a fault after the select has completed + * successfully. However, because we're not updating the + * timeval, we can't restart the system call. + */ + +sticky: + if (ret == -ERESTARTNOHAND) + ret = -EINTR; + return ret; +} + /* * Ooo, nasty. We need here to frob 32-bit unsigned longs to * 64-bit unsigned longs. diff --git a/fs/select.c b/fs/select.c index da0e882..1180a62 100644 --- a/fs/select.c +++ b/fs/select.c @@ -130,6 +130,81 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, add_wait_queue(wait_address, &entry->wait); } +/** + * poll_select_set_timeout - helper function to setup the timeout value + * @to: pointer to timespec variable for the final timeout + * @sec: seconds (from user space) + * @nsec: nanoseconds (from user space) + * + * Note, we do not use a timespec for the user space value here, That + * way we can use the function for timeval and compat interfaces as well. + * + * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0. + */ +int poll_select_set_timeout(struct timespec *to, long sec, long nsec) +{ + struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec}; + + if (!timespec_valid(&ts)) + return -EINVAL; + + /* Optimize for the zero timeout value here */ + if (!sec && !nsec) { + to->tv_sec = to->tv_nsec = 0; + } else { + ktime_get_ts(to); + *to = timespec_add_safe(*to, ts); + } + return 0; +} + +static int poll_select_copy_remaining(struct timespec *end_time, void __user *p, + int timeval, int ret) +{ + struct timespec rts; + struct timeval rtv; + + if (!p) + return ret; + + if (current->personality & STICKY_TIMEOUTS) + goto sticky; + + /* No update for zero timeout */ + if (!end_time->tv_sec && !end_time->tv_nsec) + return ret; + + ktime_get_ts(&rts); + rts = timespec_sub(*end_time, rts); + if (rts.tv_sec < 0) + rts.tv_sec = rts.tv_nsec = 0; + + if (timeval) { + rtv.tv_sec = rts.tv_sec; + rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC; + + if (!copy_to_user(p, &rtv, sizeof(rtv))) + return ret; + + } else if (!copy_to_user(p, &rts, sizeof(rts))) + return ret; + + /* + * If an application puts its timeval in read-only memory, we + * don't want the Linux-specific update to the timeval to + * cause a fault after the select has completed + * successfully. However, because we're not updating the + * timeval, we can't restart the system call. 
+ */ + +sticky: + if (ret == -ERESTARTNOHAND) + ret = -EINTR; + return ret; +} + + + #define FDS_IN(fds, n) (fds->in + n) #define FDS_OUT(fds, n) (fds->out + n) #define FDS_EX(fds, n) (fds->ex + n) diff --git a/include/linux/poll.h b/include/linux/poll.h index ef45382..f65de51 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -120,6 +120,8 @@ extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, s64 *timeout); +extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); + #endif /* KERNEL */ #endif /* _LINUX_POLL_H */ -- cgit v0.10.2 From be5dad20a55e054a35dac7f6f5f184dc72b379b4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 31 Aug 2008 08:19:15 -0700 Subject: select: add a poll specific struct to the restart_block union with hrtimer poll/select, the signal restart data no longer is a single long representing a jiffies count, but it becomes a second/nanosecond pair that also needs to encode if there was a timeout at all or not. This patch adds a struct to the restart_block union for this purpose Signed-off-by: Thomas Gleixner Signed-off-by: Arjan van de Ven diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 38a5647..e6b820f 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -38,6 +38,14 @@ struct restart_block { #endif u64 expires; } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; }; }; -- cgit v0.10.2 From 8ff3e8e85fa6c312051134b3953e397feb639f51 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 31 Aug 2008 08:26:40 -0700 Subject: select: switch select() and poll() over to hrtimers With lots of help, input and cleanups from Thomas Gleixner This patch switches select() and poll() over to hrtimers. The core of the patch is replacing the "s64 timeout" with a "struct timespec end_time" in all the plumbing. But most of the diffstat comes from using the just introduced helpers: poll_select_set_timeout poll_select_copy_remaining timespec_add_safe which make manipulating the timespec easier and less error-prone. 
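At the heart of the plumbing change, do_select() and do_poll() stop counting down a jiffies budget and instead carry the absolute end time through the wait loop: a zero end time short-circuits to a single non-blocking scan, and on the first sleeping pass the timespec is converted once to ktime_t and handed to schedule_hrtimeout() in absolute mode. A condensed sketch of that shared loop skeleton follows (descriptor scanning and poll-table bookkeeping elided; wait_loop_sketch is an illustrative name, not a function from the patch).

        /* Condensed sketch of the new wait-loop shape shared by do_select()
         * and do_poll(); fd scanning is elided. */
        static int wait_loop_sketch(struct timespec *end_time)
        {
                ktime_t expire, *to = NULL;
                int retval = 0, timed_out = 0;

                /* A zero end time means "scan once, do not sleep". */
                if (end_time && !end_time->tv_sec && !end_time->tv_nsec)
                        timed_out = 1;

                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        /* ... scan the descriptors, set retval on events ... */

                        if (retval || timed_out || signal_pending(current))
                                break;

                        /* First sleeping pass: convert the absolute end time
                         * to ktime_t once, then reuse it on every iteration. */
                        if (end_time && !to) {
                                expire = timespec_to_ktime(*end_time);
                                to = &expire;
                        }

                        if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS))
                                timed_out = 1;
                }
                __set_current_state(TASK_RUNNING);

                return retval;
        }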
Signed-off-by: Arjan van de Ven Signed-off-by: Thomas Gleixner diff --git a/fs/compat.c b/fs/compat.c index 424767c..133ed7f 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1568,7 +1568,8 @@ int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) int compat_core_sys_select(int n, compat_ulong_t __user *inp, - compat_ulong_t __user *outp, compat_ulong_t __user *exp, s64 *timeout) + compat_ulong_t __user *outp, compat_ulong_t __user *exp, + struct timespec *end_time) { fd_set_bits fds; void *bits; @@ -1615,7 +1616,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, zero_fd_set(n, fds.res_out); zero_fd_set(n, fds.res_ex); - ret = do_select(n, &fds, timeout); + ret = do_select(n, &fds, end_time); if (ret < 0) goto out; @@ -1641,7 +1642,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct compat_timeval __user *tvp) { - s64 timeout = -1; + struct timespec end_time, *to = NULL; struct compat_timeval tv; int ret; @@ -1649,43 +1650,14 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, if (copy_from_user(&tv, tvp, sizeof(tv))) return -EFAULT; - if (tv.tv_sec < 0 || tv.tv_usec < 0) + to = &end_time; + if (poll_select_set_timeout(to, tv.tv_sec, + tv.tv_usec * NSEC_PER_USEC)) return -EINVAL; - - /* Cast to u64 to make GCC stop complaining */ - if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS) - timeout = -1; /* infinite */ - else { - timeout = DIV_ROUND_UP(tv.tv_usec, 1000000/HZ); - timeout += tv.tv_sec * HZ; - } } - ret = compat_core_sys_select(n, inp, outp, exp, &timeout); - - if (tvp) { - struct compat_timeval rtv; - - if (current->personality & STICKY_TIMEOUTS) - goto sticky; - rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); - rtv.tv_sec = timeout; - if (compat_timeval_compare(&rtv, &tv) >= 0) - rtv = tv; - if (copy_to_user(tvp, &rtv, sizeof(rtv))) { -sticky: - /* - * If an application puts its timeval in read-only - * memory, we don't want the Linux-specific update to - * the timeval to cause a fault after the select has - * completed successfully. However, because we're not - * updating the timeval, we can't restart the system - * call. 
- */ - if (ret == -ERESTARTNOHAND) - ret = -EINTR; - } - } + ret = compat_core_sys_select(n, inp, outp, exp, to); + ret = poll_select_copy_remaining(&end_time, tvp, 1, ret); return ret; } @@ -1698,15 +1670,16 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp, { compat_sigset_t ss32; sigset_t ksigmask, sigsaved; - s64 timeout = MAX_SCHEDULE_TIMEOUT; struct compat_timespec ts; + struct timespec end_time, *to = NULL; int ret; if (tsp) { if (copy_from_user(&ts, tsp, sizeof(ts))) return -EFAULT; - if (ts.tv_sec < 0 || ts.tv_nsec < 0) + to = &end_time; + if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; } @@ -1721,51 +1694,8 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp, sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } - do { - if (tsp) { - if ((unsigned long)ts.tv_sec < MAX_SELECT_SECONDS) { - timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ); - timeout += ts.tv_sec * (unsigned long)HZ; - ts.tv_sec = 0; - ts.tv_nsec = 0; - } else { - ts.tv_sec -= MAX_SELECT_SECONDS; - timeout = MAX_SELECT_SECONDS * HZ; - } - } - - ret = compat_core_sys_select(n, inp, outp, exp, &timeout); - - } while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec)); - - if (tsp) { - struct compat_timespec rts; - - if (current->personality & STICKY_TIMEOUTS) - goto sticky; - - rts.tv_sec = timeout / HZ; - rts.tv_nsec = (timeout % HZ) * (NSEC_PER_SEC/HZ); - if (rts.tv_nsec >= NSEC_PER_SEC) { - rts.tv_sec++; - rts.tv_nsec -= NSEC_PER_SEC; - } - if (compat_timespec_compare(&rts, &ts) >= 0) - rts = ts; - if (copy_to_user(tsp, &rts, sizeof(rts))) { -sticky: - /* - * If an application puts its timeval in read-only - * memory, we don't want the Linux-specific update to - * the timeval to cause a fault after the select has - * completed successfully. However, because we're not - * updating the timeval, we can't restart the system - * call. - */ - if (ret == -ERESTARTNOHAND) - ret = -EINTR; - } - } + ret = compat_core_sys_select(n, inp, outp, exp, to); + ret = poll_select_copy_remaining(&end_time, tsp, 0, ret); if (ret == -ERESTARTNOHAND) { /* @@ -1810,18 +1740,16 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, compat_sigset_t ss32; sigset_t ksigmask, sigsaved; struct compat_timespec ts; - s64 timeout = -1; + struct timespec end_time, *to = NULL; int ret; if (tsp) { if (copy_from_user(&ts, tsp, sizeof(ts))) return -EFAULT; - /* We assume that ts.tv_sec is always lower than - the number of seconds that can be expressed in - an s64. Otherwise the compiler bitches at us */ - timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ); - timeout += ts.tv_sec * HZ; + to = &end_time; + if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) + return -EINVAL; } if (sigmask) { @@ -1835,7 +1763,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } - ret = do_sys_poll(ufds, nfds, &timeout); + ret = do_sys_poll(ufds, nfds, to); /* We can restart this syscall, usually */ if (ret == -EINTR) { @@ -1853,31 +1781,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, } else if (sigmask) sigprocmask(SIG_SETMASK, &sigsaved, NULL); - if (tsp && timeout >= 0) { - struct compat_timespec rts; - - if (current->personality & STICKY_TIMEOUTS) - goto sticky; - /* Yes, we know it's actually an s64, but it's also positive. 
*/ - rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * - 1000; - rts.tv_sec = timeout; - if (compat_timespec_compare(&rts, &ts) >= 0) - rts = ts; - if (copy_to_user(tsp, &rts, sizeof(rts))) { -sticky: - /* - * If an application puts its timeval in read-only - * memory, we don't want the Linux-specific update to - * the timeval to cause a fault after the select has - * completed successfully. However, because we're not - * updating the timeval, we can't restart the system - * call. - */ - if (ret == -ERESTARTNOHAND && timeout >= 0) - ret = -EINTR; - } - } + ret = poll_select_copy_remaining(&end_time, tsp, 0, ret); return ret; } diff --git a/fs/select.c b/fs/select.c index 1180a62..f6dceb5 100644 --- a/fs/select.c +++ b/fs/select.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -203,8 +204,6 @@ sticky: return ret; } - - #define FDS_IN(fds, n) (fds->in + n) #define FDS_OUT(fds, n) (fds->out + n) #define FDS_EX(fds, n) (fds->ex + n) @@ -257,11 +256,12 @@ get_max: #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR) #define POLLEX_SET (POLLPRI) -int do_select(int n, fd_set_bits *fds, s64 *timeout) +int do_select(int n, fd_set_bits *fds, struct timespec *end_time) { + ktime_t expire, *to = NULL; struct poll_wqueues table; poll_table *wait; - int retval, i; + int retval, i, timed_out = 0; rcu_read_lock(); retval = max_select_fd(n, fds); @@ -273,12 +273,14 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) poll_initwait(&table); wait = &table.pt; - if (!*timeout) + if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { wait = NULL; + timed_out = 1; + } + retval = 0; for (;;) { unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; - long __timeout; set_current_state(TASK_INTERRUPTIBLE); @@ -334,27 +336,25 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) cond_resched(); } wait = NULL; - if (retval || !*timeout || signal_pending(current)) + if (retval || timed_out || signal_pending(current)) break; if (table.error) { retval = table.error; break; } - if (*timeout < 0) { - /* Wait indefinitely */ - __timeout = MAX_SCHEDULE_TIMEOUT; - } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT - 1)) { - /* Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in a loop */ - __timeout = MAX_SCHEDULE_TIMEOUT - 1; - *timeout -= __timeout; - } else { - __timeout = *timeout; - *timeout = 0; + /* + * If this is the first loop and we have a timeout + * given, then we convert to ktime_t and set the to + * pointer to the expiry value. 
+ */ + if (end_time && !to) { + expire = timespec_to_ktime(*end_time); + to = &expire; } - __timeout = schedule_timeout(__timeout); - if (*timeout >= 0) - *timeout += __timeout; + + if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS)) + timed_out = 1; } __set_current_state(TASK_RUNNING); @@ -375,7 +375,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout) ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, s64 *timeout) + fd_set __user *exp, struct timespec *end_time) { fd_set_bits fds; void *bits; @@ -426,7 +426,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, zero_fd_set(n, fds.res_out); zero_fd_set(n, fds.res_ex); - ret = do_select(n, &fds, timeout); + ret = do_select(n, &fds, end_time); if (ret < 0) goto out; @@ -452,7 +452,7 @@ out_nofds: asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp) { - s64 timeout = -1; + struct timespec end_time, *to = NULL; struct timeval tv; int ret; @@ -460,43 +460,14 @@ asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, if (copy_from_user(&tv, tvp, sizeof(tv))) return -EFAULT; - if (tv.tv_sec < 0 || tv.tv_usec < 0) + to = &end_time; + if (poll_select_set_timeout(to, tv.tv_sec, + tv.tv_usec * NSEC_PER_USEC)) return -EINVAL; - - /* Cast to u64 to make GCC stop complaining */ - if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS) - timeout = -1; /* infinite */ - else { - timeout = DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ); - timeout += tv.tv_sec * HZ; - } } - ret = core_sys_select(n, inp, outp, exp, &timeout); - - if (tvp) { - struct timeval rtv; - - if (current->personality & STICKY_TIMEOUTS) - goto sticky; - rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)); - rtv.tv_sec = timeout; - if (timeval_compare(&rtv, &tv) >= 0) - rtv = tv; - if (copy_to_user(tvp, &rtv, sizeof(rtv))) { -sticky: - /* - * If an application puts its timeval in read-only - * memory, we don't want the Linux-specific update to - * the timeval to cause a fault after the select has - * completed successfully. However, because we're not - * updating the timeval, we can't restart the system - * call. 
- */ - if (ret == -ERESTARTNOHAND) - ret = -EINTR; - } - } + ret = core_sys_select(n, inp, outp, exp, to); + ret = poll_select_copy_remaining(&end_time, tvp, 1, ret); return ret; } @@ -506,25 +477,17 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timespec __user *tsp, const sigset_t __user *sigmask, size_t sigsetsize) { - s64 timeout = MAX_SCHEDULE_TIMEOUT; sigset_t ksigmask, sigsaved; - struct timespec ts; + struct timespec ts, end_time, *to = NULL; int ret; if (tsp) { if (copy_from_user(&ts, tsp, sizeof(ts))) return -EFAULT; - if (ts.tv_sec < 0 || ts.tv_nsec < 0) + to = &end_time; + if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) return -EINVAL; - - /* Cast to u64 to make GCC stop complaining */ - if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS) - timeout = -1; /* infinite */ - else { - timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ); - timeout += ts.tv_sec * HZ; - } } if (sigmask) { @@ -538,32 +501,8 @@ asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp, sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } - ret = core_sys_select(n, inp, outp, exp, &timeout); - - if (tsp) { - struct timespec rts; - - if (current->personality & STICKY_TIMEOUTS) - goto sticky; - rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * - 1000; - rts.tv_sec = timeout; - if (timespec_compare(&rts, &ts) >= 0) - rts = ts; - if (copy_to_user(tsp, &rts, sizeof(rts))) { -sticky: - /* - * If an application puts its timeval in read-only - * memory, we don't want the Linux-specific update to - * the timeval to cause a fault after the select has - * completed successfully. However, because we're not - * updating the timeval, we can't restart the system - * call. - */ - if (ret == -ERESTARTNOHAND) - ret = -EINTR; - } - } + ret = core_sys_select(n, inp, outp, exp, &end_time); + ret = poll_select_copy_remaining(&end_time, tsp, 0, ret); if (ret == -ERESTARTNOHAND) { /* @@ -649,18 +588,20 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait) } static int do_poll(unsigned int nfds, struct poll_list *list, - struct poll_wqueues *wait, s64 *timeout) + struct poll_wqueues *wait, struct timespec *end_time) { - int count = 0; poll_table* pt = &wait->pt; + ktime_t expire, *to = NULL; + int timed_out = 0, count = 0; /* Optimise the no-wait case */ - if (!(*timeout)) + if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { pt = NULL; + timed_out = 1; + } for (;;) { struct poll_list *walk; - long __timeout; set_current_state(TASK_INTERRUPTIBLE); for (walk = list; walk != NULL; walk = walk->next) { @@ -692,27 +633,21 @@ static int do_poll(unsigned int nfds, struct poll_list *list, if (signal_pending(current)) count = -EINTR; } - if (count || !*timeout) + if (count || timed_out) break; - if (*timeout < 0) { - /* Wait indefinitely */ - __timeout = MAX_SCHEDULE_TIMEOUT; - } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT-1)) { - /* - * Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in - * a loop - */ - __timeout = MAX_SCHEDULE_TIMEOUT - 1; - *timeout -= __timeout; - } else { - __timeout = *timeout; - *timeout = 0; + /* + * If this is the first loop and we have a timeout + * given, then we convert to ktime_t and set the to + * pointer to the expiry value. 
+ */ + if (end_time && !to) { + expire = timespec_to_ktime(*end_time); + to = &expire; } - __timeout = schedule_timeout(__timeout); - if (*timeout >= 0) - *timeout += __timeout; + if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS)) + timed_out = 1; } __set_current_state(TASK_RUNNING); return count; @@ -721,7 +656,8 @@ static int do_poll(unsigned int nfds, struct poll_list *list, #define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \ sizeof(struct pollfd)) -int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) +int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, + struct timespec *end_time) { struct poll_wqueues table; int err = -EFAULT, fdcount, len, size; @@ -761,7 +697,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout) } poll_initwait(&table); - fdcount = do_poll(nfds, head, &table, timeout); + fdcount = do_poll(nfds, head, &table, end_time); poll_freewait(&table); for (walk = head; walk; walk = walk->next) { @@ -787,16 +723,21 @@ out_fds: static long do_restart_poll(struct restart_block *restart_block) { - struct pollfd __user *ufds = (struct pollfd __user*)restart_block->arg0; - int nfds = restart_block->arg1; - s64 timeout = ((s64)restart_block->arg3<<32) | (s64)restart_block->arg2; + struct pollfd __user *ufds = restart_block->poll.ufds; + int nfds = restart_block->poll.nfds; + struct timespec *to = NULL, end_time; int ret; - ret = do_sys_poll(ufds, nfds, &timeout); + if (restart_block->poll.has_timeout) { + end_time.tv_sec = restart_block->poll.tv_sec; + end_time.tv_nsec = restart_block->poll.tv_nsec; + to = &end_time; + } + + ret = do_sys_poll(ufds, nfds, to); + if (ret == -EINTR) { restart_block->fn = do_restart_poll; - restart_block->arg2 = timeout & 0xFFFFFFFF; - restart_block->arg3 = (u64)timeout >> 32; ret = -ERESTART_RESTARTBLOCK; } return ret; @@ -805,31 +746,32 @@ static long do_restart_poll(struct restart_block *restart_block) asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, long timeout_msecs) { - s64 timeout_jiffies; + struct timespec end_time, *to = NULL; int ret; - if (timeout_msecs > 0) { -#if HZ > 1000 - /* We can only overflow if HZ > 1000 */ - if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ) - timeout_jiffies = -1; - else -#endif - timeout_jiffies = msecs_to_jiffies(timeout_msecs) + 1; - } else { - /* Infinite (< 0) or no (0) timeout */ - timeout_jiffies = timeout_msecs; + if (timeout_msecs >= 0) { + to = &end_time; + poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC, + NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC)); } - ret = do_sys_poll(ufds, nfds, &timeout_jiffies); + ret = do_sys_poll(ufds, nfds, to); + if (ret == -EINTR) { struct restart_block *restart_block; + restart_block = ¤t_thread_info()->restart_block; restart_block->fn = do_restart_poll; - restart_block->arg0 = (unsigned long)ufds; - restart_block->arg1 = nfds; - restart_block->arg2 = timeout_jiffies & 0xFFFFFFFF; - restart_block->arg3 = (u64)timeout_jiffies >> 32; + restart_block->poll.ufds = ufds; + restart_block->poll.nfds = nfds; + + if (timeout_msecs >= 0) { + restart_block->poll.tv_sec = end_time.tv_sec; + restart_block->poll.tv_nsec = end_time.tv_nsec; + restart_block->poll.has_timeout = 1; + } else + restart_block->poll.has_timeout = 0; + ret = -ERESTART_RESTARTBLOCK; } return ret; @@ -841,21 +783,16 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, size_t sigsetsize) { sigset_t ksigmask, sigsaved; - struct timespec ts; - s64 timeout = -1; + 
struct timespec ts, end_time, *to = NULL; int ret; if (tsp) { if (copy_from_user(&ts, tsp, sizeof(ts))) return -EFAULT; - /* Cast to u64 to make GCC stop complaining */ - if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS) - timeout = -1; /* infinite */ - else { - timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ); - timeout += ts.tv_sec * HZ; - } + to = &end_time; + if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) + return -EINVAL; } if (sigmask) { @@ -869,7 +806,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); } - ret = do_sys_poll(ufds, nfds, &timeout); + ret = do_sys_poll(ufds, nfds, to); /* We can restart this syscall, usually */ if (ret == -EINTR) { @@ -887,31 +824,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, } else if (sigmask) sigprocmask(SIG_SETMASK, &sigsaved, NULL); - if (tsp && timeout >= 0) { - struct timespec rts; - - if (current->personality & STICKY_TIMEOUTS) - goto sticky; - /* Yes, we know it's actually an s64, but it's also positive. */ - rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * - 1000; - rts.tv_sec = timeout; - if (timespec_compare(&rts, &ts) >= 0) - rts = ts; - if (copy_to_user(tsp, &rts, sizeof(rts))) { - sticky: - /* - * If an application puts its timeval in read-only - * memory, we don't want the Linux-specific update to - * the timeval to cause a fault after the select has - * completed successfully. However, because we're not - * updating the timeval, we can't restart the system - * call. - */ - if (ret == -ERESTARTNOHAND && timeout >= 0) - ret = -EINTR; - } - } + ret = poll_select_copy_remaining(&end_time, tsp, 0, ret); return ret; } diff --git a/include/linux/poll.h b/include/linux/poll.h index f65de51..badd98a 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -114,11 +114,11 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset) #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) -extern int do_select(int n, fd_set_bits *fds, s64 *timeout); +extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time); extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, - s64 *timeout); + struct timespec *end_time); extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, s64 *timeout); + fd_set __user *exp, struct timespec *end_time); extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); -- cgit v0.10.2 From 63ca243b271f5b44e0b1057003cf498b6d0fadf7 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 14:35:02 -0700 Subject: hrtimer: add abstraction functions for accessing the "expires" member In order to be able to turn hrtimers into range based, we need to provide accessor functions for getting to the "expires" ktime_t member of the struct hrtimer. This patch adds a set of accessors for this purpose: * hrtimer_set_expires * hrtimer_set_expires_tv64 * hrtimer_add_expires * hrtimer_add_expires_ns * hrtimer_get_expires * hrtimer_get_expires_tv64 * hrtimer_get_expires_ns * hrtimer_expires_remaining * hrtimer_start_expires No users of these new accessors are added yet; these follow in later patches. 
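The conversions in the follow-up patches are almost entirely mechanical; the before/after sketch below shows the most common pattern they replace, a periodic re-arm that touches timer->expires directly. The "before" form stops compiling once a later patch in this series renames the member to _expires, which is exactly why the accessors are introduced first. The wrapper names here are illustrative, not code from the patch.

        /* Before: open-coded access to the expires member. */
        static void rearm_before(struct hrtimer *timer, unsigned long period_ns)
        {
                timer->expires = ktime_add_ns(timer->expires, period_ns);
                hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
        }

        /* After: the same operations through the accessors, which keep
         * working when "expires" later becomes a (soft, hard) range. */
        static void rearm_after(struct hrtimer *timer, unsigned long period_ns)
        {
                hrtimer_add_expires_ns(timer, period_ns);
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
        }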
Hopefully this patch can even go into 2.6.27-rc so that the conversions will not have a bottleneck in -next Signed-off-by: Arjan van de Ven diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index becd17d..9900e99 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -217,6 +217,45 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) return timer->base->cpu_base->hres_active; } +static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) +{ + timer->expires = time; +} +static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) +{ + timer->expires.tv64 = tv64; +} + +static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) +{ + timer->expires = ktime_add_safe(timer->expires, time); +} + +static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns) +{ + timer->expires = ktime_add_ns(timer->expires, ns); +} + +static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) +{ + return timer->expires; +} + +static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) +{ + return timer->expires.tv64; +} + +static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) +{ + return ktime_to_ns(timer->expires); +} + +static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) +{ + return ktime_sub(timer->expires, timer->base->get_time()); +} + /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an @@ -287,6 +326,12 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer); +static inline int hrtimer_start_expires(struct hrtimer *timer, + enum hrtimer_mode mode) +{ + return hrtimer_start(timer, hrtimer_get_expires(timer), mode); +} + static inline int hrtimer_restart(struct hrtimer *timer) { return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); -- cgit v0.10.2 From beb20d52d03a51218827fb4a36a4b583debb03f9 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 14:55:57 -0700 Subject: hrtimer: convert kvm to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts KVM to these accessors. Signed-off-by: Arjan van de Ven diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index c0f7872..1bf8f57 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -205,8 +205,8 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps) wake_up_interruptible(&vcpu0->wq); } - pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period); - pt->scheduled = ktime_to_ns(pt->timer.expires); + hrtimer_add_expires_ns(&pt->timer, pt->period); + pt->scheduled = ktime_to_ns(hrtimer_get_expires(&pt->timer)); return (pt->period == 0 ? 
0 : 1); } @@ -246,7 +246,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) timer = &pit->pit_state.pit_timer.timer; if (hrtimer_cancel(timer)) - hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } static void destroy_pit_timer(struct kvm_kpit_timer *pt) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 73f43de..a5b61de 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -953,9 +953,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic) } if (apic_lvtt_period(apic)) { result = 1; - apic->timer.dev.expires = ktime_add_ns( - apic->timer.dev.expires, - apic->timer.period); + hrtimer_add_expires_ns(&apic->timer.dev, apic->timer.period); } return result; } @@ -1124,7 +1122,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) timer = &apic->timer.dev; if (hrtimer_cancel(timer)) - hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) -- cgit v0.10.2 From 76369470b7e5f97fc1a8af83c45b9ff739b08cb6 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:00:14 -0700 Subject: hrtimer: convert timerfd to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts timerfd to these accessors. Signed-off-by: Arjan van de Ven diff --git a/fs/timerfd.c b/fs/timerfd.c index c502c60..0862f0e 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -52,11 +52,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) { - ktime_t now, remaining; - - now = ctx->tmr.base->get_time(); - remaining = ktime_sub(ctx->tmr.expires, now); + ktime_t remaining; + remaining = hrtimer_expires_remaining(&ctx->tmr); return remaining.tv64 < 0 ? ktime_set(0, 0): remaining; } @@ -74,7 +72,7 @@ static void timerfd_setup(struct timerfd_ctx *ctx, int flags, ctx->ticks = 0; ctx->tintv = timespec_to_ktime(ktmr->it_interval); hrtimer_init(&ctx->tmr, ctx->clockid, htmode); - ctx->tmr.expires = texp; + hrtimer_set_expires(&ctx->tmr, texp); ctx->tmr.function = timerfd_tmrproc; if (texp.tv64 != 0) hrtimer_start(&ctx->tmr, texp, htmode); -- cgit v0.10.2 From 23dd7bb09bd8d7efd8a602aed97b93d52f85e675 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:00:54 -0700 Subject: hrtimer: convert net::sched_cbq to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts sched_cbq to these accessors. 
Signed-off-by: Arjan van de Ven diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 8b06fa9..03e389e 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -545,9 +545,10 @@ static void cbq_ovl_delay(struct cbq_class *cl) expires = ktime_set(0, 0); expires = ktime_add_ns(expires, PSCHED_US2NS(sched)); if (hrtimer_try_to_cancel(&q->delay_timer) && - ktime_to_ns(ktime_sub(q->delay_timer.expires, - expires)) > 0) - q->delay_timer.expires = expires; + ktime_to_ns(ktime_sub( + hrtimer_get_expires(&q->delay_timer), + expires)) > 0) + hrtimer_set_expires(&q->delay_timer, expires); hrtimer_restart(&q->delay_timer); cl->delayed = 1; cl->xstats.overactions++; -- cgit v0.10.2 From cc584b213f252bf698849cf4be2377cd3ec7501a Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:02:30 -0700 Subject: hrtimer: convert kernel/* to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts kernel/* to these accessors. Signed-off-by: Arjan van de Ven diff --git a/kernel/futex.c b/kernel/futex.c index 7d1136e..4cd5b43 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1299,10 +1299,9 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(&t, current); - t.timer.expires = *abs_time; + hrtimer_set_expires(&t.timer, *abs_time); - hrtimer_start(&t.timer, t.timer.expires, - HRTIMER_MODE_ABS); + hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); if (!hrtimer_active(&t.timer)) t.task = NULL; @@ -1404,7 +1403,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); - to->timer.expires = *time; + hrtimer_set_expires(&to->timer, *time); } q.pi_state = NULL; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 782137d..ae307fe 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -517,7 +517,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base) if (!base->first) continue; timer = rb_entry(base->first, struct hrtimer, node); - expires = ktime_sub(timer->expires, base->offset); + expires = ktime_sub(hrtimer_get_expires(timer), base->offset); if (expires.tv64 < cpu_base->expires_next.tv64) cpu_base->expires_next = expires; } @@ -539,10 +539,10 @@ static int hrtimer_reprogram(struct hrtimer *timer, struct hrtimer_clock_base *base) { ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next; - ktime_t expires = ktime_sub(timer->expires, base->offset); + ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); int res; - WARN_ON_ONCE(timer->expires.tv64 < 0); + WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); /* * When the callback is running, we do not reprogram the clock event @@ -794,7 +794,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) u64 orun = 1; ktime_t delta; - delta = ktime_sub(now, timer->expires); + delta = ktime_sub(now, hrtimer_get_expires(timer)); if (delta.tv64 < 0) return 0; @@ -806,8 +806,8 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) s64 incr = ktime_to_ns(interval); orun = ktime_divns(delta, incr); - timer->expires = ktime_add_ns(timer->expires, incr * orun); - if (timer->expires.tv64 > now.tv64) + hrtimer_add_expires_ns(timer, incr * orun); + if (hrtimer_get_expires_tv64(timer) > now.tv64) return orun; /* * This (and the 
ktime_add() below) is the @@ -815,7 +815,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) */ orun++; } - timer->expires = ktime_add_safe(timer->expires, interval); + hrtimer_add_expires(timer, interval); return orun; } @@ -847,7 +847,8 @@ static void enqueue_hrtimer(struct hrtimer *timer, * We dont care about collisions. Nodes with * the same expiry time stay together. */ - if (timer->expires.tv64 < entry->expires.tv64) { + if (hrtimer_get_expires_tv64(timer) < + hrtimer_get_expires_tv64(entry)) { link = &(*link)->rb_left; } else { link = &(*link)->rb_right; @@ -982,7 +983,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) #endif } - timer->expires = tim; + hrtimer_set_expires(timer, tim); timer_stats_hrtimer_set_start_info(timer); @@ -1076,7 +1077,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer) ktime_t rem; base = lock_hrtimer_base(timer, &flags); - rem = ktime_sub(timer->expires, base->get_time()); + rem = hrtimer_expires_remaining(timer); unlock_hrtimer_base(timer, &flags); return rem; @@ -1108,7 +1109,7 @@ ktime_t hrtimer_get_next_event(void) continue; timer = rb_entry(base->first, struct hrtimer, node); - delta.tv64 = timer->expires.tv64; + delta.tv64 = hrtimer_get_expires_tv64(timer); delta = ktime_sub(delta, base->get_time()); if (delta.tv64 < mindelta.tv64) mindelta.tv64 = delta.tv64; @@ -1308,10 +1309,10 @@ void hrtimer_interrupt(struct clock_event_device *dev) timer = rb_entry(node, struct hrtimer, node); - if (basenow.tv64 < timer->expires.tv64) { + if (basenow.tv64 < hrtimer_get_expires_tv64(timer)) { ktime_t expires; - expires = ktime_sub(timer->expires, + expires = ktime_sub(hrtimer_get_expires(timer), base->offset); if (expires.tv64 < expires_next.tv64) expires_next = expires; @@ -1414,7 +1415,8 @@ void hrtimer_run_queues(void) struct hrtimer *timer; timer = rb_entry(node, struct hrtimer, node); - if (base->softirq_time.tv64 <= timer->expires.tv64) + if (base->softirq_time.tv64 <= + hrtimer_get_expires_tv64(timer)) break; if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { @@ -1462,7 +1464,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod do { set_current_state(TASK_INTERRUPTIBLE); - hrtimer_start(&t->timer, t->timer.expires, mode); + hrtimer_start_expires(&t->timer, mode); if (!hrtimer_active(&t->timer)) t->task = NULL; @@ -1484,7 +1486,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) struct timespec rmt; ktime_t rem; - rem = ktime_sub(timer->expires, timer->base->get_time()); + rem = hrtimer_expires_remaining(timer); if (rem.tv64 <= 0) return 0; rmt = ktime_to_timespec(rem); @@ -1503,7 +1505,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS); - t.timer.expires.tv64 = restart->nanosleep.expires; + hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); if (do_nanosleep(&t, HRTIMER_MODE_ABS)) goto out; @@ -1530,7 +1532,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, int ret = 0; hrtimer_init_on_stack(&t.timer, clockid, mode); - t.timer.expires = timespec_to_ktime(*rqtp); + hrtimer_set_expires(&t.timer, timespec_to_ktime(*rqtp)); if (do_nanosleep(&t, mode)) goto out; @@ -1550,7 +1552,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, restart->fn = hrtimer_nanosleep_restart; restart->nanosleep.index = t.timer.base->index; restart->nanosleep.rmtp = rmtp; - 
restart->nanosleep.expires = t.timer.expires.tv64; + restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); ret = -ERESTART_RESTARTBLOCK; out: @@ -1724,11 +1726,11 @@ int __sched schedule_hrtimeout(ktime_t *expires, } hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); - t.timer.expires = *expires; + hrtimer_set_expires(&t.timer, *expires); hrtimer_init_sleeper(&t, current); - hrtimer_start(&t.timer, t.timer.expires, mode); + hrtimer_start_expires(&t.timer, mode); if (!hrtimer_active(&t.timer)) t.task = NULL; diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index e36d579..f85efcd 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -668,7 +668,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); - remaining = ktime_sub(timer->expires, now); + remaining = ktime_sub(hrtimer_get_expires(timer), now); /* Return 0 only, when the timer is expired and not pending */ if (remaining.tv64 <= 0) { /* @@ -762,7 +762,7 @@ common_timer_set(struct k_itimer *timr, int flags, hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); timr->it.real.timer.function = posix_timer_fn; - timer->expires = timespec_to_ktime(new_setting->it_value); + hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value)); /* Convert interval */ timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); @@ -771,14 +771,12 @@ common_timer_set(struct k_itimer *timr, int flags, if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { /* Setup correct expiry time for relative timers */ if (mode == HRTIMER_MODE_REL) { - timer->expires = - ktime_add_safe(timer->expires, - timer->base->get_time()); + hrtimer_add_expires(timer, timer->base->get_time()); } return 0; } - hrtimer_start(timer, timer->expires, mode); + hrtimer_start_expires(timer, mode); return 0; } diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 6522ae5..69d9cb9 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -631,8 +631,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, /* Setup the timer, when timeout != NULL */ if (unlikely(timeout)) { - hrtimer_start(&timeout->timer, timeout->timer.expires, - HRTIMER_MODE_ABS); + hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); if (!hrtimer_active(&timeout->timer)) timeout->task = NULL; } diff --git a/kernel/sched.c b/kernel/sched.c index 1a5f73c..e46b5af 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -221,9 +221,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) now = hrtimer_cb_get_time(&rt_b->rt_period_timer); hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); - hrtimer_start(&rt_b->rt_period_timer, - rt_b->rt_period_timer.expires, - HRTIMER_MODE_ABS); + hrtimer_start_expires(&rt_b->rt_period_timer, + HRTIMER_MODE_ABS); } spin_unlock(&rt_b->rt_runtime_lock); } @@ -1058,7 +1057,7 @@ static void hrtick_start(struct rq *rq, u64 delay) struct hrtimer *timer = &rq->hrtick_timer; ktime_t time = ktime_add_ns(timer->base->get_time(), delay); - timer->expires = time; + hrtimer_set_expires(timer, time); if (rq == this_rq()) { hrtimer_restart(timer); diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 5125ddd..4c8d854 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) time_state = TIME_OOP; printk(KERN_NOTICE "Clock: " "inserting leap second 23:59:60 UTC\n"); - leap_timer.expires 
= ktime_add_ns(leap_timer.expires, - NSEC_PER_SEC); + hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC); res = HRTIMER_RESTART; break; case TIME_DEL: diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a87b046..b33be61 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -288,7 +288,7 @@ void tick_nohz_stop_sched_tick(int inidle) goto out; } - ts->idle_tick = ts->sched_timer.expires; + ts->idle_tick = hrtimer_get_expires(&ts->sched_timer); ts->tick_stopped = 1; ts->idle_jiffies = last_jiffies; rcu_enter_nohz(); @@ -419,21 +419,21 @@ void tick_nohz_restart_sched_tick(void) ts->tick_stopped = 0; ts->idle_exittime = now; hrtimer_cancel(&ts->sched_timer); - ts->sched_timer.expires = ts->idle_tick; + hrtimer_set_expires(&ts->sched_timer, ts->idle_tick); while (1) { /* Forward the time to expire in the future */ hrtimer_forward(&ts->sched_timer, now, tick_period); if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { - hrtimer_start(&ts->sched_timer, - ts->sched_timer.expires, + hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) break; } else { - if (!tick_program_event(ts->sched_timer.expires, 0)) + if (!tick_program_event( + hrtimer_get_expires(&ts->sched_timer), 0)) break; } /* Update jiffies and reread time */ @@ -446,7 +446,7 @@ void tick_nohz_restart_sched_tick(void) static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) { hrtimer_forward(&ts->sched_timer, now, tick_period); - return tick_program_event(ts->sched_timer.expires, 0); + return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0); } /* @@ -529,7 +529,7 @@ static void tick_nohz_switch_to_nohz(void) next = tick_init_jiffy_update(); for (;;) { - ts->sched_timer.expires = next; + hrtimer_set_expires(&ts->sched_timer, next); if (!tick_program_event(next, 0)) break; next = ktime_add(next, tick_period); @@ -625,16 +625,15 @@ void tick_setup_sched_timer(void) ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; /* Get the next period (per cpu) */ - ts->sched_timer.expires = tick_init_jiffy_update(); + hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); offset = ktime_to_ns(tick_period) >> 1; do_div(offset, num_possible_cpus()); offset *= smp_processor_id(); - ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset); + hrtimer_add_expires_ns(&ts->sched_timer, offset); for (;;) { hrtimer_forward(&ts->sched_timer, now, tick_period); - hrtimer_start(&ts->sched_timer, ts->sched_timer.expires, - HRTIMER_MODE_ABS); + hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) break; diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index a40e20f..5224a32 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -66,8 +66,8 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now) #endif SEQ_printf(m, "\n"); SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n", - (unsigned long long)ktime_to_ns(timer->expires), - (long long)(ktime_to_ns(timer->expires) - now)); + (unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)), + (long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now)); } static void -- cgit v0.10.2 From 23446d1dc3d4f42a2b0fb82d4a098f9179ba486d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:18:10 -0700 Subject: hrtimer: convert powerpc/oprofile to the new hrtimer apis In order to be able to do 
range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts powerpc/oprofile to these accessors. Signed-off-by: Arjan van de Ven diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index 380d7e2..02ffe06 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c @@ -196,7 +196,7 @@ int start_spu_profiling(unsigned int cycles_reset) pr_debug("timer resolution: %lu\n", TICK_NSEC); kt = ktime_set(0, profiling_interval); hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - timer.expires = kt; + hrtimer_set_expires(&timer, kt); timer.function = profile_spus; /* Allocate arrays for collecting SPU PC samples */ -- cgit v0.10.2 From 18dd36af0010dd70c8634cdca0f99b47b5036c60 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:19:11 -0700 Subject: hrtimer: convert kvm-ia64 to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts KVM-ia64 to these accessors. Signed-off-by: Arjan van de Ven diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 7a37d06..cf8eae1 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1112,7 +1112,7 @@ static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) struct hrtimer *p_ht = &vcpu->arch.hlt_timer; if (hrtimer_cancel(p_ht)) - hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS); + hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); } static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) -- cgit v0.10.2 From 6c644eaeb2e000a08f0e20653b0835bb90a93e4a Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:20:30 -0700 Subject: hrtimer: convert s390 to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts s390 to these accessors. Signed-off-by: Arjan van de Ven diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 62b6b55..6f02f1e 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -659,9 +659,9 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, hr_time = ktime_set(0, poll_timeout); if (!hrtimer_is_queued(&ap_poll_timer) || - !hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) { - ap_poll_timer.expires = hr_time; - hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS); + !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) { + hrtimer_set_expires(&ap_poll_timer, hr_time); + hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS); } return count; } -- cgit v0.10.2 From 5c73a7d0411999e3cb3c6d64225450813738ae25 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:25:20 -0700 Subject: hrtimer: convert sound/ to the new hrtimer apis In order to be able to do range hrtimers we need to use accessor functions to the "expire" member of the hrtimer struct. This patch converts sound/ to these accessors. 
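The pcsp hunk below is the periodic-callback flavour of the same conversion: a self-rearming hrtimer callback that used to pass chip->timer.expires to hrtimer_forward() now reads the old expiry through hrtimer_get_expires(). Stripped of the driver specifics, the idiom looks like the sketch below; the function and period names are illustrative only.

        #define SKETCH_PERIOD_NS 1000000UL      /* 1 ms, arbitrary for illustration */

        /* Self-rearming periodic callback using the new accessor. */
        static enum hrtimer_restart sketch_timer_fn(struct hrtimer *timer)
        {
                /* Forward the expiry by one period measured from the old
                 * expiry value, read via the accessor rather than by
                 * touching timer->expires directly. */
                hrtimer_forward(timer, hrtimer_get_expires(timer),
                                ktime_set(0, SKETCH_PERIOD_NS));
                return HRTIMER_RESTART;
        }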
Signed-off-by: Arjan van de Ven diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c index e341f3f..1f42e40 100644 --- a/sound/drivers/pcsp/pcsp_lib.c +++ b/sound/drivers/pcsp/pcsp_lib.c @@ -34,7 +34,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle) chip->thalf = 0; if (!atomic_read(&chip->timer_active)) return HRTIMER_NORESTART; - hrtimer_forward(&chip->timer, chip->timer.expires, + hrtimer_forward(&chip->timer, hrtimer_get_expires(&chip->timer), ktime_set(0, chip->ns_rem)); return HRTIMER_RESTART; } @@ -118,7 +118,8 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle) chip->ns_rem = PCSP_PERIOD_NS(); ns = (chip->thalf ? PCSP_CALC_NS(timer_cnt) : chip->ns_rem); chip->ns_rem -= ns; - hrtimer_forward(&chip->timer, chip->timer.expires, ktime_set(0, ns)); + hrtimer_forward(&chip->timer, hrtimer_get_expires(&chip->timer), + ktime_set(0, ns)); return HRTIMER_RESTART; exit_nr_unlock2: -- cgit v0.10.2 From 799b64de256ea68fbb5db63bb55f61c305870643 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:27:58 -0700 Subject: hrtimer: rename the "expires" struct member to avoid accidental usage To catch code that still touches the "expires" memory directly, rename it to have the compiler complain rather than get nasty, hard to explain, runtime behavior Signed-off-by: Arjan van de Ven diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 9900e99..485a634 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -111,7 +111,7 @@ enum hrtimer_cb_mode { */ struct hrtimer { struct rb_node node; - ktime_t expires; + ktime_t _expires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; @@ -219,41 +219,41 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) { - timer->expires = time; + timer->_expires = time; } static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) { - timer->expires.tv64 = tv64; + timer->_expires.tv64 = tv64; } static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) { - timer->expires = ktime_add_safe(timer->expires, time); + timer->_expires = ktime_add_safe(timer->_expires, time); } static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns) { - timer->expires = ktime_add_ns(timer->expires, ns); + timer->_expires = ktime_add_ns(timer->_expires, ns); } static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) { - return timer->expires; + return timer->_expires; } static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) { - return timer->expires.tv64; + return timer->_expires.tv64; } static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) { - return ktime_to_ns(timer->expires); + return ktime_to_ns(timer->_expires); } static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) { - return ktime_sub(timer->expires, timer->base->get_time()); + return ktime_sub(timer->_expires, timer->base->get_time()); } /* @@ -334,7 +334,7 @@ static inline int hrtimer_start_expires(struct hrtimer *timer, static inline int hrtimer_restart(struct hrtimer *timer) { - return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); + return hrtimer_start(timer, timer->_expires, HRTIMER_MODE_ABS); } /* Query timers: */ -- cgit v0.10.2 From 654c8e0b1c623b156c5b92f28d914ab38c9c2c90 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:47:08 -0700 
Subject: hrtimer: turn hrtimers into range timers this patch turns hrtimers into range timers; they have 2 expire points 1) the soft expire point 2) the hard expire point the kernel will do it's regular best effort attempt to get the timer run at the hard expire point. However, if some other time fires after the soft expire point, the kernel now has the freedom to fire this timer at this point, and thus grouping the events and preventing a power-expensive wakeup in the future. Signed-off-by: Arjan van de Ven diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 485a634..28259c3 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -112,6 +112,7 @@ enum hrtimer_cb_mode { struct hrtimer { struct rb_node node; ktime_t _expires; + ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; @@ -220,20 +221,37 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) { timer->_expires = time; + timer->_softexpires = time; } + +static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) +{ + timer->_softexpires = time; + timer->_expires = ktime_add_safe(time, delta); +} + +static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) +{ + timer->_softexpires = time; + timer->_expires = ktime_add_safe(time, ns_to_ktime(delta)); +} + static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) { timer->_expires.tv64 = tv64; + timer->_softexpires.tv64 = tv64; } static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) { timer->_expires = ktime_add_safe(timer->_expires, time); + timer->_softexpires = ktime_add_safe(timer->_softexpires, time); } static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns) { timer->_expires = ktime_add_ns(timer->_expires, ns); + timer->_softexpires = ktime_add_ns(timer->_softexpires, ns); } static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) @@ -241,10 +259,19 @@ static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer) return timer->_expires; } +static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) +{ + return timer->_softexpires; +} + static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) { return timer->_expires.tv64; } +static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) +{ + return timer->_softexpires.tv64; +} static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer) { @@ -334,7 +361,7 @@ static inline int hrtimer_start_expires(struct hrtimer *timer, static inline int hrtimer_restart(struct hrtimer *timer) { - return hrtimer_start(timer, timer->_expires, HRTIMER_MODE_ABS); + return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } /* Query timers: */ @@ -391,6 +418,8 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *tsk); +extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, + const enum hrtimer_mode mode); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); /* Soft interrupt function to run the hrtimer queues: */ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index ae307fe..0148300 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1309,7 +1309,20 @@ void hrtimer_interrupt(struct 
clock_event_device *dev) timer = rb_entry(node, struct hrtimer, node); - if (basenow.tv64 < hrtimer_get_expires_tv64(timer)) { + /* + * The immediate goal for using the softexpires is + * minimizing wakeups, not running timers at the + * earliest interrupt after their soft expiration. + * This allows us to avoid using a Priority Search + * Tree, which can answer a stabbing querry for + * overlapping intervals and instead use the simple + * BST we already have. + * We don't add extra wakeups by delaying timers that + * are right-of a not yet expired timer, because that + * timer will have to trigger a wakeup anyway. + */ + + if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) { ktime_t expires; expires = ktime_sub(hrtimer_get_expires(timer), @@ -1681,14 +1694,20 @@ void __init hrtimers_init(void) } /** - * schedule_hrtimeout - sleep until timeout + * schedule_hrtimeout_range - sleep until timeout * @expires: timeout value (ktime_t) + * @delta: slack in expires timeout (ktime_t) * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL * * Make the current task sleep until the given expiry time has * elapsed. The routine will return immediately unless * the current task state has been set (see set_current_state()). * + * The @delta argument gives the kernel the freedom to schedule the + * actual wakeup to a time that is both power and performance friendly. + * The kernel give the normal best effort behavior for "@expires+@delta", + * but may decide to fire the timer earlier, but no earlier than @expires. + * * You can set the task state as follows - * * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to @@ -1702,7 +1721,7 @@ void __init hrtimers_init(void) * * Returns 0 when the timer has expired otherwise -EINTR */ -int __sched schedule_hrtimeout(ktime_t *expires, +int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, const enum hrtimer_mode mode) { struct hrtimer_sleeper t; @@ -1726,7 +1745,7 @@ int __sched schedule_hrtimeout(ktime_t *expires, } hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); - hrtimer_set_expires(&t.timer, *expires); + hrtimer_set_expires_range_ns(&t.timer, *expires, delta); hrtimer_init_sleeper(&t, current); @@ -1744,4 +1763,33 @@ int __sched schedule_hrtimeout(ktime_t *expires, return !t.task ? 0 : -EINTR; } +EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); + +/** + * schedule_hrtimeout - sleep until timeout + * @expires: timeout value (ktime_t) + * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL + * + * Make the current task sleep until the given expiry time has + * elapsed. The routine will return immediately unless + * the current task state has been set (see set_current_state()). + * + * You can set the task state as follows - + * + * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to + * pass before the routine returns. + * + * %TASK_INTERRUPTIBLE - the routine may return early if a signal is + * delivered to the current task. + * + * The current task state is guaranteed to be TASK_RUNNING when this + * routine returns. 
+ * + * Returns 0 when the timer has expired otherwise -EINTR + */ +int __sched schedule_hrtimeout(ktime_t *expires, + const enum hrtimer_mode mode) +{ + return schedule_hrtimeout_range(expires, 0, mode); +} EXPORT_SYMBOL_GPL(schedule_hrtimeout); -- cgit v0.10.2 From 6976675d94042fbd446231d1bd8b7de71a980ada Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:52:40 -0700 Subject: hrtimer: create a "timer_slack" field in the task struct We want to be able to control the default "rounding" that is used by select() and poll() and friends. This is a per process property (so that we can have a "nice" like program to start certain programs with a looser or stricter rounding) that can be set/get via a prctl(). For this purpose, a field called "timer_slack_ns" is added to the task struct. In addition, a field called "default_timer_slack"ns" is added so that tasks easily can temporarily to a more/less accurate slack and then back to the default. The default value of the slack is set to 50 usec; this is significantly less than 2.6.27's average select() and poll() timing error but still allows the kernel to group timers somewhat to preserve power behavior. Applications and admins can override this via the prctl() Signed-off-by: Arjan van de Ven diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 021d8e7..23fd890 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -170,6 +170,7 @@ extern struct group_info init_groups; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 5ad7919..48d887e 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -78,4 +78,11 @@ #define PR_GET_SECUREBITS 27 #define PR_SET_SECUREBITS 28 +/* + * Get/set the timerslack as used by poll/select/nanosleep + * A value of 0 means "use default" + */ +#define PR_SET_TIMERSLACK 29 +#define PR_GET_TIMERSLACK 30 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 3d9120c..dcc03fd 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1301,6 +1301,12 @@ struct task_struct { int latency_record_count; struct latency_record latency_record[LT_SAVECOUNT]; #endif + /* + * time slack values; these are used to round up poll() and + * select() etc timeout values. These are in nanoseconds. 
+ */ + unsigned long timer_slack_ns; + unsigned long default_timer_slack_ns; }; /* diff --git a/kernel/fork.c b/kernel/fork.c index 7ce2ebe..4308d75 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -987,6 +987,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->prev_utime = cputime_zero; p->prev_stime = cputime_zero; + p->default_timer_slack_ns = current->timer_slack_ns; + #ifdef CONFIG_DETECT_SOFTLOCKUP p->last_switch_count = 0; p->last_switch_timestamp = 0; diff --git a/kernel/sys.c b/kernel/sys.c index 038a7bc..1b96401 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1727,6 +1727,16 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, case PR_SET_TSC: error = SET_TSC_CTL(arg2); break; + case PR_GET_TIMERSLACK: + error = current->timer_slack_ns; + break; + case PR_SET_TIMERSLACK: + if (arg2 <= 0) + current->timer_slack_ns = + current->default_timer_slack_ns; + else + current->timer_slack_ns = arg2; + break; default: error = -EINVAL; break; -- cgit v0.10.2 From 90d6e24a3686325edea7748b966e138c9923017d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Sep 2008 15:55:35 -0700 Subject: hrtimer: make select() and poll() use the hrtimer range feature This patch makes the select() and poll() hrtimers use the new range feature and settings from the task struct. In addition, this includes the estimate_accuracy() function that Linus posted to lkml, but changed entirely based on other peoples lkml feedback. Signed-off-by: Arjan van de Ven diff --git a/fs/select.c b/fs/select.c index f6dceb5..5e61b43 100644 --- a/fs/select.c +++ b/fs/select.c @@ -28,6 +28,58 @@ #include + +/* + * Estimate expected accuracy in ns from a timeval. + * + * After quite a bit of churning around, we've settled on + * a simple thing of taking 0.1% of the timeout as the + * slack, with a cap of 100 msec. + * "nice" tasks get a 0.5% slack instead. + * + * Consider this comment an open invitation to come up with even + * better solutions.. + */ + +static unsigned long __estimate_accuracy(struct timespec *tv) +{ + unsigned long slack; + int divfactor = 1000; + + if (task_nice(current)) + divfactor = divfactor / 5; + + slack = tv->tv_nsec / divfactor; + slack += tv->tv_sec * (NSEC_PER_SEC/divfactor); + + if (slack > 100 * NSEC_PER_MSEC) + slack = 100 * NSEC_PER_MSEC; + return slack; +} + +static unsigned long estimate_accuracy(struct timespec *tv) +{ + unsigned long ret; + struct timespec now; + + /* + * Realtime tasks get a slack of 0 for obvious reasons. 
+ */ + + if (current->policy == SCHED_FIFO || + current->policy == SCHED_RR) + return 0; + + ktime_get_ts(&now); + now = timespec_sub(*tv, now); + ret = __estimate_accuracy(&now); + if (ret < current->timer_slack_ns) + return current->timer_slack_ns; + return ret; +} + + + struct poll_table_page { struct poll_table_page * next; struct poll_table_entry * entry; @@ -262,6 +314,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) struct poll_wqueues table; poll_table *wait; int retval, i, timed_out = 0; + unsigned long slack = 0; rcu_read_lock(); retval = max_select_fd(n, fds); @@ -278,6 +331,9 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) timed_out = 1; } + if (end_time) + slack = estimate_accuracy(end_time); + retval = 0; for (;;) { unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp; @@ -353,7 +409,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) to = &expire; } - if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS)) + if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) timed_out = 1; } __set_current_state(TASK_RUNNING); @@ -593,6 +649,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, poll_table* pt = &wait->pt; ktime_t expire, *to = NULL; int timed_out = 0, count = 0; + unsigned long slack = 0; /* Optimise the no-wait case */ if (end_time && !end_time->tv_sec && !end_time->tv_nsec) { @@ -600,6 +657,9 @@ static int do_poll(unsigned int nfds, struct poll_list *list, timed_out = 1; } + if (end_time) + slack = estimate_accuracy(end_time); + for (;;) { struct poll_list *walk; @@ -646,7 +706,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, to = &expire; } - if (!schedule_hrtimeout(to, HRTIMER_MODE_ABS)) + if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) timed_out = 1; } __set_current_state(TASK_RUNNING); -- cgit v0.10.2 From 584fb4a76413ec9215741e075e0dfb69173b213f Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Sep 2008 08:32:57 -0700 Subject: hrtimer: fix build bug found by Ingo in some randconfig configurations, hrtimers are used even though the hrtimer config if off; and it broke the build due to some of the new functions being on the wrong side of the ifdef. This patch moves the functions to the other side of the ifdef, fixing the build bug. Signed-off-by: Arjan van de Ven diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 28259c3..c407b33 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -198,13 +198,6 @@ struct hrtimer_cpu_base { #endif }; -#ifdef CONFIG_HIGH_RES_TIMERS -struct clock_event_device; - -extern void clock_was_set(void); -extern void hres_timers_resume(void); -extern void hrtimer_interrupt(struct clock_event_device *dev); - /* * In high resolution mode the time reference must be read accurate */ @@ -283,6 +276,13 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) return ktime_sub(timer->_expires, timer->base->get_time()); } +#ifdef CONFIG_HIGH_RES_TIMERS +struct clock_event_device; + +extern void clock_was_set(void); +extern void hres_timers_resume(void); +extern void hrtimer_interrupt(struct clock_event_device *dev); + /* * The resolution of the clocks. 
The resolution value is returned in * the clock_getres() system call to give application programmers an -- cgit v0.10.2 From 2ec02270c00f94b08fddfb68c37510a9fb47ac7c Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Sep 2008 09:36:56 -0700 Subject: hrtimer: another build fix More randconfig testing Signed-off-by: Arjan van de Ven diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index c407b33..4c1a834 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -198,19 +198,6 @@ struct hrtimer_cpu_base { #endif }; -/* - * In high resolution mode the time reference must be read accurate - */ -static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) -{ - return timer->base->get_time(); -} - -static inline int hrtimer_is_hres_active(struct hrtimer *timer) -{ - return timer->base->cpu_base->hres_active; -} - static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) { timer->_expires = time; @@ -284,6 +271,19 @@ extern void hres_timers_resume(void); extern void hrtimer_interrupt(struct clock_event_device *dev); /* + * In high resolution mode the time reference must be read accurate + */ +static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) +{ + return timer->base->get_time(); +} + +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return timer->base->cpu_base->hres_active; +} + +/* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an * idea of the (in)accuracy of timers. Timer values are rounded up to -- cgit v0.10.2 From 7e6e178ab1548c8d894a77593e757acf4510b8ba Mon Sep 17 00:00:00 2001 From: Pawel MOLL Date: Mon, 1 Sep 2008 10:12:11 +0100 Subject: genirq: irq_chip->startup() usage in setup_irq and set_irq_chained handler This patch clarifies usage of irq_chip->startup() callback: 1. The "if (startup) startup(); else enabled();" code in setup_irq() is unnecessary, as startup() falls back to enabled() via default callbacks, set by irq_chip_set_defaults(). 2. When using set_irq_chained_handler() the startup() was never called, which is not good at all... Fixed. And again - when startup() is not defined the call will fall back to enable() than to unmask() via default callbacks. 
Signed-off-by: Pawel Moll Acked-by: Benjamin Herrenschmidt Signed-off-by: Ingo Molnar diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 964964b..240c64d 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -587,7 +587,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, desc->status &= ~IRQ_DISABLED; desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; desc->depth = 0; - desc->chip->unmask(irq); + desc->chip->startup(irq); } spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ae1b684..9aa3e7b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -397,10 +397,7 @@ int setup_irq(unsigned int irq, struct irqaction *new) if (!(desc->status & IRQ_NOAUTOEN)) { desc->depth = 0; desc->status &= ~IRQ_DISABLED; - if (desc->chip->startup) - desc->chip->startup(irq); - else - desc->chip->enable(irq); + desc->chip->startup(irq); } else /* Undo nested disables: */ desc->depth = 1; -- cgit v0.10.2 From da8f2e170ea94cc20f8ebbc8ee8d127edb8f12f1 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 7 Sep 2008 10:47:46 -0700 Subject: hrtimer: add a hrtimer_start_range() function this patch adds a _range version of hrtimer_start() so that range timers can be created; the hrtimer_start() function is just a wrapper around this. In addition, hrtimer_start_expires() will now preserve existing ranges. Signed-off-by: Arjan van de Ven diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 4c1a834..1c0473e 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -350,13 +350,20 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } /* Basic timer operations: */ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode); +extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + unsigned long range_ns, const enum hrtimer_mode mode); extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer); static inline int hrtimer_start_expires(struct hrtimer *timer, enum hrtimer_mode mode) { - return hrtimer_start(timer, hrtimer_get_expires(timer), mode); + unsigned long delta; + ktime_t soft, hard; + soft = hrtimer_get_softexpires(timer); + hard = hrtimer_get_expires(timer); + delta = ktime_to_ns(ktime_sub(hard, soft)); + return hrtimer_start_range_ns(timer, hrtimer_get_expires(timer), delta, mode); } static inline int hrtimer_restart(struct hrtimer *timer) diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 0148300..a022209 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -945,9 +945,10 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) } /** - * hrtimer_start - (re)start an relative timer on the current CPU + * hrtimer_start_range_ns - (re)start an relative timer on the current CPU * @timer: the timer to be added * @tim: expiry time + * @delta_ns: "slack" range for the timer * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) * * Returns: @@ -955,7 +956,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) * 1 when the timer was active */ int -hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) +hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns, + const enum hrtimer_mode mode) { struct hrtimer_clock_base *base, *new_base; unsigned long flags; @@ -983,7 +985,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) #endif } - 
hrtimer_set_expires(timer, tim); + hrtimer_set_expires_range_ns(timer, tim, delta_ns); timer_stats_hrtimer_set_start_info(timer); @@ -1016,8 +1018,26 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) return ret; } +EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); + +/** + * hrtimer_start - (re)start an relative timer on the current CPU + * @timer: the timer to be added + * @tim: expiry time + * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) + * + * Returns: + * 0 on success + * 1 when the timer was active + */ +int +hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) +{ + return hrtimer_start_range_ns(timer, tim, 0, mode); +} EXPORT_SYMBOL_GPL(hrtimer_start); + /** * hrtimer_try_to_cancel - try to deactivate a timer * @timer: hrtimer to stop -- cgit v0.10.2 From 4ce105d30e08fb8a1783c55a0e48aa3fa200c455 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 7 Sep 2008 15:31:39 -0700 Subject: hrtimer: incorporate feedback from Peter Zijlstra (based on lkml review) * use rt_task() * task_nice() has a sign Signed-off-by: Arjan van de Ven diff --git a/fs/select.c b/fs/select.c index 5e61b43..fdd8584 100644 --- a/fs/select.c +++ b/fs/select.c @@ -46,7 +46,7 @@ static unsigned long __estimate_accuracy(struct timespec *tv) unsigned long slack; int divfactor = 1000; - if (task_nice(current)) + if (task_nice(current) > 0) divfactor = divfactor / 5; slack = tv->tv_nsec / divfactor; @@ -66,8 +66,7 @@ static unsigned long estimate_accuracy(struct timespec *tv) * Realtime tasks get a slack of 0 for obvious reasons. */ - if (current->policy == SCHED_FIFO || - current->policy == SCHED_RR) + if (rt_task(current)) return 0; ktime_get_ts(&now); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 1c0473e..95db11f 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -363,7 +363,7 @@ static inline int hrtimer_start_expires(struct hrtimer *timer, soft = hrtimer_get_softexpires(timer); hard = hrtimer_get_expires(timer); delta = ktime_to_ns(ktime_sub(hard, soft)); - return hrtimer_start_range_ns(timer, hrtimer_get_expires(timer), delta, mode); + return hrtimer_start_range_ns(timer, soft, delta, mode); } static inline int hrtimer_restart(struct hrtimer *timer) -- cgit v0.10.2 From 704af52bd13a5d9f3c60c496c68e752fafdfb434 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 7 Sep 2008 16:10:20 -0700 Subject: hrtimer: show the timer ranges in /proc/timer_list to help debugging and visibility of timer ranges, show them in the existing timer list in /proc/timer_list Signed-off-by: Arjan van de Ven diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 5224a32..122ee75 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -65,8 +65,10 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now) SEQ_printf(m, ", %s/%d", tmp, timer->start_pid); #endif SEQ_printf(m, "\n"); - SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n", + SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n", + (unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)), (unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)), + (long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now), (long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now)); } -- cgit v0.10.2 From 96d2ab484e7a9bafdab44b8c7d1ef5944319b18c Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 7 Sep 2008 16:08:55 -0700 Subject: hrtimer: fix signed/unsigned bug in slack 
estimator the slack estimator used unsigned math; however for very short delay it's possible that by the time you calculate the timeout, it's already passed and you get a negative time/slack... in an unsigned variable... which then gets turned into a 100 msec delay rather than zero. This patch fixes this by using a signed typee in the right places. Signed-off-by: Arjan van de Ven diff --git a/fs/select.c b/fs/select.c index fdd8584..448e440 100644 --- a/fs/select.c +++ b/fs/select.c @@ -41,9 +41,9 @@ * better solutions.. */ -static unsigned long __estimate_accuracy(struct timespec *tv) +static long __estimate_accuracy(struct timespec *tv) { - unsigned long slack; + long slack; int divfactor = 1000; if (task_nice(current) > 0) @@ -54,10 +54,13 @@ static unsigned long __estimate_accuracy(struct timespec *tv) if (slack > 100 * NSEC_PER_MSEC) slack = 100 * NSEC_PER_MSEC; + + if (slack < 0) + slack = 0; return slack; } -static unsigned long estimate_accuracy(struct timespec *tv) +static long estimate_accuracy(struct timespec *tv) { unsigned long ret; struct timespec now; @@ -330,7 +333,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) timed_out = 1; } - if (end_time) + if (end_time && !timed_out) slack = estimate_accuracy(end_time); retval = 0; @@ -656,7 +659,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list, timed_out = 1; } - if (end_time) + if (end_time && !timed_out) slack = estimate_accuracy(end_time); for (;;) { -- cgit v0.10.2 From 742fd1bcfb475c702c9b1dd6afc79c08f8dbf7dd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Aug 2008 17:36:12 +0900 Subject: sh: Provide movli.l/movco.l-based bitops. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h new file mode 100644 index 0000000..43b8e1a --- /dev/null +++ b/arch/sh/include/asm/bitops-llsc.h @@ -0,0 +1,144 @@ +#ifndef __ASM_SH_BITOPS_LLSC_H +#define __ASM_SH_BITOPS_LLSC_H + +static inline void set_bit(int nr, volatile void * addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! set_bit \n\t" + "or %3, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + : "=&z" (tmp), "=r" (a) + : "1" (a), "r" (mask) + : "t", "memory" + ); +} + +static inline void clear_bit(int nr, volatile void * addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! clear_bit \n\t" + "and %3, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + : "=&z" (tmp), "=r" (a) + : "1" (a), "r" (~mask) + : "t", "memory" + ); +} + +static inline void change_bit(int nr, volatile void * addr) +{ + int mask; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! change_bit \n\t" + "xor %3, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + : "=&z" (tmp), "=r" (a) + : "1" (a), "r" (mask) + : "t", "memory" + ); +} + +static inline int test_and_set_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! 
test_and_set_bit \n\t" + "mov %0, %2 \n\t" + "or %4, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + "and %4, %2 \n\t" + : "=&z" (tmp), "=r" (a), "=&r" (retval) + : "1" (a), "r" (mask) + : "t", "memory" + ); + + return retval != 0; +} + +static inline int test_and_clear_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! test_and_clear_bit \n\t" + "mov %0, %2 \n\t" + "and %5, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + "and %4, %2 \n\t" + "synco \n\t" + : "=&z" (tmp), "=r" (a), "=&r" (retval) + : "1" (a), "r" (mask), "r" (~mask) + : "t", "memory" + ); + + return retval != 0; +} + +static inline int test_and_change_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile unsigned int *a = addr; + unsigned long tmp; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! test_and_change_bit \n\t" + "mov %0, %2 \n\t" + "xor %4, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + "and %4, %2 \n\t" + "synco \n\t" + : "=&z" (tmp), "=r" (a), "=&r" (retval) + : "1" (a), "r" (mask) + : "t", "memory" + ); + + return retval != 0; +} + +#endif /* __ASM_SH_BITOPS_LLSC_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index d7d382f..367930d 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -13,6 +13,8 @@ #ifdef CONFIG_GUSA_RB #include +#elif defined(CONFIG_CPU_SH4A) +#include #else #include #endif -- cgit v0.10.2 From ee43a8442bd7a5d611f11958e6f8c8953d26f907 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Aug 2008 18:01:43 +0900 Subject: sh: Provide movli.l/movco.l-based cmpxchg. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h new file mode 100644 index 0000000..aee3bf2 --- /dev/null +++ b/arch/sh/include/asm/cmpxchg-llsc.h @@ -0,0 +1,71 @@ +#ifndef __ASM_SH_CMPXCHG_LLSC_H +#define __ASM_SH_CMPXCHG_LLSC_H + +static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) +{ + unsigned long retval; + unsigned long tmp; + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! xchg_u32 \n\t" + "mov %0, %2 \n\t" + "mov %4, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + "synco \n\t" + : "=&z"(tmp), "=r" (m), "=&r" (retval) + : "1" (m), "r" (val) + : "t", "memory" + ); + + return retval; +} + +static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) +{ + unsigned long retval; + unsigned long tmp; + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! xchg_u8 \n\t" + "mov %0, %2 \n\t" + "mov %4, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + "synco \n\t" + : "=&z"(tmp), "=r" (m), "=&r" (retval) + : "1" (m), "r" (val & 0xff) + : "t", "memory" + ); + + return retval; +} + +static inline unsigned long +__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) +{ + unsigned long retval; + unsigned long tmp; + + __asm__ __volatile__ ( + "1: \n\t" + "movli.l @%1, %0 ! 
__cmpxchg_u32 \n\t" + "mov %0, %2 \n\t" + "cmp/eq %2, %4 \n\t" + "bf 2f \n\t" + "mov %5, %0 \n\t" + "2: \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b \n\t" + "synco \n\t" + : "=&z" (tmp), "=r" (m), "=&r" (retval) + : "1" (m), "r" (old), "r" (new) + : "t", "memory" + ); + + return retval; +} + +#endif /* __ASM_SH_CMPXCHG_LLSC_H */ diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index 056d68c..fbac113 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -70,6 +70,8 @@ #ifdef CONFIG_GUSA_RB #include +#elif defined(CONFIG_CPU_SH4A) +#include #else #include #endif -- cgit v0.10.2 From 04ec080dcaad7d3c6d0b40b599c8e63da618f784 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 6 Aug 2008 17:29:14 +0900 Subject: sh: Kill off unused defines from asm/smp.h. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index 593343c..e2b79e6 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h @@ -21,11 +21,6 @@ extern int __cpu_number_map[NR_CPUS]; extern int __cpu_logical_map[NR_CPUS]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] -/* I've no idea what the real meaning of this is */ -#define PROC_CHANGE_PENALTY 20 - -#define NO_PROC_ID (-1) - #define SMP_MSG_FUNCTION 0 #define SMP_MSG_RESCHEDULE 1 #define SMP_MSG_FUNCTION_SINGLE 2 -- cgit v0.10.2 From 173a44dd1f406e9aa6fcf46c83b7c972d10ec930 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 6 Aug 2008 18:02:48 +0900 Subject: sh: smp: Provide a generic IPI handler. This provides a generic smp_message_recv() routine (based on the PPC one), that IPI IRQs can wrap in to. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index e2b79e6..1292c6d 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h @@ -26,6 +26,7 @@ extern int __cpu_logical_map[NR_CPUS]; #define SMP_MSG_FUNCTION_SINGLE 2 #define SMP_MSG_NR 3 +void smp_message_recv(unsigned int msg); void plat_smp_setup(void); void plat_prepare_cpus(unsigned int max_cpus); int plat_smp_processor_id(void); diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 60c5084..ebfdd36 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -3,7 +3,7 @@ * * SMP support for the SuperH processors. * - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2008 Paul Mundt * Copyright (C) 2006 - 2007 Akio Idehara * * This file is subject to the terms and conditions of the GNU General Public @@ -184,6 +184,24 @@ void arch_send_call_function_single_ipi(int cpu) plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); } +void smp_message_recv(unsigned int msg) +{ + switch (msg) { + case SMP_MSG_FUNCTION: + generic_smp_call_function_interrupt(); + break; + case SMP_MSG_RESCHEDULE: + break; + case SMP_MSG_FUNCTION_SINGLE: + generic_smp_call_function_single_interrupt(); + break; + default: + printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n", + smp_processor_id(), __func__, msg); + break; + } +} + /* Not really SMP stuff ... */ int setup_profiling_timer(unsigned int multiplier) { -- cgit v0.10.2 From c7936b9abcf5e043e73f183a37e81787f6178dd0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 6 Aug 2008 18:05:09 +0900 Subject: sh: smp: Hook in to the generic IPI handler for SH-X3 SMP. 
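The "wrap in to" part mentioned above means a platform's IPI interrupt handler only has to acknowledge its own interrupt source and then hand the message number to smp_message_recv(); the dispatch itself stays generic. A platform-neutral sketch of that shape (the IRQ base and the acknowledge step are placeholders, not taken from the SH-X3 code that follows):

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/smp.h>

#define EXAMPLE_IPI_IRQ_BASE    104     /* placeholder IRQ base */

static irqreturn_t example_ipi_handler(int irq, void *arg)
{
        unsigned int message = (unsigned int)(long)arg;

        /* platform-specific: clear/acknowledge the IPI source here */

        smp_message_recv(message);
        return IRQ_HANDLED;
}

static void __init example_ipi_init(void)
{
        int i;

        for (i = 0; i < SMP_MSG_NR; i++)
                if (request_irq(EXAMPLE_IPI_IRQ_BASE + i, example_ipi_handler,
                                IRQF_DISABLED, "IPI", (void *)(long)i))
                        printk(KERN_WARNING "IPI %d request failed\n", i);
}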
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index 1292c6d..5ebe0d0 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h @@ -32,8 +32,6 @@ void plat_prepare_cpus(unsigned int max_cpus); int plat_smp_processor_id(void); void plat_start_cpu(unsigned int cpu, unsigned long entry_point); void plat_send_ipi(unsigned int cpu, unsigned int message); -int plat_register_ipi_handler(unsigned int message, - void (*handler)(void *), void *arg); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi(cpumask_t mask); diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index e5e0684..edb4da0 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c @@ -1,7 +1,7 @@ /* * SH-X3 SMP * - * Copyright (C) 2007 Paul Mundt + * Copyright (C) 2007 - 2008 Paul Mundt * Copyright (C) 2007 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public @@ -14,6 +14,22 @@ #include #include +static irqreturn_t ipi_interrupt_handler(int irq, void *arg) +{ + unsigned int message = (unsigned int)(long)arg; + unsigned int cpu = hard_smp_processor_id(); + unsigned int offs = 4 * cpu; + unsigned int x; + + x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ + x &= (1 << (message << 2)); + ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ + + smp_message_recv(message); + + return IRQ_HANDLED; +} + void __init plat_smp_setup(void) { unsigned int cpu = 0; @@ -40,6 +56,13 @@ void __init plat_smp_setup(void) void __init plat_prepare_cpus(unsigned int max_cpus) { + int i; + + BUILD_BUG_ON(SMP_MSG_NR >= 8); + + for (i = 0; i < SMP_MSG_NR; i++) + request_irq(104 + i, ipi_interrupt_handler, IRQF_DISABLED, + "IPI", (void *)(long)i); } #define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12)) @@ -75,46 +98,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message) unsigned long addr = 0xfe410070 + (cpu * 4); BUG_ON(cpu >= 4); - BUG_ON(message >= SMP_MSG_NR); ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ } - -struct ipi_data { - void (*handler)(void *); - void *arg; - unsigned int message; -}; - -static irqreturn_t ipi_interrupt_handler(int irq, void *arg) -{ - struct ipi_data *id = arg; - unsigned int cpu = hard_smp_processor_id(); - unsigned int offs = 4 * cpu; - unsigned int x; - - x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ - x &= (1 << (id->message << 2)); - ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ - - id->handler(id->arg); - - return IRQ_HANDLED; -} - -static struct ipi_data ipi_handlers[SMP_MSG_NR]; - -int plat_register_ipi_handler(unsigned int message, - void (*handler)(void *), void *arg) -{ - struct ipi_data *id = &ipi_handlers[message]; - - BUG_ON(SMP_MSG_NR >= 8); - BUG_ON(message >= SMP_MSG_NR); - - id->handler = handler; - id->arg = arg; - id->message = message; - - return request_irq(104 + message, ipi_interrupt_handler, 0, "IPI", id); -} -- cgit v0.10.2 From 6f52707e6882eb3bc6920c3f59beb05d23d68354 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 6 Aug 2008 18:21:03 +0900 Subject: sh: smp: Hook up a timer IPI stub. 
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index 5ebe0d0..9d22cda 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h @@ -21,19 +21,26 @@ extern int __cpu_number_map[NR_CPUS]; extern int __cpu_logical_map[NR_CPUS]; #define cpu_logical_map(cpu) __cpu_logical_map[cpu] -#define SMP_MSG_FUNCTION 0 -#define SMP_MSG_RESCHEDULE 1 -#define SMP_MSG_FUNCTION_SINGLE 2 -#define SMP_MSG_NR 3 +enum { + SMP_MSG_FUNCTION, + SMP_MSG_RESCHEDULE, + SMP_MSG_FUNCTION_SINGLE, + SMP_MSG_TIMER, + + SMP_MSG_NR, /* must be last */ +}; void smp_message_recv(unsigned int msg); +void smp_timer_broadcast(cpumask_t mask); + void plat_smp_setup(void); void plat_prepare_cpus(unsigned int max_cpus); int plat_smp_processor_id(void); void plat_start_cpu(unsigned int cpu, unsigned long entry_point); void plat_send_ipi(unsigned int cpu, unsigned int message); -extern void arch_send_call_function_single_ipi(int cpu); -extern void arch_send_call_function_ipi(cpumask_t mask); + +void arch_send_call_function_single_ipi(int cpu); +void arch_send_call_function_ipi(cpumask_t mask); #else diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index ebfdd36..9cb3734 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -184,6 +184,21 @@ void arch_send_call_function_single_ipi(int cpu) plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); } +void smp_timer_broadcast(cpumask_t mask) +{ + int cpu; + + for_each_cpu_mask(cpu, mask) + plat_send_ipi(cpu, SMP_MSG_TIMER); +} + +static void ipi_timer(void) +{ + irq_enter(); + /* XXX ... */ + irq_exit(); +} + void smp_message_recv(unsigned int msg) { switch (msg) { @@ -195,6 +210,9 @@ void smp_message_recv(unsigned int msg) case SMP_MSG_FUNCTION_SINGLE: generic_smp_call_function_single_interrupt(); break; + case SMP_MSG_TIMER: + ipi_timer(); + break; default: printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n", smp_processor_id(), __func__, msg); -- cgit v0.10.2 From 8c24594deab89a484879bee270e948f0a556ed75 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 6 Aug 2008 18:37:07 +0900 Subject: sh: generic clockevent broadcast support. This hooks up GENERIC_CLOCKEVENTS_BROADCAST and a dummy local timer, which we call in to from the timer IPI when no other local timer is provided. 
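Putting this patch and the preceding SMP patches together, the broadcast tick for a secondary CPU whose only local timer is the dummy device flows roughly as sketched below (call chain reconstructed from the diffs in this series, not quoted from any single file):

/*
 * sending side (CPU driving the system timer):
 *   tick broadcast core
 *     -> dummy clockevent ->broadcast == smp_timer_broadcast(mask)
 *          -> plat_send_ipi(cpu, SMP_MSG_TIMER)        for each cpu in mask
 *
 * receiving side (secondary CPU):
 *   platform IPI interrupt handler
 *     -> smp_message_recv(SMP_MSG_TIMER)
 *          -> ipi_timer()
 *               -> local_timer_interrupt()
 *                    -> clk->event_handler(clk)        on the dummy clockevent
 */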
Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5131d50..399664c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -66,6 +66,9 @@ config GENERIC_TIME config GENERIC_CLOCKEVENTS def_bool n +config GENERIC_CLOCKEVENTS_BROADCAST + bool + config GENERIC_LOCKBREAK def_bool y depends on SMP && PREEMPT @@ -323,6 +326,7 @@ config CPU_SUBTYPE_SHX3 select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_SMP + select GENERIC_CLOCKEVENTS_BROADCAST # SH4AL-DSP Processor Support diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index 9d22cda..85b660c 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h @@ -33,6 +33,9 @@ enum { void smp_message_recv(unsigned int msg); void smp_timer_broadcast(cpumask_t mask); +void local_timer_interrupt(void); +void local_timer_setup(unsigned int cpu); + void plat_smp_setup(void); void plat_prepare_cpus(unsigned int max_cpus); int plat_smp_processor_id(void); diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 9cb3734..c55d314 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -84,9 +84,12 @@ asmlinkage void __cpuinit start_secondary(void) local_irq_enable(); + cpu = smp_processor_id(); + + /* Enable local timers */ + local_timer_setup(cpu); calibrate_delay(); - cpu = smp_processor_id(); smp_store_cpu_info(cpu); cpu_set(cpu, cpu_online_map); @@ -195,7 +198,7 @@ void smp_timer_broadcast(cpumask_t mask) static void ipi_timer(void) { irq_enter(); - /* XXX ... */ + local_timer_interrupt(); irq_exit(); } diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index 0758b5e..decee0a 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -1,9 +1,9 @@ /* - * arch/sh/kernel/time.c + * arch/sh/kernel/time_32.c * * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka * Copyright (C) 2000 Philipp Rumpf - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2008 Paul Mundt * Copyright (C) 2002 M. R. Brown * * Some code taken from i386 version. @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -260,6 +261,10 @@ void __init time_init(void) sys_timer = get_sys_timer(); printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + local_timer_setup(smp_processor_id()); +#endif + if (sys_timer->ops->read) clocksource_sh.read = sys_timer->ops->read; diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile index bcf244f..0b7f857 100644 --- a/arch/sh/kernel/timers/Makefile +++ b/arch/sh/kernel/timers/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_SH_TMU) += timer-tmu.o obj-$(CONFIG_SH_MTU2) += timer-mtu2.o obj-$(CONFIG_SH_CMT) += timer-cmt.o +obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += timer-broadcast.o diff --git a/arch/sh/kernel/timers/timer-broadcast.c b/arch/sh/kernel/timers/timer-broadcast.c new file mode 100644 index 0000000..c231763 --- /dev/null +++ b/arch/sh/kernel/timers/timer-broadcast.c @@ -0,0 +1,57 @@ +/* + * Dummy local timer + * + * Copyright (C) 2008 Paul Mundt + * + * cloned from: + * + * linux/arch/arm/mach-realview/localtimer.c + * + * Copyright (C) 2002 ARM Ltd. + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_PER_CPU(struct clock_event_device, local_clockevent); + +/* + * Used on SMP for either the local timer or SMP_MSG_TIMER + */ +void local_timer_interrupt(void) +{ + struct clock_event_device *clk = &__get_cpu_var(local_clockevent); + + clk->event_handler(clk); +} + +static void dummy_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *clk) +{ +} + +void __cpuinit local_timer_setup(unsigned int cpu) +{ + struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); + + clk->name = "dummy_timer"; + clk->features = CLOCK_EVT_FEAT_DUMMY; + clk->rating = 200; + clk->mult = 1; + clk->set_mode = dummy_timer_set_mode; + clk->broadcast = smp_timer_broadcast; + clk->cpumask = cpumask_of_cpu(cpu); + + clockevents_register_device(clk); +} -- cgit v0.10.2 From 71f0bdcab69ab36b1e939d36063aaf6c4a164ed3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 6 Aug 2008 18:39:32 +0900 Subject: sh: smp: shove a cpu_relax() in the plat_start_cpu() busy loop. Without this, certain versions of GCC will happily optimize the entire loop out. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index edb4da0..b8869aa 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c @@ -82,7 +82,7 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point) ctrl_outl(STBCR_MSTP, STBCR_REG(cpu)); while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP)) - ; + cpu_relax(); /* Start up secondary processor by sending a reset */ ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu)); -- cgit v0.10.2 From 53c01d2dc38cd3cfaf5591ec5c6c9c4e437cfec2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 8 Aug 2008 01:18:48 +0900 Subject: sh: Early dummy clockevent registration on boot CPU. The dummy timer needs to be registered on the boot CPU before the system timer clockevent is registered, or broadcasting doesn't work as advertized. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index decee0a..e2f74cc 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -254,6 +254,10 @@ void __init time_init(void) set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + local_timer_setup(smp_processor_id()); +#endif + /* * Find the timer to use as the system timer, it will be * initialized for us. @@ -261,9 +265,6 @@ void __init time_init(void) sys_timer = get_sys_timer(); printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); -#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST - local_timer_setup(smp_processor_id()); -#endif if (sys_timer->ops->read) clocksource_sh.read = sys_timer->ops->read; -- cgit v0.10.2 From 7d96169cb769f459dd6730b06fa3a88cb0c9297d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 8 Aug 2008 01:23:34 +0900 Subject: sh: Display CPU information in show_regs(). 
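Going back to the cpu_relax() change two patches up, the same pattern applies to any busy-wait on a hardware register: the changelog's point is that the loop body needs cpu_relax(), which acts as a compiler barrier (and a yield hint on CPUs that have one), so the status register really is re-read on every pass. Illustrative shape only; the register address and bit below are made up:

#include <asm/io.h>             /* ctrl_inl() on sh */
#include <asm/processor.h>      /* cpu_relax() */

#define EXAMPLE_STATUS_REG      0xfe400000      /* made-up address */
#define EXAMPLE_READY           0x00000001      /* made-up ready bit */

static void example_wait_ready(void)
{
        /* per the changelog above, an empty ";" body here let certain
         * GCC versions optimize the whole loop away */
        while (!(ctrl_inl(EXAMPLE_STATUS_REG) & EXAMPLE_READY))
                cpu_relax();
}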
Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 3326a45..7326313 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -111,15 +111,21 @@ void show_regs(struct pt_regs * regs) { printk("\n"); printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm); + printk("CPU : %d %s (%s %.*s)\n", + smp_processor_id(), print_tainted(), init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + print_symbol("PC is at %s\n", instruction_pointer(regs)); + print_symbol("PR is at %s\n", regs->pr); + printk("PC : %08lx SP : %08lx SR : %08lx ", regs->pc, regs->regs[15], regs->sr); #ifdef CONFIG_MMU - printk("TEA : %08x ", ctrl_inl(MMU_TEA)); + printk("TEA : %08x\n", ctrl_inl(MMU_TEA)); #else - printk(" "); + printk("\n"); #endif - printk("%s\n", print_tainted()); printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", regs->regs[0],regs->regs[1], -- cgit v0.10.2 From fa43972fab24a3c050e880a7831f9378c6cebc0b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 4 Sep 2008 18:53:58 +0900 Subject: sh: fixup many sparse errors. Signed-off-by: Paul Mundt diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c index f386997..efdba6b 100644 --- a/arch/sh/boot/compressed/misc_32.c +++ b/arch/sh/boot/compressed/misc_32.c @@ -191,7 +191,7 @@ long* stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { - output_data = 0; + output_data = NULL; output_ptr = PHYSADDR((unsigned long)&_text+PAGE_SIZE); #ifdef CONFIG_29BIT output_ptr |= P2SEG; diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h index 720dfab..f9c8858 100644 --- a/arch/sh/include/asm/clock.h +++ b/arch/sh/include/asm/clock.h @@ -39,6 +39,7 @@ struct clk { /* Should be defined by processor-specific code */ void arch_init_clk_ops(struct clk_ops **, int type); +int __init arch_clk_init(void); /* arch/sh/kernel/cpu/clock.c */ int clk_init(void); diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index a4fbf0c..e49cfee 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -194,6 +194,8 @@ __BUILD_MEMORY_STRING(w, u16) #define IO_SPACE_LIMIT 0xffffffff +extern unsigned long generic_io_base; + /* * This function provides a method for the generic case where a board-specific * ioport_map simply needs to return the port + some arbitrary port base. 
@@ -203,8 +205,6 @@ __BUILD_MEMORY_STRING(w, u16) */ static inline void __set_io_port_base(unsigned long pbase) { - extern unsigned long generic_io_base; - generic_io_base = pbase; } diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h index 6195a53..d319baa 100644 --- a/arch/sh/include/asm/irq.h +++ b/arch/sh/include/asm/irq.h @@ -41,6 +41,9 @@ static inline int generic_irq_demux(int irq) #define irq_canonicalize(irq) (irq) #define irq_demux(irq) sh_mv.mv_irq_demux(irq) +void init_IRQ(void); +asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs); + #ifdef CONFIG_IRQSTACKS extern void irq_ctx_init(int cpu); extern void irq_ctx_exit(int cpu); diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 15d9f92..58e2be5 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -45,9 +45,13 @@ enum cpu_type { /* Forward decl */ struct sh_cpuinfo; +struct seq_operations; + +extern struct pt_regs fake_swapper_regs; /* arch/sh/kernel/setup.c */ const char *get_cpu_subtype(struct sh_cpuinfo *c); +extern const struct seq_operations cpuinfo_op; #ifdef CONFIG_VSYSCALL int vsyscall_init(void); diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 0dadd75..41d2321 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -10,6 +10,7 @@ #ifdef __KERNEL__ #include +#include #include #include #include @@ -44,6 +45,8 @@ extern struct sh_cpuinfo cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] +asmlinkage void __init sh_cpu_init(void); + /* * User space process size: 2GB. * diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 770d516..16609bc 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -169,8 +169,6 @@ struct thread_struct { #define INIT_MMAP \ { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } -extern struct pt_regs fake_swapper_regs; - #define INIT_THREAD { \ .sp = sizeof(init_stack) + \ (long) &init_stack, \ diff --git a/arch/sh/include/asm/rtc.h b/arch/sh/include/asm/rtc.h index 1813f42..f7b010d 100644 --- a/arch/sh/include/asm/rtc.h +++ b/arch/sh/include/asm/rtc.h @@ -1,6 +1,7 @@ #ifndef _ASM_RTC_H #define _ASM_RTC_H +void time_init(void); extern void (*board_time_init)(void); extern void (*rtc_sh_get_time)(struct timespec *); extern int (*rtc_sh_set_time)(const time_t); diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h index 55a2bd3..554f865 100644 --- a/arch/sh/include/asm/setup.h +++ b/arch/sh/include/asm/setup.h @@ -1,6 +1,8 @@ #ifndef _SH_SETUP_H #define _SH_SETUP_H +#include + #define COMMAND_LINE_SIZE 256 #ifdef __KERNEL__ diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h new file mode 100644 index 0000000..c1e2b8d --- /dev/null +++ b/arch/sh/include/asm/syscalls.h @@ -0,0 +1,25 @@ +#ifndef __ASM_SH_SYSCALLS_H +#define __ASM_SH_SYSCALLS_H + +#ifdef __KERNEL__ + +struct old_utsname; + +asmlinkage int old_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + int fd, unsigned long off); +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +asmlinkage int sys_ipc(uint call, int first, int second, + int third, void __user *ptr, long fifth); +asmlinkage int sys_uname(struct old_utsname 
__user *name); + +#ifdef CONFIG_SUPERH32 +# include "syscalls_32.h" +#else +# include "syscalls_64.h" +#endif + +#endif /* __KERNEL__ */ +#endif /* __ASM_SH_SYSCALLS_H */ diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h new file mode 100644 index 0000000..104c5e6 --- /dev/null +++ b/arch/sh/include/asm/syscalls_32.h @@ -0,0 +1,56 @@ +#ifndef __ASM_SH_SYSCALLS_32_H +#define __ASM_SH_SYSCALLS_32_H + +#ifdef __KERNEL__ + +#include +#include +#include + +struct pt_regs; + +asmlinkage int sys_fork(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, + unsigned long parent_tidptr, + unsigned long child_tidptr, + struct pt_regs __regs); +asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, + char __user * __user *uenvp, unsigned long r7, + struct pt_regs __regs); +asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, + struct old_sigaction __user *oact); +asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf, + size_t count, long dummy, loff_t pos); +asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf, + size_t count, long dummy, loff_t pos); +asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1, + u32 len0, u32 len1, int advice); + +/* Misc syscall related bits */ +asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); +asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, + unsigned long thread_info_flags); + +#endif /* __KERNEL__ */ +#endif /* __ASM_SH_SYSCALLS_32_H */ diff --git a/arch/sh/include/asm/syscalls_64.h b/arch/sh/include/asm/syscalls_64.h new file mode 100644 index 0000000..751fd88 --- /dev/null +++ b/arch/sh/include/asm/syscalls_64.h @@ -0,0 +1,34 @@ +#ifndef __ASM_SH_SYSCALLS_64_H +#define __ASM_SH_SYSCALLS_64_H + +#ifdef __KERNEL__ + +#include +#include +#include + +struct pt_regs; + +asmlinkage int sys_fork(unsigned long r2, unsigned long r3, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs); +asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs); +asmlinkage int sys_vfork(unsigned long r2, unsigned long r3, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs); +asmlinkage int sys_execve(char *ufilename, char **uargv, + char **uenvp, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs); + +/* Misc syscall 
related bits */ +asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs); +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); + +#endif /* __KERNEL__ */ +#endif /* __ASM_SH_SYSCALLS_64_H */ diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index fbac113..6160fe4 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -127,6 +127,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, }) extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); +void free_initmem(void); +void free_initrd_mem(unsigned long start, unsigned long end); extern void *set_exception_table_vec(unsigned int vec, void *handler); @@ -179,8 +181,8 @@ BUILD_TRAP_HANDLER(fpu_state_restore); #define arch_align_stack(x) (x) struct mem_access { - unsigned long (*from)(void *dst, const void *src, unsigned long cnt); - unsigned long (*to)(void *dst, const void *src, unsigned long cnt); + unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt); + unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt); }; #ifdef CONFIG_SUPERH32 diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index f11bcf0..16509ed 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -99,4 +99,20 @@ do { \ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, struct mem_access *ma); +asmlinkage void do_address_error(struct pt_regs *regs, + unsigned long writeaccess, + unsigned long address); +asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); + #endif /* __ASM_SH_SYSTEM_32_H */ diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index f5eb56e..b7e46d5 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c @@ -294,9 +294,10 @@ arch_init_clk_ops(struct clk_ops **ops, int type) { } -void __init __attribute__ ((weak)) +int __init __attribute__ ((weak)) arch_clk_init(void) { + return 0; } static int show_clocks(char *buf, char **start, off_t off, @@ -331,7 +332,7 @@ int __init clk_init(void) ret |= clk_register(clk); } - arch_clk_init(); + ret |= arch_clk_init(); /* Kick the child clocks.. 
*/ propagate_rate(&master_clk); diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c index db76944..f1b214d 100644 --- a/arch/sh/kernel/io_generic.c +++ b/arch/sh/kernel/io_generic.c @@ -81,7 +81,7 @@ void generic_insb(unsigned long port, void *dst, unsigned long count) volatile u8 *port_addr; u8 *buf = dst; - port_addr = (volatile u8 *)__ioport_map(port, 1); + port_addr = (volatile u8 __force *)__ioport_map(port, 1); while (count--) *buf++ = *port_addr; } @@ -91,7 +91,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count) volatile u16 *port_addr; u16 *buf = dst; - port_addr = (volatile u16 *)__ioport_map(port, 2); + port_addr = (volatile u16 __force *)__ioport_map(port, 2); while (count--) *buf++ = *port_addr; @@ -103,7 +103,7 @@ void generic_insl(unsigned long port, void *dst, unsigned long count) volatile u32 *port_addr; u32 *buf = dst; - port_addr = (volatile u32 *)__ioport_map(port, 4); + port_addr = (volatile u32 __force *)__ioport_map(port, 4); while (count--) *buf++ = *port_addr; diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index 129b2cf..8bfdd27 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 7326313..914e543 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -26,6 +26,7 @@ #include #include #include +#include static int hlt_counter; int ubc_usercnt = 0; diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index b9dbd2d..d0dddc4 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 035cb30..84bf342 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -27,6 +27,7 @@ #include #include #include +#include /* * does not yet catch signals sent when the child dies. 
@@ -105,6 +106,7 @@ void ptrace_disable(struct task_struct *child) long arch_ptrace(struct task_struct *child, long request, long addr, long data) { struct user * dummy = NULL; + unsigned long __user *datap = (unsigned long __user *)data; int ret; switch (request) { @@ -133,7 +135,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) tmp = !!tsk_used_math(child); else tmp = 0; - ret = put_user(tmp, (unsigned long __user *)data); + ret = put_user(tmp, datap); break; } @@ -202,7 +204,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) } ret = 0; - if (put_user(tmp, (unsigned long *) data)) { + if (put_user(tmp, datap)) { ret = -EFAULT; break; } diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index 9c64248..e15b099 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -35,6 +35,7 @@ #include #include #include +#include #include /* This mask defines the bits of the SR which the user is not allowed to diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index de83205..6d0899e 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 51689d2..345de2f 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -247,7 +248,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; sigset_t set; - stack_t st; int r0; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) @@ -265,11 +265,9 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) goto badframe; - if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) + if (do_sigaltstack(&frame->uc.uc_stack, NULL, + regs->regs[15]) == -EFAULT) goto badframe; - /* It is more difficult to avoid calling this function than to - call it and ignore errors. */ - do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame); return r0; @@ -429,7 +427,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(NULL, &frame->uc.uc_link); err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[15]), diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 9061b86..0dfb889 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -186,7 +187,7 @@ asmlinkage int sys_ipc(uint call, int first, int second, union semun fourth; if (!ptr) return -EINVAL; - if (get_user(fourth.__pad, (void * __user *) ptr)) + if (get_user(fourth.__pad, (void __user * __user *) ptr)) return -EFAULT; return sys_semctl (first, second, third, fourth); } @@ -261,13 +262,13 @@ asmlinkage int sys_ipc(uint call, int first, int second, return -EINVAL; } -asmlinkage int sys_uname(struct old_utsname * name) +asmlinkage int sys_uname(struct old_utsname __user *name) { int err; if (!name) return -EFAULT; down_read(&uts_sem); - err = copy_to_user(name, utsname(), sizeof (*name)); + err = copy_to_user(name, utsname(), sizeof(*name)); up_read(&uts_sem); return err?-EFAULT:0; } diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c index f0aa5c3..dbba1e1 100644 --- a/arch/sh/kernel/sys_sh32.c +++ b/arch/sh/kernel/sys_sh32.c @@ -16,6 +16,7 @@ #include #include #include +#include /* * sys_pipe() is the normal C calling standard for creating @@ -37,13 +38,13 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, return error; } -asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf, +asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf, size_t count, long dummy, loff_t pos) { return sys_pread64(fd, buf, count, pos); } -asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf, +asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf, size_t count, long dummy, loff_t pos) { return sys_pwrite64(fd, buf, count, pos); diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index e2f74cc..23ca711 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -16,6 +16,7 @@ #include #include #include +#include /* for rtc_lock */ #include #include #include diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c index d20c8c3..c127293 100644 --- a/arch/sh/kernel/timers/timer-cmt.c +++ b/arch/sh/kernel/timers/timer-cmt.c @@ -174,7 +174,7 @@ static int cmt_timer_init(void) return 0; } -struct sys_timer_ops cmt_timer_ops = { +static struct sys_timer_ops cmt_timer_ops = { .init = cmt_timer_init, .start = cmt_timer_start, .stop = cmt_timer_stop, diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 511a942..4901f67 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -192,6 +192,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, int ret, index, count; unsigned long *rm, *rn; unsigned char *src, *dst; + unsigned char __user *srcu, *dstu; index = (instruction>>8)&15; /* 0x0F00 */ rn = ®s->regs[index]; @@ -206,28 +207,28 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, case 0: /* mov.[bwl] to/from memory via r0+rn */ if (instruction & 8) { /* from memory */ - src = (unsigned char*) *rm; - src += regs->regs[0]; - dst = (unsigned char*) rn; - *(unsigned long*)dst = 0; + srcu = (unsigned char __user *)*rm; + srcu += regs->regs[0]; + dst = (unsigned char *)rn; + *(unsigned 
long *)dst = 0; #if !defined(__LITTLE_ENDIAN__) dst += 4-count; #endif - if (ma->from(dst, src, count)) + if (ma->from(dst, srcu, count)) goto fetch_fault; sign_extend(count, dst); } else { /* to memory */ - src = (unsigned char*) rm; + src = (unsigned char *)rm; #if !defined(__LITTLE_ENDIAN__) src += 4-count; #endif - dst = (unsigned char*) *rn; - dst += regs->regs[0]; + dstu = (unsigned char __user *)*rn; + dstu += regs->regs[0]; - if (ma->to(dst, src, count)) + if (ma->to(dstu, src, count)) goto fetch_fault; } ret = 0; @@ -235,10 +236,10 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, case 1: /* mov.l Rm,@(disp,Rn) */ src = (unsigned char*) rm; - dst = (unsigned char*) *rn; - dst += (instruction&0x000F)<<2; + dstu = (unsigned char __user *)*rn; + dstu += (instruction&0x000F)<<2; - if (ma->to(dst, src, 4)) + if (ma->to(dstu, src, 4)) goto fetch_fault; ret = 0; break; @@ -247,28 +248,28 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, if (instruction & 4) *rn -= count; src = (unsigned char*) rm; - dst = (unsigned char*) *rn; + dstu = (unsigned char __user *)*rn; #if !defined(__LITTLE_ENDIAN__) src += 4-count; #endif - if (ma->to(dst, src, count)) + if (ma->to(dstu, src, count)) goto fetch_fault; ret = 0; break; case 5: /* mov.l @(disp,Rm),Rn */ - src = (unsigned char*) *rm; - src += (instruction&0x000F)<<2; - dst = (unsigned char*) rn; - *(unsigned long*)dst = 0; + srcu = (unsigned char __user *)*rm; + srcu += (instruction & 0x000F) << 2; + dst = (unsigned char *)rn; + *(unsigned long *)dst = 0; - if (ma->from(dst, src, 4)) + if (ma->from(dst, srcu, 4)) goto fetch_fault; ret = 0; break; case 6: /* mov.[bwl] from memory, possibly with post-increment */ - src = (unsigned char*) *rm; + srcu = (unsigned char __user *)*rm; if (instruction & 4) *rm += count; dst = (unsigned char*) rn; @@ -277,7 +278,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, #if !defined(__LITTLE_ENDIAN__) dst += 4-count; #endif - if (ma->from(dst, src, count)) + if (ma->from(dst, srcu, count)) goto fetch_fault; sign_extend(count, dst); ret = 0; @@ -286,28 +287,28 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, case 8: switch ((instruction&0xFF00)>>8) { case 0x81: /* mov.w R0,@(disp,Rn) */ - src = (unsigned char*) ®s->regs[0]; + src = (unsigned char *) ®s->regs[0]; #if !defined(__LITTLE_ENDIAN__) src += 2; #endif - dst = (unsigned char*) *rm; /* called Rn in the spec */ - dst += (instruction&0x000F)<<1; + dstu = (unsigned char __user *)*rm; /* called Rn in the spec */ + dstu += (instruction & 0x000F) << 1; - if (ma->to(dst, src, 2)) + if (ma->to(dstu, src, 2)) goto fetch_fault; ret = 0; break; case 0x85: /* mov.w @(disp,Rm),R0 */ - src = (unsigned char*) *rm; - src += (instruction&0x000F)<<1; - dst = (unsigned char*) ®s->regs[0]; - *(unsigned long*)dst = 0; + srcu = (unsigned char __user *)*rm; + srcu += (instruction & 0x000F) << 1; + dst = (unsigned char *) ®s->regs[0]; + *(unsigned long *)dst = 0; #if !defined(__LITTLE_ENDIAN__) dst += 2; #endif - if (ma->from(dst, src, 2)) + if (ma->from(dst, srcu, 2)) goto fetch_fault; sign_extend(2, dst); ret = 0; @@ -333,7 +334,8 @@ static inline int handle_delayslot(struct pt_regs *regs, struct mem_access *ma) { opcode_t instruction; - void *addr = (void *)(regs->pc + instruction_size(old_instruction)); + void __user *addr = (void __user *)(regs->pc + + instruction_size(old_instruction)); if (copy_from_user(&instruction, addr, sizeof(instruction))) { /* the 
instruction-fetch faulted */ @@ -559,7 +561,7 @@ asmlinkage void do_address_error(struct pt_regs *regs, } set_fs(USER_DS); - if (copy_from_user(&instruction, (void *)(regs->pc), + if (copy_from_user(&instruction, (void __user *)(regs->pc), sizeof(instruction))) { /* Argh. Fault on the instruction itself. This should never happen non-SMP @@ -589,7 +591,7 @@ uspace_segv: die("unaligned program counter", regs, error_code); set_fs(KERNEL_DS); - if (copy_from_user(&instruction, (void *)(regs->pc), + if (copy_from_user(&instruction, (void __user *)(regs->pc), sizeof(instruction))) { /* Argh. Fault on the instruction itself. This should never happen non-SMP diff --git a/arch/sh/lib/div64-generic.c b/arch/sh/lib/div64-generic.c index 4bef3b5..60e76aa 100644 --- a/arch/sh/lib/div64-generic.c +++ b/arch/sh/lib/div64-generic.c @@ -3,6 +3,7 @@ */ #include +#include extern uint64_t __xdiv64_32(u64 n, u32 d); diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 64b8f7f..7619a0f 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -44,7 +44,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size, */ dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL); - ret_nocache = ioremap_nocache(virt_to_phys(ret), size); + ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); if (!ret_nocache) { free_pages((unsigned long)ret, order); return NULL; diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c index 677dd57..91ed4e6 100644 --- a/arch/sh/mm/pg-nommu.c +++ b/arch/sh/mm/pg-nommu.c @@ -13,6 +13,7 @@ #include #include #include +#include void copy_page(void *to, void *from) { diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c index 15111bc..71c742b 100644 --- a/arch/sh/mm/tlb-nommu.c +++ b/arch/sh/mm/tlb-nommu.c @@ -10,6 +10,7 @@ #include #include #include +#include /* * Nothing too terribly exciting here .. diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 3df2aae..f5aebc9 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -1113,7 +1113,7 @@ static const char *sci_type(struct uart_port *port) case PORT_IRDA: return "irda"; } - return 0; + return NULL; } static void sci_release_port(struct uart_port *port) -- cgit v0.10.2 From 5840263ecb95e55a2d248fc740644a2c9171a61c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 5 Sep 2008 15:36:39 +0900 Subject: sh: Don't enable clockevents broadcasting on UP SH-X3 builds. Fixes up compile errors with missing timer definitions. It's pointless to have this enabled anyways if CONFIG_SMP=n. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 399664c..af2b174 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -326,7 +326,7 @@ config CPU_SUBTYPE_SHX3 select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA select SYS_SUPPORTS_SMP - select GENERIC_CLOCKEVENTS_BROADCAST + select GENERIC_CLOCKEVENTS_BROADCAST if SMP # SH4AL-DSP Processor Support -- cgit v0.10.2 From 3159e7d62ad13f71ef3fe029c145594d8caa580d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 5 Sep 2008 15:39:12 +0900 Subject: sh: Add support for memory hot-remove. 
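A note before the patch itself: remove_memory() in the change below works in page-frame numbers rather than byte addresses. A minimal user-space sketch of that conversion, using an illustrative 4 KiB page size and a made-up address range (none of the constants come from a real board):

#include <stdio.h>

/* Illustrative only: 4 KiB pages, so PAGE_SHIFT is 12. */
#define PAGE_SHIFT 12

int main(void)
{
	/* Example range: 64 MiB of RAM starting at physical 0x0c000000. */
	unsigned long start = 0x0c000000UL;
	unsigned long size  = 64UL << 20;

	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn   = start_pfn + (size >> PAGE_SHIFT);

	/* These are the bounds a hot-remove request would hand to the
	 * memory-hotplug core (offline_pages() in the patch below). */
	printf("start_pfn = %#lx, end_pfn = %#lx\n", start_pfn, end_pfn);
	return 0;
}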
Signed-off-by: Paul Mundt diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 8a03926..f8e6dc5 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -134,6 +134,10 @@ config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y depends on SPARSEMEM +config ARCH_ENABLE_MEMORY_HOTREMOVE + def_bool y + depends on SPARSEMEM + config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index b75a7ac..d4681a5 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -292,4 +292,21 @@ int memory_add_physaddr_to_nid(u64 addr) } EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif + +#ifdef CONFIG_MEMORY_HOTREMOVE +int remove_memory(u64 start, u64 size) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long end_pfn = start_pfn + (size >> PAGE_SHIFT); + int ret; + + ret = offline_pages(start_pfn, end_pfn, 120 * HZ); + if (unlikely(ret)) + printk("%s: Failed, offline_pages() == %d\n", __func__, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(remove_memory); #endif + +#endif /* CONFIG_MEMORY_HOTPLUG */ -- cgit v0.10.2 From c6feb6142cb85228e73497a309f475a0d7279318 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Fri, 5 Sep 2008 16:06:42 +0900 Subject: sh: early cached_to_uncached initialization. statically initialise the cached_to_uncached offset, so that we can use it immediatly. Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index 16509ed..f7f1056 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -58,7 +58,8 @@ do { \ last = __last; \ } while (0) -#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text"))) +#define __uses_jump_to_uncached \ + noinline __attribute__ ((__section__ (".uncached.text"))) /* * Jump to uncached area. diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index d4681a5..f1a4942 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -23,7 +23,19 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); pgd_t swapper_pg_dir[PTRS_PER_PGD]; -unsigned long cached_to_uncached = 0; + +#ifdef CONFIG_SUPERH32 +/* + * Handle trivial transitions between cached and uncached + * segments, making use of the 1:1 mapping relationship in + * 512MB lowmem. + * + * This is the offset of the uncached section from its cached alias. + * Default value only valid in 29 bit mode, in 32bit mode will be + * overridden in pmb_init. + */ +unsigned long cached_to_uncached = P2SEG - P1SEG; +#endif #ifdef CONFIG_MMU static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) @@ -58,9 +70,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) } set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); - - if (cached_to_uncached) - flush_tlb_one(get_asid(), addr); + flush_tlb_one(get_asid(), addr); } /* @@ -165,15 +175,6 @@ void __init paging_init(void) #ifdef CONFIG_SUPERH32 /* Set up the uncached fixmap */ set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); - -#ifdef CONFIG_29BIT - /* - * Handle trivial transitions between cached and uncached - * segments, making use of the 1:1 mapping relationship in - * 512MB lowmem. - */ - cached_to_uncached = P2SEG - P1SEG; -#endif #endif } -- cgit v0.10.2 From 28d6e52cf7e881834d2dab370afa20b6223f726c Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Fri, 5 Sep 2008 16:14:17 +0900 Subject: sh: Fix up broken 32-bit initrd support. 
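Background for the cached_to_uncached change above: in the legacy 29-bit SH memory map, the P1 segment (0x80000000, cached) and the P2 segment (0xa0000000, uncached) are fixed 1:1 aliases of the same physical memory, which is why a single constant offset is enough. A stand-alone sketch of that translation; the segment bases are the standard SH values and the helper name is invented purely for illustration:

#define P1SEG 0x80000000UL	/* cached, fixed 1:1 mapping */
#define P2SEG 0xa0000000UL	/* uncached alias of the same RAM */

/* Offset added to a cached P1 address to reach its uncached alias;
 * this mirrors the statically initialised cached_to_uncached value. */
#define CACHED_TO_UNCACHED (P2SEG - P1SEG)

static inline unsigned long p1_to_p2(unsigned long cached_addr)
{
	return cached_addr + CACHED_TO_UNCACHED;
}

On 32-bit (PMB) configurations the offset is no longer a compile-time constant, which is why the kernel keeps it in a variable that pmb_init() can later overwrite, as the comment in the patch notes.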
Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 6d0899e..fc7171a 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -249,17 +249,18 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) ROOT_DEV = Root_RAM0; if (LOADER_TYPE && INITRD_START) { - if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { - reserve_bootmem(INITRD_START + __MEMORY_START, - INITRD_SIZE, BOOTMEM_DEFAULT); - initrd_start = INITRD_START + PAGE_OFFSET + - __MEMORY_START; + unsigned long initrd_start_phys = INITRD_START + __MEMORY_START; + + if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) { + reserve_bootmem(initrd_start_phys, INITRD_SIZE, + BOOTMEM_DEFAULT); + initrd_start = (unsigned long)__va(initrd_start_phys); initrd_end = initrd_start + INITRD_SIZE; } else { printk("initrd extends beyond end of memory " - "(0x%08lx > 0x%08lx)\ndisabling initrd\n", - INITRD_START + INITRD_SIZE, - max_low_pfn << PAGE_SHIFT); + "(0x%08lx > 0x%08lx)\ndisabling initrd\n", + initrd_start_phys + INITRD_SIZE, + PFN_PHYS(max_low_pfn)); initrd_start = 0; } } -- cgit v0.10.2 From 96e14e54a6abd5a4bcd75e33962f87bef145d1f6 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Fri, 5 Sep 2008 16:17:15 +0900 Subject: sh: vmalloc pgtable sync fix. This fixes a problem in the code which copies the vmalloc portion of the kernel's page table into the current user space page table. The addition of the four level page table code breaks on folded page tables, because the pud level is always present (although folded). This updates the code to use the same style of updates for the pud as is used for the pgd level. Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 0c776fd..e8efda9 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -61,7 +61,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, pgd = get_TTB() + offset; pgd_k = swapper_pg_dir + offset; - /* This will never happen with the folded page table. */ if (!pgd_present(*pgd)) { if (!pgd_present(*pgd_k)) goto bad_area_nosemaphore; @@ -71,9 +70,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, pud = pud_offset(pgd, address); pud_k = pud_offset(pgd_k, address); - if (pud_present(*pud) || !pud_present(*pud_k)) - goto bad_area_nosemaphore; - set_pud(pud, *pud_k); + + if (!pud_present(*pud)) { + if (!pud_present(*pud_k)) + goto bad_area_nosemaphore; + set_pud(pud, *pud_k); + return; + } pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); -- cgit v0.10.2 From 664718a34348a9ef6f966c3977e8df927a378134 Mon Sep 17 00:00:00 2001 From: Chris Smith Date: Fri, 5 Sep 2008 16:24:13 +0900 Subject: sh: Fix uImage load address in 32-bit mode. Fix "make uImage" load and entry addresses in 32-bit mode. 
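To make the arithmetic in the Makefile change below concrete: the patch suggests that in 32-bit (PMB) mode the kernel's virtual base already corresponds to the start of RAM, so only the zero-page offset is added, whereas the legacy calculation also adds CONFIG_MEMORY_START. A quick sketch with example configuration values (0x80000000 / 0x0c000000 / 0x1000 are typical SH settings, used here only as assumptions):

#include <stdio.h>

int main(void)
{
	/* Example values standing in for the CONFIG_* symbols. */
	unsigned long page_offset      = 0x80000000UL; /* CONFIG_PAGE_OFFSET */
	unsigned long memory_start     = 0x0c000000UL; /* CONFIG_MEMORY_START */
	unsigned long zero_page_offset = 0x00001000UL; /* CONFIG_ZERO_PAGE_OFFSET */

	/* 29-bit mode: physical RAM appears at PAGE_OFFSET + MEMORY_START. */
	unsigned long load_29bit = page_offset + memory_start + zero_page_offset;

	/* 32-bit (PMB) mode: PAGE_OFFSET already maps the start of RAM. */
	unsigned long load_32bit = page_offset + zero_page_offset;

	printf("29-bit load: 0x%08lx\n32-bit load: 0x%08lx\n",
	       load_29bit, load_32bit);
	return 0;
}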
Signed-off-by: Chris Smith Signed-off-by: Paul Mundt diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile index 5b54965..c16ccd4 100644 --- a/arch/sh/boot/Makefile +++ b/arch/sh/boot/Makefile @@ -33,10 +33,16 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ +ifeq ($(CONFIG_32BIT),y) +KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \ + $$[$(CONFIG_PAGE_OFFSET) + \ + $(CONFIG_ZERO_PAGE_OFFSET)]') +else KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_PAGE_OFFSET) + \ $(CONFIG_MEMORY_START) + \ $(CONFIG_ZERO_PAGE_OFFSET)]') +endif KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_PAGE_OFFSET) + \ -- cgit v0.10.2 From f040ddaf4cfd28f25ea9d6a42d3c734d5c3f6798 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Fri, 5 Sep 2008 16:29:40 +0900 Subject: sh: Fix an unusual memory initialisation error. This fixes a problems with the set up of Linux memory: - When reserving memory at boot time, the code previously reserved the bottom page of memory, and then from one page up to the end of the bootmap. This had the desired effect, but was strictly speaking wrong, as the one page was actually whatever CONFIG_ZERO_PAGE_OFFSET had been set to. Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index fc7171a..fc098c8 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -233,15 +233,17 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) * case of us accidentally initializing the bootmem allocator with * an invalid RAM area. */ - reserve_bootmem(__MEMORY_START+PAGE_SIZE, - (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START, - BOOTMEM_DEFAULT); + reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, + (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) - + (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET), + BOOTMEM_DEFAULT); /* * reserve physical page 0 - it's a special BIOS page on many boxes, * enabling clean reboots, SMP operation, laptop functions. */ - reserve_bootmem(__MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT); + reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET, + BOOTMEM_DEFAULT); sparse_memory_present_with_active_regions(0); -- cgit v0.10.2 From b6ad1e8c3f76fcc5dee506d5e79e752d296ff745 Mon Sep 17 00:00:00 2001 From: Carl Shaw Date: Fri, 5 Sep 2008 16:36:19 +0900 Subject: sh: Subnormal double to float conversion This patch adds support for the SH4 to convert a subnormal double into a float by catching the FPE and implementing the FCNVDS instruction in software. 
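For reference while reading the handler below: an IEEE-754 double is subnormal when its 11 exponent bits are all zero but its mantissa is non-zero, which is what the (hx & 0x7fffffff) < 0x00100000 test on the high word approximates (that test also matches ±0.0, hence the software conversion falls through to roundAndPackFloat32 either way). A small, self-contained check, independent of the kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns 1 if d is an IEEE-754 subnormal (denormal) double:
 * exponent bits all zero, mantissa non-zero. */
static int is_subnormal_double(double d)
{
	uint64_t bits;

	memcpy(&bits, &d, sizeof(bits));	/* safe type pun */
	uint64_t exponent = (bits >> 52) & 0x7ff;
	uint64_t mantissa = bits & 0x000fffffffffffffULL;

	return exponent == 0 && mantissa != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_subnormal_double(5e-324),	/* smallest subnormal -> 1 */
	       is_subnormal_double(0.0),	/* zero -> 0 */
	       is_subnormal_double(1.0));	/* normal -> 0 */
	return 0;
}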
Signed-off-by: Carl Shaw Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index 2d452f6..2780917 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -36,7 +36,7 @@ extern unsigned long int float32_add(unsigned long int a, unsigned long int b); extern unsigned long long float64_sub(unsigned long long a, unsigned long long b); extern unsigned long int float32_sub(unsigned long int a, unsigned long int b); - +extern unsigned long int float64_to_float32(unsigned long long a); static unsigned int fpu_exception_flags; /* @@ -417,6 +417,29 @@ static int ieee_fpe_handler(struct pt_regs *regs) regs->pc = nextpc; return 1; + } else if ((finsn & 0xf0bd) == 0xf0bd) { + /* fcnvds - double to single precision convert */ + struct task_struct *tsk = current; + int m; + unsigned int hx; + + m = (finsn >> 9) & 0x7; + hx = tsk->thread.fpu.hard.fp_regs[m]; + + if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR) + && ((hx & 0x7fffffff) < 0x00100000)) { + /* subnormal double to float conversion */ + long long llx; + + llx = ((long long)tsk->thread.fpu.hard.fp_regs[m] << 32) + | tsk->thread.fpu.hard.fp_regs[m + 1]; + + tsk->thread.fpu.hard.fpul = float64_to_float32(llx); + } else + return 0; + + regs->pc = nextpc; + return 1; } return 0; diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c index 828cb57..2b747f3 100644 --- a/arch/sh/kernel/cpu/sh4/softfloat.c +++ b/arch/sh/kernel/cpu/sh4/softfloat.c @@ -85,6 +85,7 @@ float64 float64_div(float64 a, float64 b); float32 float32_div(float32 a, float32 b); float32 float32_mul(float32 a, float32 b); float64 float64_mul(float64 a, float64 b); +float32 float64_to_float32(float64 a); inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, bits64 * z1Ptr); inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, @@ -890,3 +891,31 @@ float64 float64_mul(float64 a, float64 b) } return roundAndPackFloat64(zSign, zExp, zSig0); } + +/* + * ------------------------------------------------------------------------------- + * Returns the result of converting the double-precision floating-point value + * `a' to the single-precision floating-point format. The conversion is + * performed according to the IEC/IEEE Standard for Binary Floating-point + * Arithmetic. 
+ * ------------------------------------------------------------------------------- + * */ +float32 float64_to_float32(float64 a) +{ + flag aSign; + int16 aExp; + bits64 aSig; + bits32 zSig; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + + shift64RightJamming( aSig, 22, &aSig ); + zSig = aSig; + if ( aExp || zSig ) { + zSig |= 0x40000000; + aExp -= 0x381; + } + return roundAndPackFloat32(aSign, aExp, zSig); +} -- cgit v0.10.2 From 61c66387e640abc0e0aa11519bc48ff9bb50580a Mon Sep 17 00:00:00 2001 From: Francesco Virlinzi Date: Fri, 5 Sep 2008 16:40:22 +0900 Subject: sh: fix the TMU code to allow a fully running NO_HZ system This patch fixes the TMU code to allow NO_HZ to work on sh Signed-off-by: Francesco Virlinzi Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 1ca9ad4..aaaf90d 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -28,43 +28,90 @@ #define TMU_TOCR_INIT 0x00 #define TMU_TCR_INIT 0x0020 -static int tmu_timer_start(void) +#define TMU0 (0) +#define TMU1 (1) + +static inline void _tmu_start(int tmu_num) { - ctrl_outb(ctrl_inb(TMU_012_TSTR) | 0x3, TMU_012_TSTR); - return 0; + ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1<mode == CLOCK_EVT_MODE_PERIODIC); + _tmu_set_irq(TMU0,1); return 0; } @@ -96,12 +143,8 @@ static struct clock_event_device tmu0_clockevent = { static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) { struct clock_event_device *evt = &tmu0_clockevent; - unsigned long timer_status; - - /* Clear UNF bit */ - timer_status = ctrl_inw(TMU0_TCR); - timer_status &= ~0x100; - ctrl_outw(timer_status, TMU0_TCR); + _tmu_clear_status(TMU0); + _tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT); evt->event_handler(evt); @@ -109,56 +152,73 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) } static struct irqaction tmu0_irq = { - .name = "periodic timer", + .name = "periodic/oneshot timer", .handler = tmu_timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .mask = CPU_MASK_NONE, }; -static void tmu0_clk_init(struct clk *clk) +static void __init tmu_clk_init(struct clk *clk) { - u8 divisor = TMU_TCR_INIT & 0x7; - ctrl_outw(TMU_TCR_INIT, TMU0_TCR); - clk->rate = clk->parent->rate / (4 << (divisor << 1)); + u8 divisor = TMU_TCR_INIT & 0x7; + int tmu_num = clk->name[3]-'0'; + ctrl_outw(TMU_TCR_INIT, TMU0_TCR+(tmu_num*0xC)); + clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1)); } -static void tmu0_clk_recalc(struct clk *clk) +static void tmu_clk_recalc(struct clk *clk) { - u8 divisor = ctrl_inw(TMU0_TCR) & 0x7; - clk->rate = clk->parent->rate / (4 << (divisor << 1)); -} + int tmu_num = clk->name[3]-'0'; + unsigned long prev_rate = clk_get_rate(clk); + unsigned long flags; + u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7; + clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1)); -static struct clk_ops tmu0_clk_ops = { - .init = tmu0_clk_init, - .recalc = tmu0_clk_recalc, -}; + if(prev_rate==clk_get_rate(clk)) + return; -static struct clk tmu0_clk = { - .name = "tmu0_clk", - .ops = &tmu0_clk_ops, -}; + if(tmu_num) + return; /* No more work on TMU1 */ -static void tmu1_clk_init(struct clk *clk) -{ - u8 divisor = TMU_TCR_INIT & 0x7; - ctrl_outw(divisor, TMU1_TCR); - clk->rate = clk->parent->rate / (4 << (divisor << 1)); -} + local_irq_save(flags); + tmus_are_scaled = (prev_rate > clk->rate); -static void tmu1_clk_recalc(struct clk *clk) -{ - u8 divisor = 
ctrl_inw(TMU1_TCR) & 0x7; - clk->rate = clk->parent->rate / (4 << (divisor << 1)); + _tmu_stop(TMU0); + + tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC, + tmu0_clockevent.shift); + tmu0_clockevent.max_delta_ns = + clockevent_delta2ns(-1, &tmu0_clockevent); + tmu0_clockevent.min_delta_ns = + clockevent_delta2ns(1, &tmu0_clockevent); + + if (tmus_are_scaled) + tmu_latest_interval[TMU0] >>= 1; + else + tmu_latest_interval[TMU0] <<= 1; + + tmu_timer_set_interval(TMU0, + tmu_latest_interval[TMU0], + tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC); + + _tmu_start(TMU0); + + local_irq_restore(flags); } -static struct clk_ops tmu1_clk_ops = { - .init = tmu1_clk_init, - .recalc = tmu1_clk_recalc, +static struct clk_ops tmu_clk_ops = { + .init = tmu_clk_init, + .recalc = tmu_clk_recalc, +}; + +static struct clk tmu0_clk = { + .name = "tmu0_clk", + .ops = &tmu_clk_ops, }; static struct clk tmu1_clk = { .name = "tmu1_clk", - .ops = &tmu1_clk_ops, + .ops = &tmu_clk_ops, }; static int tmu_timer_init(void) @@ -189,11 +249,12 @@ static int tmu_timer_init(void) frequency = clk_get_rate(&tmu0_clk); interval = (frequency + HZ / 2) / HZ; - sh_hpt_frequency = clk_get_rate(&tmu1_clk); - ctrl_outl(~0, TMU1_TCNT); - ctrl_outl(~0, TMU1_TCOR); + tmu_timer_set_interval(TMU0,interval, 1); + tmu_timer_set_interval(TMU1,~0,1); - tmu0_timer_set_interval(interval, 1); + _tmu_start(TMU1); + + sh_hpt_frequency = clk_get_rate(&tmu1_clk); tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC, tmu0_clockevent.shift); -- cgit v0.10.2 From b6c20e4290a1ef92bcef5ec9dd8e5c7d036153aa Mon Sep 17 00:00:00 2001 From: Marek Skuczynski Date: Fri, 5 Sep 2008 16:42:58 +0900 Subject: sh: remove unnecessary memset after alloc_bootmem_low_pages Because alloc_bootmem functions return the allocated memory always zeroed, an additional call of memset on allocated memory is unnecessary. Signed-off-by: Marek Skuczynski Signed-off-by: Carl Shaw Signed-off-by: Paul Mundt diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index f1a4942..31211bf 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -123,7 +123,6 @@ void __init page_table_range_init(unsigned long start, unsigned long end, if (!pmd_present(*pmd)) { pte_t *pte_table; pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); - memset(pte_table, 0, PAGE_SIZE); pmd_populate_kernel(&init_mm, pmd, pte_table); } -- cgit v0.10.2 From d39f5450146ff39f66cfde9d5184420627d0ac51 Mon Sep 17 00:00:00 2001 From: Chris Smith Date: Fri, 5 Sep 2008 17:15:39 +0900 Subject: sh: Add kprobes support. Initial support for kprobes/kretprobes for 32-bit SH platforms. [ General cleanup and some rework for the kretprobe hash lock. 
-- PFM ] Signed-off-by: Chris Smith Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index af2b174..334917a 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -20,6 +20,8 @@ config SUPERH config SUPERH32 def_bool !SUPERH64 + select HAVE_KPROBES + select HAVE_KRETPROBES config SUPERH64 def_bool y if CPU_SH5 diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h new file mode 100644 index 0000000..70fc629 --- /dev/null +++ b/arch/sh/include/asm/kprobes.h @@ -0,0 +1,59 @@ +#ifndef __ASM_SH_KPROBES_H +#define __ASM_SH_KPROBES_H + +#ifdef CONFIG_KPROBES + +#include +#include + +struct pt_regs; + +typedef u16 kprobe_opcode_t; +#define BREAKPOINT_INSTRUCTION 0xc3ff + +#define MAX_INSN_SIZE 16 +#define MAX_STACK_SIZE 64 +#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ + (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ + ? (MAX_STACK_SIZE) \ + : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) + +#define regs_return_value(regs) ((regs)->regs[0]) +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +struct kprobe; + +void arch_remove_kprobe(struct kprobe *); +void kretprobe_trampoline(void); +void jprobe_return_end(void); + +/* Architecture specific copy of original instruction*/ +struct arch_specific_insn { + /* copy of the original instruction */ + kprobe_opcode_t insn[MAX_INSN_SIZE]; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned long kprobe_status; + unsigned long jprobe_saved_r15; + struct pt_regs jprobe_saved_regs; + kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; + struct prev_kprobe prev_kprobe; +}; + +extern int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); +extern int kprobe_handle_illslot(unsigned long pc); +#else + +#define kprobe_handle_illslot(pc) (-1) + +#endif /* CONFIG_KPROBES */ +#endif /* __ASM_SH_KPROBES_H */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 0e6905f..12e617b 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -23,5 +23,6 @@ obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_ELF_CORE) += dump_task.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o +obj-$(CONFIG_KPROBES) += kprobes.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c new file mode 100644 index 0000000..c4f4a09 --- /dev/null +++ b/arch/sh/kernel/kprobes.c @@ -0,0 +1,568 @@ +/* + * Kernel probes (kprobes) for SuperH + * + * Copyright (C) 2007 Chris Smith + * Copyright (C) 2006 Lineo Solutions, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static struct kprobe saved_current_opcode; +static struct kprobe saved_next_opcode; +static struct kprobe saved_next_opcode2; + +#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b) +#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b) +#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000) +#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023) +#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000) +#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003) + +#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00) +#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00) + +#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00) +#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900) + +#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b) +#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b) + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr); + + if (OPCODE_RTE(opcode)) + return -EFAULT; /* Bad breakpoint */ + + p->opcode = opcode; + + return 0; +} + +void __kprobes arch_copy_kprobe(struct kprobe *p) +{ + memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + p->opcode = *p->addr; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + *p->addr = BREAKPOINT_INSTRUCTION; + flush_icache_range((unsigned long)p->addr, + (unsigned long)p->addr + sizeof(kprobe_opcode_t)); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + *p->addr = p->opcode; + flush_icache_range((unsigned long)p->addr, + (unsigned long)p->addr + sizeof(kprobe_opcode_t)); +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (*p->addr == BREAKPOINT_INSTRUCTION) + return 1; + + return 0; +} + +/** + * If an illegal slot instruction exception occurs for an address + * containing a kprobe, remove the probe. + * + * Returns 0 if the exception was handled successfully, 1 otherwise. + */ +int __kprobes kprobe_handle_illslot(unsigned long pc) +{ + struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1); + + if (p != NULL) { + printk("Warning: removing kprobe from delay slot: 0x%.8x\n", + (unsigned int)pc + 2); + unregister_kprobe(p); + return 0; + } + + return 1; +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (saved_next_opcode.addr != 0x0) { + arch_disarm_kprobe(p); + arch_disarm_kprobe(&saved_next_opcode); + saved_next_opcode.addr = 0x0; + saved_next_opcode.opcode = 0x0; + + if (saved_next_opcode2.addr != 0x0) { + arch_disarm_kprobe(&saved_next_opcode2); + saved_next_opcode2.addr = 0x0; + saved_next_opcode2.opcode = 0x0; + } + } +} + +static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = p; +} + +/* + * Singlestep is implemented by disabling the current kprobe and setting one + * on the next instruction, following branches. Two probes are set if the + * branch is conditional. 
+ */ +static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t *addr = NULL; + saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc); + addr = saved_current_opcode.addr; + + if (p != NULL) { + arch_disarm_kprobe(p); + + if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) { + unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); + saved_next_opcode.addr = + (kprobe_opcode_t *) regs->regs[reg_nr]; + } else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) { + unsigned long disp = (p->opcode & 0x0FFF); + saved_next_opcode.addr = + (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); + + } else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) { + unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); + saved_next_opcode.addr = + (kprobe_opcode_t *) (regs->pc + 4 + + regs->regs[reg_nr]); + + } else if (OPCODE_RTS(p->opcode)) { + saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr; + + } else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) { + unsigned long disp = (p->opcode & 0x00FF); + /* case 1 */ + saved_next_opcode.addr = p->addr + 1; + /* case 2 */ + saved_next_opcode2.addr = + (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); + saved_next_opcode2.opcode = *(saved_next_opcode2.addr); + arch_arm_kprobe(&saved_next_opcode2); + + } else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) { + unsigned long disp = (p->opcode & 0x00FF); + /* case 1 */ + saved_next_opcode.addr = p->addr + 2; + /* case 2 */ + saved_next_opcode2.addr = + (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); + saved_next_opcode2.opcode = *(saved_next_opcode2.addr); + arch_arm_kprobe(&saved_next_opcode2); + + } else { + saved_next_opcode.addr = p->addr + 1; + } + + saved_next_opcode.opcode = *(saved_next_opcode.addr); + arch_arm_kprobe(&saved_next_opcode); + } +} + +/* Called with kretprobe_lock held */ +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->pr; + + /* Replace the return addr with trampoline addr */ + regs->pr = (unsigned long)kretprobe_trampoline; +} + +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + int ret = 0; + kprobe_opcode_t *addr = NULL; + struct kprobe_ctlblk *kcb; + + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + + addr = (kprobe_opcode_t *) (regs->pc); + + /* Check we're not actually recursing */ + if (kprobe_running()) { + p = get_kprobe(addr); + if (p) { + if (kcb->kprobe_status == KPROBE_HIT_SS && + *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { + goto no_kprobe; + } + /* We have reentered the kprobe_handler(), since + * another probe was hit while within the handler. + * We here save the original kprobes variables and + * just single step on the instruction of the new probe + * without calling any user handlers. 
+ */ + save_previous_kprobe(kcb); + set_current_kprobe(p, regs, kcb); + kprobes_inc_nmissed_count(p); + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_REENTER; + return 1; + } else { + p = __get_cpu_var(current_kprobe); + if (p->break_handler && p->break_handler(p, regs)) { + goto ss_probe; + } + } + goto no_kprobe; + } + + p = get_kprobe(addr); + if (!p) { + /* Not one of ours: let kernel handle it */ + goto no_kprobe; + } + + set_current_kprobe(p, regs, kcb); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + if (p->pre_handler && p->pre_handler(p, regs)) + /* handler has already set things up, so skip ss setup */ + return 1; + + ss_probe: + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_HIT_SS; + return 1; + + no_kprobe: + preempt_enable_no_resched(); + return ret; +} + +/* + * For function-return probes, init_kprobes() establishes a probepoint + * here. When a retprobed function returns, this probe is hit and + * trampoline_probe_handler() runs, calling the kretprobe's handler. + */ +void kretprobe_trampoline_holder(void) +{ + asm volatile ("kretprobe_trampoline: \n" "nop\n"); +} + +/* + * Called when we hit the probe point at kretprobe_trampoline + */ +int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *node, *tmp; + unsigned long flags, orig_ret_address = 0; + unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + + /* + * It is possible to have multiple instances associated with a given + * task either because an multiple functions in the call path + * have a return probe installed on them, and/or more then one return + * return probe was registered for a target function. + * + * We can handle this because: + * - instances are always inserted at the head of the list + * - when multiple return probes are registered for the same + * function, the first instance's ret_addr will point to the + * real return address, and all the rest will point to + * kretprobe_trampoline + */ + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + if (ri->rp && ri->rp->handler) { + __get_cpu_var(current_kprobe) = &ri->rp->kp; + ri->rp->handler(ri, regs); + __get_cpu_var(current_kprobe) = NULL; + } + + orig_ret_address = (unsigned long)ri->ret_addr; + recycle_rp_inst(ri, &empty_rp); + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. 
Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, orig_ret_address, trampoline_address); + + regs->pc = orig_ret_address; + kretprobe_hash_unlock(current, &flags); + + preempt_enable_no_resched(); + + hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); + } + + return orig_ret_address; +} + +static inline int post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + kprobe_opcode_t *addr = NULL; + struct kprobe *p = NULL; + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + if (saved_next_opcode.addr != 0x0) { + arch_disarm_kprobe(&saved_next_opcode); + saved_next_opcode.addr = 0x0; + saved_next_opcode.opcode = 0x0; + + addr = saved_current_opcode.addr; + saved_current_opcode.addr = 0x0; + + p = get_kprobe(addr); + arch_arm_kprobe(p); + + if (saved_next_opcode2.addr != 0x0) { + arch_disarm_kprobe(&saved_next_opcode2); + saved_next_opcode2.addr = 0x0; + saved_next_opcode2.opcode = 0x0; + } + } + + /*Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); + + out: + preempt_enable_no_resched(); + + return 1; +} + +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + const struct exception_table_entry *entry; + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe, point the pc back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->pc = (unsigned long)cur->addr; + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accounting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if ((entry = search_exception_tables(regs->pc)) != NULL) { + regs->pc = entry->fixup; + return 1; + } + + /* + * fixup_exception() could not handle it, + * Let do_page_fault() fix it. + */ + break; + default: + break; + } + return 0; +} + +/* + * Wrapper routine to for handling exceptions. 
+ */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct kprobe *p = NULL; + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + kprobe_opcode_t *addr = NULL; + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + addr = (kprobe_opcode_t *) (args->regs->pc); + if (val == DIE_TRAP) { + if (!kprobe_running()) { + if (kprobe_handler(args->regs)) { + ret = NOTIFY_STOP; + } else { + /* Not a kprobe trap */ + force_sig(SIGTRAP, current); + } + } else { + p = get_kprobe(addr); + if ((kcb->kprobe_status == KPROBE_HIT_SS) || + (kcb->kprobe_status == KPROBE_REENTER)) { + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + } else { + if (kprobe_handler(args->regs)) { + ret = NOTIFY_STOP; + } else { + p = __get_cpu_var(current_kprobe); + if (p->break_handler + && p->break_handler(p, args->regs)) + ret = NOTIFY_STOP; + } + } + } + } + + return ret; +} + +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct jprobe *jp = container_of(p, struct jprobe, kp); + unsigned long addr; + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + kcb->jprobe_saved_regs = *regs; + kcb->jprobe_saved_r15 = regs->regs[15]; + addr = kcb->jprobe_saved_r15; + + /* + * TBD: As Linus pointed out, gcc assumes that the callee + * owns the argument space and could overwrite it, e.g. + * tailcall optimization. So, to be absolutely safe + * we also save and restore enough stack bytes to cover + * the argument area. + */ + memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, + MIN_STACK_SIZE(addr)); + + regs->pc = (unsigned long)(jp->entry); + + return 1; +} + +void __kprobes jprobe_return(void) +{ + __asm("trapa #-1\n\t" "jprobe_return_end:\n\t" "nop\n\t"); + +} + +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + u8 *addr = (u8 *) regs->pc; + unsigned long stack_addr = kcb->jprobe_saved_r15; + + if ((addr >= (u8 *) jprobe_return) + && (addr <= (u8 *) jprobe_return_end)) { + *regs = kcb->jprobe_saved_regs; + + memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, + MIN_STACK_SIZE(stack_addr)); + + kcb->kprobe_status = KPROBE_HIT_SS; + return 1; + } + return 0; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) &kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + saved_next_opcode.addr = 0x0; + saved_next_opcode.opcode = 0x0; + + saved_current_opcode.addr = 0x0; + saved_current_opcode.opcode = 0x0; + + saved_next_opcode2.addr = 0x0; + saved_next_opcode2.opcode = 0x0; + + return register_kprobe(&trampoline_p); +} diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 4901f67..862667a 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -26,6 +26,7 @@ #include #include #include +#include #ifdef CONFIG_SH_KGDB #include @@ -743,6 +744,10 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, struct pt_regs *regs = RELOC_HIDE(&__regs, 0); unsigned long error_code; struct task_struct *tsk = current; + + if (kprobe_handle_illslot(regs->pc) == 0) + return; + #ifdef CONFIG_SH_FPU_EMU unsigned short inst = 0; -- cgit v0.10.2 From 6907e6a601a4fd442535d977886bbd3761aa2b56 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 5 Sep 2008 17:27:37 +0900 Subject: sh: Add the rest of the boot targets to arch/sh/boot/.gitignore. 
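The kprobes support added above is driven through the generic kprobes API, so a client module on SH looks the same as on other architectures. A minimal, illustrative probe follows; the probed symbol "do_fork" and the message text are arbitrary example choices, not part of the SH patch:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Runs just before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* example target symbol */
	.pre_handler	= handler_pre,
};

static int __init probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");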
Signed-off-by: Paul Mundt diff --git a/arch/sh/boot/.gitignore b/arch/sh/boot/.gitignore index b6718de..aad5edd 100644 --- a/arch/sh/boot/.gitignore +++ b/arch/sh/boot/.gitignore @@ -1 +1,4 @@ zImage +vmlinux.srec +uImage +uImage.srec -- cgit v0.10.2 From 205a3b4328de1c8ddd99ddd5092bed1344068213 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 5 Sep 2008 18:00:29 +0900 Subject: sh: uninline flush_icache_all(). This uses jump_to_uncached() which is now given the noinline attribute due to the special section mapping. Kill off the inline attribute to fix up compilation failure. Signed-off-by: Paul Mundt diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 1fdc8d9..5cfe08d 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -261,7 +261,7 @@ void flush_dcache_page(struct page *page) } /* TODO: Selective icache invalidation through IC address array.. */ -static inline void __uses_jump_to_uncached flush_icache_all(void) +static void __uses_jump_to_uncached flush_icache_all(void) { unsigned long flags, ccr; -- cgit v0.10.2 From 53abf911fa6753dfbd6775ae541fb2f8b9f5b825 Mon Sep 17 00:00:00 2001 From: Luca Santini Date: Mon, 8 Sep 2008 11:54:56 +0900 Subject: sh: Enable IRLM mode for SH7760 IRQ_MODE_IRQ. Follows the same setting as SH7750. Signed-off-by: Luca Santini Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 254c5c5..d9bdc93 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -11,6 +11,7 @@ #include #include #include +#include enum { UNUSED = 0, @@ -178,10 +179,14 @@ static int __init sh7760_devices_setup(void) } __initcall(sh7760_devices_setup); +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1 << 7) + void __init plat_irq_setup_pins(int mode) { switch (mode) { case IRQ_MODE_IRQ: + ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); register_intc_controller(&intc_desc_irq); break; default: -- cgit v0.10.2 From 3db9170880241d63aae7ab86a03aa15418c3e5c6 Mon Sep 17 00:00:00 2001 From: Luca Santini Date: Mon, 8 Sep 2008 12:01:15 +0900 Subject: sh: Add Renesas EDOSK7760 board support. This adds support for the Renesas (RTE) EDOSK7760 board. Currently supported devices are: - ramdisk support - ethernet support - nfs support - ext2/ext3 support - i2c support - fb support (M) Signed-off-by: Luca Santini Signed-off-by: Paul Mundt diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index ae19486..c6b21e8 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -184,6 +184,13 @@ config SH_EDOSK7705 bool "EDOSK7705" depends on CPU_SUBTYPE_SH7705 +config SH_EDOSK7760 + bool "EDOSK7760" + depends on CPU_SUBTYPE_SH7760 + help + Select if configuring for a Renesas EDOSK7760 + evaluation board. 
+ config SH_SH4202_MICRODEV bool "SH4-202 MicroDev" depends on CPU_SUBTYPE_SH4_202 diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile index 463022c..d9efa39 100644 --- a/arch/sh/boards/Makefile +++ b/arch/sh/boards/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o obj-$(CONFIG_SH_RSK7203) += board-rsk7203.o obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o obj-$(CONFIG_SH_SHMIN) += board-shmin.o +obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c new file mode 100644 index 0000000..7cc5e11 --- /dev/null +++ b/arch/sh/boards/board-edosk7760.c @@ -0,0 +1,144 @@ +/* + * Renesas Europe EDOSK7760 Board Support + * + * Copyright (C) 2008 SPES Societa' Progettazione Elettronica e Software Ltd. + * Author: Luca Santini + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Bus state controller registers for CS4 area */ +#define BSC_CS4BCR 0xA4FD0010 +#define BSC_CS4WCR 0xA4FD0030 + +#define SMC_IOBASE 0xA2000000 +#define SMC_IO_OFFSET 0x300 +#define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET) + +#define ETHERNET_IRQ 5 + +/* i2c initialization functions */ +static struct sh7760_i2c_platdata i2c_pd = { + .speed_khz = 400, +}; + +static struct resource sh7760_i2c1_res[] = { + { + .start = SH7760_I2C1_MMIO, + .end = SH7760_I2C1_MMIOEND, + .flags = IORESOURCE_MEM, + },{ + .start = SH7760_I2C1_IRQ, + .end = SH7760_I2C1_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device sh7760_i2c1_dev = { + .dev = { + .platform_data = &i2c_pd, + }, + + .name = SH7760_I2C_DEVNAME, + .id = 1, + .resource = sh7760_i2c1_res, + .num_resources = ARRAY_SIZE(sh7760_i2c1_res), +}; + +static struct resource sh7760_i2c0_res[] = { + { + .start = SH7760_I2C0_MMIO, + .end = SH7760_I2C0_MMIOEND, + .flags = IORESOURCE_MEM, + }, { + .start = SH7760_I2C0_IRQ, + .end = SH7760_I2C0_IRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device sh7760_i2c0_dev = { + .dev = { + .platform_data = &i2c_pd, + }, + .name = SH7760_I2C_DEVNAME, + .id = 0, + .resource = sh7760_i2c0_res, + .num_resources = ARRAY_SIZE(sh7760_i2c0_res), +}; + +/* eth initialization functions */ +static struct smc91x_platdata smc91x_info = { + .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL, +}; + +static struct resource smc91x_res[] = { + [0] = { + .start = SMC_IOADDR, + .end = SMC_IOADDR + 0x1f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = ETHERNET_IRQ, + .end = ETHERNET_IRQ, + .flags = IORESOURCE_IRQ , + } +}; + +static struct platform_device smc91x_dev = { + .name = "smc91x", + .id = -1, + .num_resources = ARRAY_SIZE(smc91x_res), + .resource = smc91x_res, + + .dev = { + .platform_data = &smc91x_info, + }, +}; + +/* platform 
init code */ +static struct platform_device *edosk7760_devices[] __initdata = { + &sh7760_i2c0_dev, + &sh7760_i2c1_dev, + &smc91x_dev, +}; + +static int __init init_edosk7760_devices(void) +{ + plat_irq_setup_pins(IRQ_MODE_IRQ); + + return platform_add_devices(edosk7760_devices, + ARRAY_SIZE(edosk7760_devices)); +} +__initcall(init_edosk7760_devices); + +/* + * The Machine Vector + */ +struct sh_machine_vector mv_edosk7760 __initmv = { + .mv_name = "EDOSK7760", + .mv_nr_irqs = 128, +}; diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig new file mode 100644 index 0000000..f8ec32a --- /dev/null +++ b/arch/sh/configs/edosk7760_defconfig @@ -0,0 +1,1050 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.26 +# Tue Aug 26 11:36:09 2008 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_ARCH_SUPPORTS_AOUT=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="_edosk7760" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +# CONFIG_CGROUPS is not set +# CONFIG_GROUP_SCHED is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_COMPAT_BRK=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_ANON_INODES=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_HAVE_KPROBES is not set +# CONFIG_HAVE_KRETPROBES is not set +# CONFIG_HAVE_DMA_ATTRS is not set +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_LSF is not set +# CONFIG_BLK_DEV_BSG is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" 
+CONFIG_CLASSIC_RCU=y + +# +# System type +# +CONFIG_CPU_SH4=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7751R is not set +CONFIG_CPU_SUBTYPE_SH7760=y +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y +CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_MEMORY_START=0x0c000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_29BIT=y +CONFIG_VSYSCALL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPARSEMEM_STATIC=y +# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_RESOURCES_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 + +# +# Cache configuration +# +# CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_FPU=y +CONFIG_SH_STORE_QUEUES=y +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_SR_RB=y +CONFIG_CPU_HAS_PTEA=y +CONFIG_CPU_HAS_FPU=y + +# +# Board support +# +CONFIG_SH_EDOSK7760=y + +# +# Timer and clock configuration +# +CONFIG_SH_TMU=y +CONFIG_SH_TIMER_IRQ=16 +CONFIG_SH_PCLK_FREQ=33333333 +CONFIG_TICK_ONESHOT=y +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +CONFIG_SH_DMA_API=y +CONFIG_SH_DMA=y +CONFIG_NR_ONCHIP_DMA_CHANNELS=4 +# CONFIG_NR_DMA_CHANNELS_BOOL is not set +# CONFIG_SH_DMABRG is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +# CONFIG_HEARTBEAT is not set +# CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +# CONFIG_SCHED_HRTICK is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_PREEMPT_NONE is not 
set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +# CONFIG_PREEMPT_RCU is not set +CONFIG_GUSA=y +# CONFIG_GUSA_RB is not set + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x02000000 +# CONFIG_UBC_WAKEUP is not set +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="mem=64M console=ttySC2,115200 root=/dev/nfs rw nfsroot=192.168.0.3:/scripts/filesys ip=192.168.0.4" + +# +# Bus options +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set + +# +# Wireless +# +# CONFIG_CFG80211 is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_MAC80211 is not set +# CONFIG_IEEE80211 is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set +CONFIG_DEBUG_DRIVER=y +CONFIG_DEBUG_DEVRES=y +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +CONFIG_MTD_DEBUG=y +CONFIG_MTD_DEBUG_VERBOSE=3 +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +CONFIG_MTD_CFI_GEOMETRY=y +# 
CONFIG_MTD_MAP_BANK_WIDTH_1 is not set +CONFIG_MTD_MAP_BANK_WIDTH_2=y +# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +# CONFIG_MTD_CFI_I2 is not set +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_CFI_UTIL=y +CONFIG_MTD_RAM=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_START=0xA0000000 +CONFIG_MTD_PHYSMAP_LEN=0x01000000 +CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=26000 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MISC_DEVICES is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_NETDEVICES_MULTIQUEUE is not set +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_AX88796 is not set +# CONFIG_STNIC is not set +# CONFIG_SMC9194 is not set +CONFIG_SMC91X=y +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_B44 is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set +# CONFIG_IWLWIFI_LEDS is not set +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y 
+# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=3 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_PCA_PLATFORM is not set +CONFIG_I2C_SH7760=y +# CONFIG_I2C_SH_MOBILE is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_PCF8575 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_SENSORS_TSL2550 is not set +CONFIG_I2C_DEBUG_CORE=y +CONFIG_I2C_DEBUG_ALGO=y +CONFIG_I2C_DEBUG_BUS=y +CONFIG_I2C_DEBUG_CHIP=y +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set + +# +# Sonics Silicon Backplane +# +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=m +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +CONFIG_FB_CFB_FILLRECT=m +CONFIG_FB_CFB_COPYAREA=m +CONFIG_FB_CFB_IMAGEBLIT=m +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_MB86290_640X480_16BPP is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set + +# +# Sound +# +CONFIG_SOUND=y + +# +# Advanced Linux Sound Architecture +# +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=y +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +# CONFIG_SND_DYNAMIC_MINORS is not set +# CONFIG_SND_SUPPORT_OLD_API is not set +# CONFIG_SND_VERBOSE_PROCFS is not set +CONFIG_SND_VERBOSE_PRINTK=y +# CONFIG_SND_DEBUG is not set + +# +# Generic devices +# +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set + +# +# SUPERH devices +# + +# +# 
System on Chip audio support +# +CONFIG_SND_SOC=y + +# +# SoC Audio support for SuperH +# + +# +# ALSA SoC audio for Freescale SOCs +# + +# +# SoC Audio for the Texas Instruments OMAP +# + +# +# Open Sound System +# +# CONFIG_SOUND_PRIME is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_RTC_CLASS is not set +# CONFIG_UIO is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +# CONFIG_EXT2_FS_POSIX_ACL is not set +# CONFIG_EXT2_FS_SECURITY is not set +CONFIG_EXT2_FS_XIP=y +CONFIG_FS_XIP=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +# CONFIG_EXT4DEV_FS is not set +CONFIG_JBD=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_DNOTIFY=y +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set +CONFIG_GENERIC_ACL=y + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_SUNRPC_BIND34 is not set +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set 
+# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +CONFIG_NLS_ISO8859_15=y +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +CONFIG_UNUSED_SYMBOLS=y +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_DEBUG_PREEMPT=y +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_SAMPLES is not set +# CONFIG_SH_STANDARD_BIOS is not set +CONFIG_EARLY_SCIF_CONSOLE=y +CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000 +CONFIG_EARLY_PRINTK=y +# CONFIG_DEBUG_BOOTMEM is not set +CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_4KSTACKS is not set +# CONFIG_IRQSTACKS is not set +# CONFIG_SH_KGDB is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +# CONFIG_CRYPTO_MANAGER is not set +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not 
set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_LZO is not set +CONFIG_CRYPTO_HW=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +# CONFIG_GENERIC_FIND_FIRST_BIT is not set +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_PLIST=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y -- cgit v0.10.2 From 4ad06dd6f1ec745c5ee0e37ec26b1b817f88507a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 12:01:55 +0900 Subject: sh: Add EDOSK7760 mach type. Signed-off-by: Paul Mundt diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index 0a11cc08..d4fb11f 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types @@ -30,6 +30,7 @@ HP6XX SH_HP6XX DREAMCAST SH_DREAMCAST SNAPGEAR SH_SECUREEDGE5410 EDOSK7705 SH_EDOSK7705 +EDOSK7760 SH_EDOSK7760 SH4202_MICRODEV SH_SH4202_MICRODEV SH03 SH_SH03 LANDISK SH_LANDISK -- cgit v0.10.2 From e7cb016e5a3163e2999d8715390d64eb46816655 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 12:02:17 +0900 Subject: sh: Mark kretprobe_trampoline_holder static and __used. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index c4f4a09..ac60749 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -277,7 +277,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) * here. When a retprobed function returns, this probe is hit and * trampoline_probe_handler() runs, calling the kretprobe's handler. */ -void kretprobe_trampoline_holder(void) +static void __used kretprobe_trampoline_holder(void) { asm volatile ("kretprobe_trampoline: \n" "nop\n"); } -- cgit v0.10.2 From fc63562ac2107dfa843f5288fe985fc6f0021c17 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 12:10:35 +0900 Subject: sh: Disable seccomp support by default. This was initially checked in with a stupid default of y, while most everyone is going to want to have this disabled anyways. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 334917a..c5b08eb 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -496,7 +496,6 @@ config CRASH_DUMP config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" depends on PROC_FS - default y help This kernel feature is useful for number crunching applications that may need to compute untrusted bytecode during their -- cgit v0.10.2 From 037c10a612e8b7461e33672fb3848807ac6e2346 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 12:22:47 +0900 Subject: sh: kprobes: Hook up kprobe_fault_handler() in the page fault path. 
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h index 70fc629..756a5cd 100644 --- a/arch/sh/include/asm/kprobes.h +++ b/arch/sh/include/asm/kprobes.h @@ -6,8 +6,6 @@ #include #include -struct pt_regs; - typedef u16 kprobe_opcode_t; #define BREAKPOINT_INSTRUCTION 0xc3ff @@ -48,6 +46,7 @@ struct kprobe_ctlblk { struct prev_kprobe prev_kprobe; }; +extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); extern int kprobe_handle_illslot(unsigned long pc); diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index ac60749..81a3725 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -393,7 +393,7 @@ static inline int post_kprobe_handler(struct pt_regs *regs) return 1; } -static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index e8efda9..659811c 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -2,7 +2,7 @@ * Page fault handler for SH with an MMU. * * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2003 - 2007 Paul Mundt + * Copyright (C) 2003 - 2008 Paul Mundt * * Based on linux/arch/i386/mm/fault.c: * Copyright (C) 1995 Linus Torvalds @@ -21,6 +21,27 @@ #include #include +#ifdef CONFIG_KPROBES +static inline int notify_page_fault(struct pt_regs *regs, int trap) +{ + int ret = 0; + + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, trap)) + ret = 1; + preempt_enable(); + } + + return ret; +} +#else +static inline int notify_page_fault(struct pt_regs *regs, int trap) +{ + return 0; +} +#endif + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -37,6 +58,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, int fault; siginfo_t info; + if (notify_page_fault(regs, writeaccess)) + return; + #ifdef CONFIG_SH_KGDB if (kgdb_nofault && kgdb_bus_err_hook) kgdb_bus_err_hook(); @@ -269,6 +293,9 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, pte_t *pte; pte_t entry; + if (notify_page_fault(regs, writeaccess)) + return 0; + #ifdef CONFIG_SH_KGDB if (kgdb_nofault && kgdb_bus_err_hook) kgdb_bus_err_hook(); -- cgit v0.10.2 From 174b5c9923e0170c844e03d55a9f3fb3b329a8b7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 18:10:10 +0900 Subject: sh: kprobes: Use trapa #0x3a for breakpoint trap. Not all parts support trapa #0xff, so use something within the debug trap range that's accessible on all parts. 
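A minimal sketch of the encoding behind this change (derived from the two opcode values in the patch below; the macro name is illustrative and not part of the patch): on SH, "trapa #imm" assembles to 0xc300 | imm, which is why the breakpoint opcode moves from 0xc3ff (trapa #0xff) to 0xc33a (trapa #0x3a).

	/* Illustrative only -- not part of the patch: "trapa #imm" encodes as 0xc300 | imm. */
	#define SH_TRAPA_OPCODE(imm)	(0xc300 | ((imm) & 0xff))
	/* SH_TRAPA_OPCODE(0x3a) == 0xc33a, SH_TRAPA_OPCODE(0xff) == 0xc3ff */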
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h index 756a5cd..6078d8e 100644 --- a/arch/sh/include/asm/kprobes.h +++ b/arch/sh/include/asm/kprobes.h @@ -7,7 +7,7 @@ #include typedef u16 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xc3ff +#define BREAKPOINT_INSTRUCTION 0xc33a #define MAX_INSN_SIZE 16 #define MAX_STACK_SIZE 64 diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 81a3725..fdd049e 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -525,8 +525,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) void __kprobes jprobe_return(void) { - __asm("trapa #-1\n\t" "jprobe_return_end:\n\t" "nop\n\t"); - + asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t"); } int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) -- cgit v0.10.2 From ee386de77419f9fedf206d84c4d4b2de0ead5bcb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 18:12:33 +0900 Subject: sh: kprobes: Default to NOTIFY_DONE for unhandled debug traps. Presently this is doing a force_sig() SIGTRAP, which is already taken care of in the generic code if no one asserts NOTIFY_STOP. Switch the default return to NOTIFY_DONE in the case of unhandled traps, so that the same trap may pass through to other users on the same die chain. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index fdd049e..75accf9 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -474,7 +474,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, ret = NOTIFY_STOP; } else { /* Not a kprobe trap */ - force_sig(SIGTRAP, current); + ret = NOTIFY_DONE; } } else { p = get_kprobe(addr); -- cgit v0.10.2 From 247bc6d2ae3e2de08529977952c7d085f9d562d4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 18:14:50 +0900 Subject: sh: kprobes: Fix up a preemption imbalance on jprobe return. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index 75accf9..f0e1c78 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -542,6 +542,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) MIN_STACK_SIZE(stack_addr)); kcb->kprobe_status = KPROBE_HIT_SS; + preempt_enable_no_resched(); return 1; } return 0; -- cgit v0.10.2 From 734db3770de03fbe9ae4e78519a7d1678189788c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 18:15:55 +0900 Subject: sh: kprobes: Fix up race against probe point removal. Handle a corner case where another CPU or debugger removes the probe point from underneath us. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index f0e1c78..a478ba7 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -252,6 +252,17 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) p = get_kprobe(addr); if (!p) { /* Not one of ours: let kernel handle it */ + if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. 
+ */ + ret = 1; + } + goto no_kprobe; } -- cgit v0.10.2 From 4eb5845d6cbdb9bf03f563c22f3a54115121858f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 18:22:47 +0900 Subject: sh: kprobes: __kprobes annotations and formatting cleanups. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index a478ba7..e357e36 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -115,20 +115,20 @@ void __kprobes arch_remove_kprobe(struct kprobe *p) } } -static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; } -static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; kcb->kprobe_status = kcb->prev_kprobe.status; } -static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = p; } @@ -138,7 +138,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, * on the next instruction, following branches. Two probes are set if the * branch is conditional. */ -static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) { kprobe_opcode_t *addr = NULL; saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc); @@ -273,12 +273,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) /* handler has already set things up, so skip ss setup */ return 1; - ss_probe: +ss_probe: prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; - no_kprobe: +no_kprobe: preempt_enable_no_resched(); return ret; } @@ -358,7 +358,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) return orig_ret_address; } -static inline int post_kprobe_handler(struct pt_regs *regs) +static int __kprobes post_kprobe_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -391,14 +391,15 @@ static inline int post_kprobe_handler(struct pt_regs *regs) } } - /*Restore back the original saved kprobes variables and continue. */ + /* Restore back the original saved kprobes variables and continue. 
*/ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); goto out; } + reset_current_kprobe(); - out: +out: preempt_enable_no_resched(); return 1; @@ -463,6 +464,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) default: break; } + return 0; } @@ -498,8 +500,8 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, ret = NOTIFY_STOP; } else { p = __get_cpu_var(current_kprobe); - if (p->break_handler - && p->break_handler(p, args->regs)) + if (p->break_handler && + p->break_handler(p, args->regs)) ret = NOTIFY_STOP; } } @@ -542,25 +544,26 @@ void __kprobes jprobe_return(void) int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - u8 *addr = (u8 *) regs->pc; unsigned long stack_addr = kcb->jprobe_saved_r15; + u8 *addr = (u8 *)regs->pc; - if ((addr >= (u8 *) jprobe_return) - && (addr <= (u8 *) jprobe_return_end)) { + if ((addr >= (u8 *)jprobe_return) && + (addr <= (u8 *)jprobe_return_end)) { *regs = kcb->jprobe_saved_regs; - memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, + memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack, MIN_STACK_SIZE(stack_addr)); kcb->kprobe_status = KPROBE_HIT_SS; preempt_enable_no_resched(); return 1; } + return 0; } static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, + .addr = (kprobe_opcode_t *)&kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; -- cgit v0.10.2 From cf204fa797cf968de8043491cd469ad0321d0940 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Sep 2008 20:47:42 +0900 Subject: sh: Derive calibrate_delay lpj from clk fwk. All CPUs must have a sensible cpu_clk definition these days, which we can safely use for deriving the preset loops_per_jiffy. The only odd one out is SH-5, which hasn't been hammered in to the framework yet. Based on the ST patch. Signed-off-by: Francesco Virlinzi Signed-off-by: Carl Shaw Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index c5b08eb..bbdcd64 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -25,6 +25,7 @@ config SUPERH32 config SUPERH64 def_bool y if CPU_SH5 + select GENERIC_CALIBRATE_DELAY config ARCH_DEFCONFIG string @@ -57,7 +58,7 @@ config GENERIC_IRQ_PROBE def_bool y config GENERIC_CALIBRATE_DELAY - def_bool y + bool config GENERIC_IOMAP bool diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index fc098c8..267b344 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include #include @@ -180,6 +182,24 @@ static inline void __init reserve_crashkernel(void) {} #endif +#ifndef CONFIG_GENERIC_CALIBRATE_DELAY +void __cpuinit calibrate_delay(void) +{ + struct clk *clk = clk_get(NULL, "cpu_clk"); + + if (IS_ERR(clk)) + panic("Need a sane CPU clock definition!"); + + loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ; + + printk(KERN_INFO "Calibrating delay loop (skipped)... " + "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n", + loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ)) % 100, + loops_per_jiffy); +} +#endif + void __init __add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn) { -- cgit v0.10.2 From 61098a086671e2d386104e562fb7cd97cc197d1b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 9 Sep 2008 06:15:47 +0900 Subject: sh: Add R2D+ defconfig for qemu system emulator. This adds a defconfig for the R2D+ target in the qemu system emulator. 
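(A usage sketch, assuming standard kbuild conventions rather than anything stated in this patch: the configuration would typically be selected with "make ARCH=sh rts7751r2dplus_qemu_defconfig" and the resulting zImage booted on qemu-system-sh4's R2D machine model.)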
Eventually it will be possible to simply use the r2d+ defconfig as it is. Provided by Shin-ichiro KAWASAKI. Signed-off-by: Paul Mundt diff --git a/arch/sh/configs/rts7751r2dplus_qemu_defconfig b/arch/sh/configs/rts7751r2dplus_qemu_defconfig new file mode 100644 index 0000000..a72796c --- /dev/null +++ b/arch/sh/configs/rts7751r2dplus_qemu_defconfig @@ -0,0 +1,909 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.27-rc2 +# Mon Aug 18 22:17:44 2008 +# +CONFIG_SUPERH=y +CONFIG_SUPERH32=y +CONFIG_ARCH_DEFCONFIG="arch/sh/configs/shx3_defconfig" +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_SYS_SUPPORTS_PCI=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_ARCH_NO_VIRT_TO_BUS=y +CONFIG_IO_TRAPPED=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_CGROUPS is not set +CONFIG_GROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_USER_SCHED=y +# CONFIG_CGROUP_SCHED is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +# CONFIG_HOTPLUG is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_COMPAT_BRK=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_ANON_INODES=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_PROFILING=y +# CONFIG_MARKERS is not set +CONFIG_OPROFILE=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set +# CONFIG_HAVE_IOREMAP_PROT is not set +# CONFIG_HAVE_KPROBES is not set +# CONFIG_HAVE_KRETPROBES is not set +# CONFIG_HAVE_ARCH_TRACEHOOK is not set +# CONFIG_HAVE_DMA_ATTRS is not set +# CONFIG_USE_GENERIC_SMP_HELPERS is not set +CONFIG_HAVE_CLK=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +# CONFIG_MODULE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_LSF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_DEFAULT_AS=y +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set 
+CONFIG_DEFAULT_IOSCHED="anticipatory" +CONFIG_CLASSIC_RCU=y + +# +# System type +# +CONFIG_CPU_SH4=y +# CONFIG_CPU_SUBTYPE_SH7619 is not set +# CONFIG_CPU_SUBTYPE_SH7203 is not set +# CONFIG_CPU_SUBTYPE_SH7206 is not set +# CONFIG_CPU_SUBTYPE_SH7263 is not set +# CONFIG_CPU_SUBTYPE_MXG is not set +# CONFIG_CPU_SUBTYPE_SH7705 is not set +# CONFIG_CPU_SUBTYPE_SH7706 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7710 is not set +# CONFIG_CPU_SUBTYPE_SH7712 is not set +# CONFIG_CPU_SUBTYPE_SH7720 is not set +# CONFIG_CPU_SUBTYPE_SH7721 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7091 is not set +# CONFIG_CPU_SUBTYPE_SH7750R is not set +# CONFIG_CPU_SUBTYPE_SH7750S is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +CONFIG_CPU_SUBTYPE_SH7751R=y +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_SH4_202 is not set +# CONFIG_CPU_SUBTYPE_SH7723 is not set +# CONFIG_CPU_SUBTYPE_SH7763 is not set +# CONFIG_CPU_SUBTYPE_SH7770 is not set +# CONFIG_CPU_SUBTYPE_SH7780 is not set +# CONFIG_CPU_SUBTYPE_SH7785 is not set +# CONFIG_CPU_SUBTYPE_SHX3 is not set +# CONFIG_CPU_SUBTYPE_SH7343 is not set +# CONFIG_CPU_SUBTYPE_SH7722 is not set +# CONFIG_CPU_SUBTYPE_SH7366 is not set +# CONFIG_CPU_SUBTYPE_SH5_101 is not set +# CONFIG_CPU_SUBTYPE_SH5_103 is not set + +# +# Memory management options +# +CONFIG_QUICKLIST=y +CONFIG_MMU=y +CONFIG_PAGE_OFFSET=0x80000000 +CONFIG_MEMORY_START=0x0c000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_29BIT=y +CONFIG_VSYSCALL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_MAX_ACTIVE_REGIONS=1 +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_ENTRY_OFFSET=0x00001000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPARSEMEM_STATIC=y +# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_RESOURCES_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_NR_QUICK=2 + +# +# Cache configuration +# +# CONFIG_SH_DIRECT_MAPPED is not set +CONFIG_CACHE_WRITEBACK=y +# CONFIG_CACHE_WRITETHROUGH is not set +# CONFIG_CACHE_OFF is not set + +# +# Processor features +# +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SH_FPU=y +# CONFIG_SH_STORE_QUEUES is not set +CONFIG_CPU_HAS_INTEVT=y +CONFIG_CPU_HAS_SR_RB=y +CONFIG_CPU_HAS_PTEA=y +CONFIG_CPU_HAS_FPU=y + +# +# Board support +# +# CONFIG_SH_7751_SYSTEMH is not set +# CONFIG_SH_SECUREEDGE5410 is not set +CONFIG_SH_RTS7751R2D=y +# CONFIG_SH_LANDISK is not set +# CONFIG_SH_TITAN is not set +# CONFIG_SH_LBOX_RE2 is not set + +# +# RTS7751R2D Board Revision +# +CONFIG_RTS7751R2D_PLUS=y +# CONFIG_RTS7751R2D_1 is not set + +# +# Timer and clock configuration +# +CONFIG_SH_TMU=y +CONFIG_SH_TIMER_IRQ=16 +CONFIG_SH_PCLK_FREQ=60000000 +# CONFIG_TICK_ONESHOT is not set +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +# CONFIG_SH_DMA is not set + +# +# Companion Chips +# + +# +# Additional SuperH Device Drivers +# +CONFIG_HEARTBEAT=y +# 
CONFIG_PUSH_SWITCH is not set + +# +# Kernel features +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +# CONFIG_SCHED_HRTICK is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_SECCOMP=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_GUSA=y +# CONFIG_GUSA_RB is not set + +# +# Boot options +# +CONFIG_ZERO_PAGE_OFFSET=0x00010000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +# CONFIG_UBC_WAKEUP is not set +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial" + +# +# Bus options +# +# CONFIG_PCI is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +# CONFIG_NET is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_MTD is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_BLK_DEV_HD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_TGT is not set +# CONFIG_SCSI_NETLINK is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set + +# +# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs +# +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set +CONFIG_SCSI_WAIT_SCAN=m + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_DH is not set +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_SATA_PMP=y +CONFIG_ATA_SFF=y +# CONFIG_SATA_MV is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_MD is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_CONSOLE is not set +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=1 +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_I2C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_BITBANG=y +# CONFIG_SPI_SH_SCI is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_AT25 is not set +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_HWMON_DEBUG_CHIP is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set + +# +# Sonics Silicon Backplane +# +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +CONFIG_MFD_SM501=y +# CONFIG_HTC_PASIC3 is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_DAB=y + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set 
+# CONFIG_FB_DDC is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +CONFIG_FB_SH_MOBILE_LCDC=m +CONFIG_FB_SM501=y +# CONFIG_FB_VIRTUAL is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_LOGO_SUPERH_MONO is not set +# CONFIG_LOGO_SUPERH_VGA16 is not set +CONFIG_LOGO_SUPERH_CLUT224=y +CONFIG_SOUND=y +CONFIG_SND=m +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +# CONFIG_SND_DYNAMIC_MINORS is not set +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +CONFIG_SND_SPI=y +CONFIG_SND_SUPERH=y +# CONFIG_SND_SOC is not set +CONFIG_SOUND_PRIME=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HID_DEBUG is not set +# CONFIG_HIDRAW is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +CONFIG_RTC_DRV_R9701=y +# CONFIG_RTC_DRV_RS5C348 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_SH is not set +# CONFIG_DMADEVICES is not set +# CONFIG_UIO is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4DEV_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +CONFIG_DNOTIFY=y +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# 
DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +CONFIG_MINIX_FS=y +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +CONFIG_NLS_CODEPAGE_932=y +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_VM is not set +# 
CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_SAMPLES is not set +# CONFIG_SH_STANDARD_BIOS is not set +CONFIG_EARLY_SCIF_CONSOLE=y +CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000 +CONFIG_EARLY_PRINTK=y +# CONFIG_DEBUG_BOOTMEM is not set +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_4KSTACKS is not set +# CONFIG_IRQSTACKS is not set +# CONFIG_SH_KGDB is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_MANAGER is not set +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 is not set +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_DES is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_LZO is not set +CONFIG_CRYPTO_HW=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +# CONFIG_GENERIC_FIND_FIRST_BIT is not set +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +CONFIG_CRC_T10DIF=y +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_PLIST=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y -- cgit v0.10.2 From 6eb2139b3dc3e1c5181a7cdf83a517c57c34bb12 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 9 Sep 2008 08:13:28 +0900 Subject: sh: kprobes: kretprobe_trampoline needs to be global. Needed by CONFIG_TRACING. 
Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c index e357e36..c96850b 100644 --- a/arch/sh/kernel/kprobes.c +++ b/arch/sh/kernel/kprobes.c @@ -290,7 +290,9 @@ no_kprobe: */ static void __used kretprobe_trampoline_holder(void) { - asm volatile ("kretprobe_trampoline: \n" "nop\n"); + asm volatile (".globl kretprobe_trampoline\n" + "kretprobe_trampoline:\n\t" + "nop\n"); } /* -- cgit v0.10.2 From b21a91043592434e2847c4b552be7b51851d92c3 Mon Sep 17 00:00:00 2001 From: roel kluin Date: Tue, 9 Sep 2008 23:02:43 +0200 Subject: sh: intc_prio_data() test before subtraction on unsigned bit is unsigned, so test before subtraction Signed-off-by: Roel Kluin Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c index 8c70e20..94536d3 100644 --- a/arch/sh/kernel/cpu/irq/intc.c +++ b/arch/sh/kernel/cpu/irq/intc.c @@ -464,9 +464,10 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc, } fn += (pr->reg_width >> 3) - 1; - bit = pr->reg_width - ((j + 1) * pr->field_width); - BUG_ON(bit < 0); + BUG_ON((j + 1) * pr->field_width > pr->reg_width); + + bit = pr->reg_width - ((j + 1) * pr->field_width); return _INTC_MK(fn, mode, intc_get_reg(d, reg_e), @@ -531,9 +532,10 @@ static unsigned int __init intc_sense_data(struct intc_desc *desc, fn = REG_FN_MODIFY_BASE; fn += (sr->reg_width >> 3) - 1; - bit = sr->reg_width - ((j + 1) * sr->field_width); - BUG_ON(bit < 0); + BUG_ON((j + 1) * sr->field_width > sr->reg_width); + + bit = sr->reg_width - ((j + 1) * sr->field_width); return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg), 0, sr->field_width, bit); -- cgit v0.10.2 From cc3c080d9f4484021e7b14f99de94a8c85a668d5 Mon Sep 17 00:00:00 2001 From: roel kluin Date: Wed, 10 Sep 2008 19:22:44 +0200 Subject: sh_eth: unsigned ndev->irq cannot be negative unsigned ndev->irq cannot be negative Signed-off-by: Roel Kluin Signed-off-by: Paul Mundt diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 1c370e6..1a04814 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -1205,11 +1205,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev) devno = 0; ndev->dma = -1; - ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq < 0) { + ret = platform_get_irq(pdev, 0); + if (ret < 0) { ret = -ENODEV; goto out_release; } + ndev->irq = ret; SET_NETDEV_DEV(ndev, &pdev->dev); -- cgit v0.10.2 From 2641dc92b3c7f979c7e4820cff2e765664358982 Mon Sep 17 00:00:00 2001 From: roel kluin Date: Wed, 10 Sep 2008 19:34:44 +0200 Subject: rtc-sh: Unsigned rtc->{periodic,carry,alarm}_irq cannot be negative possibly since commit b420b1a7a17ea88531d0e12b2f2679a0c8365803 Signed-off-by: Roel Kluin Signed-off-by: Paul Mundt diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 1f88e9e..690a780 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -575,7 +575,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) struct sh_rtc *rtc; struct resource *res; unsigned int tmp; - int ret = -ENOENT; + int ret; rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL); if (unlikely(!rtc)) @@ -584,26 +584,33 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev) spin_lock_init(&rtc->lock); /* get periodic/carry/alarm irqs */ - rtc->periodic_irq = platform_get_irq(pdev, 0); - if (unlikely(rtc->periodic_irq < 0)) { + ret = platform_get_irq(pdev, 0); + if (unlikely(ret < 0)) { + ret = -ENOENT; dev_err(&pdev->dev, "No IRQ for period\n"); goto err_badres; } + rtc->periodic_irq = ret; - rtc->carry_irq = 
platform_get_irq(pdev, 1); - if (unlikely(rtc->carry_irq < 0)) { + ret = platform_get_irq(pdev, 1); + if (unlikely(ret < 0)) { + ret = -ENOENT; dev_err(&pdev->dev, "No IRQ for carry\n"); goto err_badres; } + rtc->carry_irq = ret; - rtc->alarm_irq = platform_get_irq(pdev, 2); - if (unlikely(rtc->alarm_irq < 0)) { + ret = platform_get_irq(pdev, 2); + if (unlikely(ret < 0)) { + ret = -ENOENT; dev_err(&pdev->dev, "No IRQ for alarm\n"); goto err_badres; } + rtc->alarm_irq = ret; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (unlikely(res == NULL)) { + ret = -ENOENT; dev_err(&pdev->dev, "No IO resource\n"); goto err_badres; } -- cgit v0.10.2 From 3bd012060f962567aadb52b27b2fc8fdc91102c7 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 8 Sep 2008 08:58:59 -0700 Subject: hrtimer: make the nanosleep() syscall use the per process slack This patch makes the nanosleep() system call use the per process slack value; with this users are able to externally control existing applications to reduce the wakeup rate. Signed-off-by: Arjan van de Ven diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index a022209..9a4c901 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1563,9 +1563,14 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, struct restart_block *restart; struct hrtimer_sleeper t; int ret = 0; + unsigned long slack; + + slack = current->timer_slack_ns; + if (rt_task(current)) + slack = 0; hrtimer_init_on_stack(&t.timer, clockid, mode); - hrtimer_set_expires(&t.timer, timespec_to_ktime(*rqtp)); + hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); if (do_nanosleep(&t, mode)) goto out; -- cgit v0.10.2 From ae4b748e81b7e366f04f55229d5e372e372c33af Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 8 Sep 2008 09:03:57 -0700 Subject: hrtimer: make the futex() system call use the per process slack value This patch makes the futex() system call use the per process slack value; with this users are able to externally control existing applications to reduce the wakeup rate. Signed-off-by: Arjan van de Ven diff --git a/kernel/futex.c b/kernel/futex.c index 4cd5b43..8af1002 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1296,10 +1296,14 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, if (!abs_time) schedule(); else { + unsigned long slack; + slack = current->timer_slack_ns; + if (rt_task(current)) + slack = 0; hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(&t, current); - hrtimer_set_expires(&t.timer, *abs_time); + hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack); hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); if (!hrtimer_active(&t.timer)) -- cgit v0.10.2 From 2e94d1f71f7e4404d997e6fb4f1618aa147d76f9 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Wed, 10 Sep 2008 16:06:00 -0700 Subject: hrtimer: peek at the timer queue just before going idle As part of going idle, we already look at the time of the next timer event to determine which C-state to select etc. This patch adds functionality that causes the timers that are past their soft expire time, to fire at this time, before we calculate the next wakeup time. This functionality will thus avoid wakeups by running timers before going idle rather than specially waking up for it. 
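The three hrtimer patches above by Arjan van de Ven give nanosleep() and futex waits an expiry range derived from the per-process timer slack, and fire soft-expired timers before entering idle. As a minimal user-space sketch of what this enables (not part of the series), a process can widen its own slack so that its periodic sleeps become candidates for coalescing; this assumes the PR_SET_TIMERSLACK prctl from the same timer-slack work is present in this tree, and the prctl number and the 5 ms figure below are illustrative assumptions:

/* Sketch: widen this process's timer slack so periodic sleeps may be coalesced. */
#include <stdio.h>
#include <time.h>
#include <sys/prctl.h>

#ifndef PR_SET_TIMERSLACK
#define PR_SET_TIMERSLACK 29	/* assumed value from the timer-slack series */
#endif

int main(void)
{
	struct timespec req = { 0, 10 * 1000 * 1000 };	/* 10 ms */
	int i;

	/* Widen this process's slack to 5 ms (the default is typically ~50 us). */
	if (prctl(PR_SET_TIMERSLACK, 5UL * 1000 * 1000, 0, 0, 0))
		perror("PR_SET_TIMERSLACK");

	/*
	 * Each nanosleep() now carries a 5 ms expiry range, so the kernel
	 * may batch these wakeups with other nearby timers.
	 */
	for (i = 0; i < 100; i++)
		nanosleep(&req, NULL);

	return 0;
}

A real-time task would get no slack, matching the rt_task(current) checks added in the nanosleep and futex patches above.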
Signed-off-by: Arjan van de Ven diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 5ce07b5..2e31484 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "cpuidle.h" @@ -60,6 +61,12 @@ static void cpuidle_idle_call(void) return; } + /* + * run any timers that can be run now, at this point + * before calculating the idle duration etc. + */ + hrtimer_peek_ahead_timers(); + /* ask the governor for the next state */ next_state = cpuidle_curr_governor->select(dev); if (need_resched()) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 95db11f..d93b1e1 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -326,6 +326,11 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) extern ktime_t ktime_get(void); extern ktime_t ktime_get_real(void); + +DECLARE_PER_CPU(struct tick_device, tick_cpu_device); +extern void hrtimer_peek_ahead_timers(void); + + /* Exported timer functions: */ /* Initialize timers: */ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 9a4c901..eb2cf98 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1381,6 +1381,36 @@ void hrtimer_interrupt(struct clock_event_device *dev) raise_softirq(HRTIMER_SOFTIRQ); } +/** + * hrtimer_peek_ahead_timers -- run soft-expired timers now + * + * hrtimer_peek_ahead_timers will peek at the timer queue of + * the current cpu and check if there are any timers for which + * the soft expires time has passed. If any such timers exist, + * they are run immediately and then removed from the timer queue. + * + */ +void hrtimer_peek_ahead_timers(void) +{ + unsigned long flags; + struct tick_device *td; + struct clock_event_device *dev; + + if (hrtimer_hres_active()) + return; + + local_irq_save(flags); + td = &__get_cpu_var(tick_cpu_device); + if (!td) + goto out; + dev = td->evtdev; + if (!dev) + goto out; + hrtimer_interrupt(dev); +out: + local_irq_restore(flags); +} + static void run_hrtimer_softirq(struct softirq_action *h) { run_hrtimer_pending(&__get_cpu_var(hrtimer_bases)); -- cgit v0.10.2 From 4018ffcfdf84faf17fbadcd29ea5eced26f9d9cb Mon Sep 17 00:00:00 2001 From: Luca Santini Date: Fri, 12 Sep 2008 18:07:16 +0900 Subject: sh: edosk7760 physmap-flash support. 
Signed-off-by: Luca Santini Signed-off-by: Paul Mundt diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c index 7cc5e11..4890ba7 100644 --- a/arch/sh/boards/board-edosk7760.c +++ b/arch/sh/boards/board-edosk7760.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,52 @@ #define ETHERNET_IRQ 5 +/* NOR flash */ +static struct mtd_partition edosk7760_nor_flash_partitions[] = { + { + .name = "bootloader", + .offset = 0, + .size = (1 * 1024 * 1024), /*1MB*/ + .mask_flags = MTD_WRITEABLE, /* Read-only */ + }, { + .name = "kernel", + .offset = MTDPART_OFS_APPEND, + .size = (2 * 1024 * 1024), /*2MB*/ + }, { + .name = "fs", + .offset = MTDPART_OFS_APPEND, + .size = (26 * 1024 * 1024), + }, { + .name = "other", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, + }, +}; + +static struct physmap_flash_data edosk7760_nor_flash_data = { + .width = 4, + .parts = edosk7760_nor_flash_partitions, + .nr_parts = ARRAY_SIZE(edosk7760_nor_flash_partitions), +}; + +static struct resource edosk7760_nor_flash_resources[] = { + [0] = { + .name = "NOR Flash", + .start = 0x00000000, + .end = (32 * 1024 * 1024) -1, /* 32MB*/ + .flags = IORESOURCE_MEM, + } +}; + +static struct platform_device edosk7760_nor_flash_device = { + .name = "physmap-flash", + .resource = edosk7760_nor_flash_resources, + .num_resources = ARRAY_SIZE(edosk7760_nor_flash_resources), + .dev = { + .platform_data = &edosk7760_nor_flash_data, + }, +}; + /* i2c initialization functions */ static struct sh7760_i2c_platdata i2c_pd = { .speed_khz = 400, @@ -121,9 +168,10 @@ static struct platform_device smc91x_dev = { /* platform init code */ static struct platform_device *edosk7760_devices[] __initdata = { + &smc91x_dev, + &edosk7760_nor_flash_device, &sh7760_i2c0_dev, &sh7760_i2c1_dev, - &smc91x_dev, }; static int __init init_edosk7760_devices(void) -- cgit v0.10.2 From 1b582d19ce9e14bf5bbc36688bf818b3c42574da Mon Sep 17 00:00:00 2001 From: Luca Santini Date: Fri, 12 Sep 2008 18:08:01 +0900 Subject: sh: update edosk7760 defconfig for physmap-flash. 
Signed-off-by: Luca Santini Signed-off-by: Paul Mundt diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig index f8ec32a..bef07fa 100644 --- a/arch/sh/configs/edosk7760_defconfig +++ b/arch/sh/configs/edosk7760_defconfig @@ -380,11 +380,11 @@ CONFIG_DEBUG_DEVRES=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y CONFIG_MTD_DEBUG=y -CONFIG_MTD_DEBUG_VERBOSE=3 +CONFIG_MTD_DEBUG_VERBOSE=0 CONFIG_MTD_CONCAT=y CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y # CONFIG_MTD_AR7_PARTS is not set # @@ -411,16 +411,16 @@ CONFIG_MTD_CFI_NOSWAP=y # CONFIG_MTD_CFI_BE_BYTE_SWAP is not set # CONFIG_MTD_CFI_LE_BYTE_SWAP is not set CONFIG_MTD_CFI_GEOMETRY=y -# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y CONFIG_MTD_MAP_BANK_WIDTH_2=y -# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set -# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_MAP_BANK_WIDTH_8=y +CONFIG_MTD_MAP_BANK_WIDTH_16=y +CONFIG_MTD_MAP_BANK_WIDTH_32=y CONFIG_MTD_CFI_I1=y -# CONFIG_MTD_CFI_I2 is not set -# CONFIG_MTD_CFI_I4 is not set -# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_I4=y +CONFIG_MTD_CFI_I8=y # CONFIG_MTD_OTP is not set CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y @@ -435,9 +435,9 @@ CONFIG_MTD_ABSENT=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0xA0000000 -CONFIG_MTD_PHYSMAP_LEN=0x01000000 -CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +CONFIG_MTD_PHYSMAP_START=0xffffffff +CONFIG_MTD_PHYSMAP_LEN=0x0 +CONFIG_MTD_PHYSMAP_BANKWIDTH=4 # CONFIG_MTD_PLATRAM is not set # -- cgit v0.10.2 From 09558748464a9afafe2848a3ad4cfd509c9b0fb6 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 18:58:28 +0900 Subject: sh: Provide a fixed UTS_MACHINE definition for sh64. Signed-off-by: Paul Mundt diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 01d85c7..0bc9560 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -76,8 +76,10 @@ KBUILD_IMAGE := $(defaultimage-y) # error messages during linking. # ifdef CONFIG_SUPERH32 +UTS_MACHINE := sh LDFLAGS_vmlinux += -e _stext else +UTS_MACHINE := sh64 LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \ --defsym phys_stext_shmedia=phys_stext+1 \ -e phys_stext_shmedia -- cgit v0.10.2 From 934135c19d8a1be435bae75aefc09b8ae1698b16 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 19:52:36 +0900 Subject: sh: ptrace: Introduce user_regset interface for gp regs. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index f01449a..455d9e1 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -108,6 +108,15 @@ typedef struct user_fpu_struct elf_fpregset_t; #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) +#if defined(CONFIG_SUPERH32) && \ + (!defined(CONFIG_SH_FPU) && !defined(CONFIG_SH_DSP)) +/* + * Enable dump using regset for general purpose registers, use this as + * the default once the FPU and DSP registers are moved over also. 
+ */ +#define CORE_DUMP_USE_REGSET +#endif + #define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC #define ELF_EXEC_PAGESIZE PAGE_SIZE diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index b86aeab..bf73646 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -87,12 +87,18 @@ struct pt_dspregs { unsigned long mod; }; +#define PTRACE_GETREGS 12 /* General registers */ +#define PTRACE_SETREGS 13 + +#define PTRACE_GETFPREGS 14 /* FPU registers */ +#define PTRACE_SETFPREGS 15 + #define PTRACE_GETFDPIC 31 /* get the ELF fdpic loadmap address */ #define PTRACE_GETFDPIC_EXEC 0 /* [addr] request the executable loadmap */ #define PTRACE_GETFDPIC_INTERP 1 /* [addr] request the interpreter loadmap */ -#define PTRACE_GETDSPREGS 55 +#define PTRACE_GETDSPREGS 55 /* DSP registers */ #define PTRACE_SETDSPREGS 56 #endif diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 84bf342..5e3ba10 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -1,12 +1,14 @@ /* - * linux/arch/sh/kernel/ptrace.c + * SuperH process tracing * - * Original x86 implementation: - * By Ross Biro 1/23/92 - * edited by Linus Torvalds + * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * Copyright (C) 2002 - 2008 Paul Mundt * - * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka - * Audit support: Yuichi Nakamura + * Audit support by Yuichi Nakamura + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ #include #include @@ -22,6 +24,8 @@ #include #include #include +#include +#include #include #include #include @@ -30,11 +34,6 @@ #include /* - * does not yet catch signals sent when the child dies. - * in exit.c or in signal.c. - */ - -/* * This routine will get a word off of the process kernel stack. 
*/ static inline int get_stack_long(struct task_struct *task, int offset) @@ -62,16 +61,12 @@ static inline int put_stack_long(struct task_struct *task, int offset, void user_enable_single_step(struct task_struct *child) { - struct pt_regs *regs = task_pt_regs(child); - long pc; - - pc = get_stack_long(child, (long)®s->pc); - /* Next scheduling will set up UBC */ if (child->thread.ubc_pc == 0) ubc_usercnt += 1; - child->thread.ubc_pc = pc; + child->thread.ubc_pc = get_stack_long(child, + offsetof(struct pt_regs, pc)); set_tsk_thread_flag(child, TIF_SINGLESTEP); } @@ -103,6 +98,83 @@ void ptrace_disable(struct task_struct *child) user_disable_single_step(child); } +static int genregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const struct pt_regs *regs = task_pt_regs(target); + int ret; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->regs, + 0, 16 * sizeof(unsigned long)); + if (!ret) + /* PC, PR, SR, GBR, MACH, MACL, TRA */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + ®s->pc, + offsetof(struct pt_regs, pc), + sizeof(struct pt_regs)); + if (!ret) + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_regs), -1); + + return ret; +} + +static int genregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->regs, + 0, 16 * sizeof(unsigned long)); + if (!ret && count > 0) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->pc, + offsetof(struct pt_regs, pc), + sizeof(struct pt_regs)); + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_regs), -1); + + return ret; +} + +/* + * These are our native regset flavours. + */ +enum sh_regset { + REGSET_GENERAL, +}; + +static const struct user_regset sh_regsets[] = { + /* + * Format is: + * R0 --> R15 + * PC, PR, SR, GBR, MACH, MACL, TRA + */ + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(long), + .align = sizeof(long), + .get = genregs_get, + .set = genregs_set, + }, +}; + +static const struct user_regset_view user_sh_native_view = { + .name = "sh", + .e_machine = EM_SH, + .regsets = sh_regsets, + .n = ARRAY_SIZE(sh_regsets), +}; + long arch_ptrace(struct task_struct *child, long request, long addr, long data) { struct user * dummy = NULL; @@ -159,6 +231,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) } break; + case PTRACE_GETREGS: + return copy_regset_to_user(child, &user_sh_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (void __user *)data); + case PTRACE_SETREGS: + return copy_regset_from_user(child, &user_sh_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (const void __user *)data); #ifdef CONFIG_SH_DSP case PTRACE_GETDSPREGS: { unsigned long dp; -- cgit v0.10.2 From cb700aa4f13d38726defab3060d3ebeaf67dc189 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 20:41:05 +0900 Subject: sh: ioremap_prot support. 
Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index bbdcd64..434183e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -12,6 +12,7 @@ config SUPERH select HAVE_IDE select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT + select HAVE_IOREMAP_PROT help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index e49cfee..1857666 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -347,6 +347,8 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) __ioremap_mode((offset), (size), _PAGE_CACHABLE) #define p3_ioremap(offset, size, flags) \ __ioremap((offset), (size), (flags)) +#define ioremap_prot(offset, size, flags) \ + __ioremap_mode((offset), (size), (flags)) #define iounmap(addr) \ __iounmap((addr)) diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 77fb8bf..5871d78 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -104,6 +104,8 @@ typedef struct { unsigned long pgd; } pgd_t; typedef struct page *pgtable_t; +#define pte_pgprot(x) __pgprot(pte_val(x) & PTE_FLAGS_MASK) + #endif /* !__ASSEMBLY__ */ /* diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index a4a8f8b..52220d7 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -76,6 +76,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #endif #define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) +#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) #ifdef CONFIG_SUPERH32 #define VMALLOC_START (P3SEG) -- cgit v0.10.2 From f8b890ab4ca60c05b5621b267712709d329f7612 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:08:20 +0900 Subject: sh: Flag T-bit for syscall restart. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 345de2f..be194d0 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -490,37 +490,43 @@ give_sigsegv: return -EFAULT; } +static inline void +handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, + struct sigaction *sa) +{ + /* If we're not from a syscall, bail out */ + if (regs->tra < 0) + return; + + /* check for system call restart.. */ + switch (regs->regs[0]) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + no_system_call_restart: + regs->regs[0] = -EINTR; + regs->sr |= 1; + break; + + case -ERESTARTSYS: + if (!(sa->sa_flags & SA_RESTART)) + goto no_system_call_restart; + /* fallthrough */ + case -ERESTARTNOINTR: + regs->regs[0] = save_r0; + regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); + break; + } +} + /* * OK, we're invoking a handler */ - static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0) { int ret; - /* Are we from a system call? */ - if (regs->tra >= 0) { - /* If so, check system call restarting.. 
*/ - switch (regs->regs[0]) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - no_system_call_restart: - regs->regs[0] = -EINTR; - break; - - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) - goto no_system_call_restart; - /* fallthrough */ - case -ERESTARTNOINTR: - regs->regs[0] = save_r0; - regs->pc -= instruction_size( - ctrl_inw(regs->pc - 4)); - break; - } - } /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) @@ -578,6 +584,9 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { + if (regs->sr & 1) + handle_syscall_restart(save_r0, regs, &ka.sa); + /* Whee! Actually deliver the signal. */ if (handle_signal(signr, &ka, &info, oldset, regs, save_r0) == 0) { -- cgit v0.10.2 From 9996b42ac06adb7555933366e071ec8824bcaa37 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:11:36 +0900 Subject: sh: provide user_stack_pointer(), needed for tracehook support. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 41d2321..1cd3a14 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -199,6 +199,8 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15]) +#define user_stack_pointer(regs) ((regs)->regs[15]) + #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") #define cpu_relax() barrier() diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 16609bc..ae19839 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -267,6 +267,8 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.pc) #define KSTK_ESP(tsk) ((tsk)->thread.sp) +#define user_stack_pointer(regs) ((regs)->sp) + #define cpu_relax() barrier() #endif /* __ASSEMBLY__ */ -- cgit v0.10.2 From fb4f87a2f048b4cb1a499c9baa78f1d8437b09c3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:13:13 +0900 Subject: sh: Provide the asm/syscall.h interface, needed by tracehook. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h new file mode 100644 index 0000000..6a38142 --- /dev/null +++ b/arch/sh/include/asm/syscall.h @@ -0,0 +1,10 @@ +#ifndef __ASM_SH_SYSCALL_H +#define __ASM_SH_SYSCALL_H + +#ifdef CONFIG_SUPERH32 +# include "syscall_32.h" +#else +# include "syscall_64.h" +#endif + +#endif /* __ASM_SH_SYSCALL_H */ diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h new file mode 100644 index 0000000..54773f2 --- /dev/null +++ b/arch/sh/include/asm/syscall_32.h @@ -0,0 +1,110 @@ +#ifndef __ASM_SH_SYSCALL_32_H +#define __ASM_SH_SYSCALL_32_H + +#include +#include +#include + +/* The system call number is given by the user in %g1 */ +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return (regs->tra >= 0) ? regs->regs[3] : -1L; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* + * XXX: This needs some thought. On SH we don't + * save away the original r0 value anywhere. + */ +} + +static inline bool syscall_has_error(struct pt_regs *regs) +{ + return (regs->sr & 0x1) ? 
true : false; +} +static inline void syscall_set_error(struct pt_regs *regs) +{ + regs->sr |= 0x1; +} +static inline void syscall_clear_error(struct pt_regs *regs) +{ + regs->sr &= ~0x1; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + return syscall_has_error(regs) ? regs->regs[0] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + syscall_set_error(regs); + regs->regs[0] = -error; + } else { + syscall_clear_error(regs); + regs->regs[0] = val; + } +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + /* + * Do this simply for now. If we need to start supporting + * fetching arguments from arbitrary indices, this will need some + * extra logic. Presently there are no in-tree users that depend + * on this behaviour. + */ + BUG_ON(i); + + /* Argument pattern is: R4, R5, R6, R7, R0, R1 */ + switch (n) { + case 6: args[5] = regs->regs[1]; + case 5: args[4] = regs->regs[0]; + case 4: args[3] = regs->regs[7]; + case 3: args[2] = regs->regs[6]; + case 2: args[1] = regs->regs[5]; + case 1: args[0] = regs->regs[4]; + break; + default: + BUG(); + } +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args) +{ + /* Same note as above applies */ + BUG_ON(i); + + switch (n) { + case 6: regs->regs[1] = args[5]; + case 5: regs->regs[0] = args[4]; + case 4: regs->regs[7] = args[3]; + case 3: regs->regs[6] = args[2]; + case 2: regs->regs[5] = args[1]; + case 1: regs->regs[4] = args[0]; + break; + default: + BUG(); + } +} + +#endif /* __ASM_SH_SYSCALL_32_H */ diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h new file mode 100644 index 0000000..bcaaa8c --- /dev/null +++ b/arch/sh/include/asm/syscall_64.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH_SYSCALL_64_H +#define __ASM_SH_SYSCALL_64_H + +#include + +#endif /* __ASM_SH_SYSCALL_64_H */ -- cgit v0.10.2 From 3231739d97b348c628a10fb49adfa9143e1de28b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:13:36 +0900 Subject: sh: Enable HAVE_ARCH_TRACEHOOK. Now that the rest of the support requirements are out of the way, finally enable support for tracehook. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 434183e..f995d13 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -23,6 +23,7 @@ config SUPERH32 def_bool !SUPERH64 select HAVE_KPROBES select HAVE_KRETPROBES + select HAVE_ARCH_TRACEHOOK if (!SH_FPU && !SH_DSP) config SUPERH64 def_bool y if CPU_SH5 -- cgit v0.10.2 From 6bff1592d85c9fa1f1d9d4de1cd0e104279544a6 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:41:30 +0900 Subject: sh: Fix up NUMA build error with se7722_defconfig. 
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index 2969253..7f5363b 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h @@ -4,6 +4,8 @@ #ifdef __KERNEL__ #ifdef CONFIG_NEED_MULTIPLE_NODES +#include + extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h index 554f865..1b7856f 100644 --- a/arch/sh/include/asm/setup.h +++ b/arch/sh/include/asm/setup.h @@ -1,7 +1,7 @@ #ifndef _SH_SETUP_H #define _SH_SETUP_H -#include +#include #define COMMAND_LINE_SIZE 256 -- cgit v0.10.2 From 5dadb34394d59313e2e763ae8e2fc911e9fc557c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:42:10 +0900 Subject: sh: Add DSP registers to regset interface. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f995d13..acaba1b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -23,7 +23,7 @@ config SUPERH32 def_bool !SUPERH64 select HAVE_KPROBES select HAVE_KRETPROBES - select HAVE_ARCH_TRACEHOOK if (!SH_FPU && !SH_DSP) + select HAVE_ARCH_TRACEHOOK if !SH_FPU config SUPERH64 def_bool y if CPU_SH5 diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 455d9e1..7c2363f 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -108,11 +108,10 @@ typedef struct user_fpu_struct elf_fpregset_t; #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) -#if defined(CONFIG_SUPERH32) && \ - (!defined(CONFIG_SH_FPU) && !defined(CONFIG_SH_DSP)) +#if defined(CONFIG_SUPERH32) && !defined(CONFIG_SH_FPU) /* * Enable dump using regset for general purpose registers, use this as - * the default once the FPU and DSP registers are moved over also. + * the default once the FPU registers are moved over also. 
*/ #define CORE_DUMP_USE_REGSET #endif diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index bf73646..3ad18e9 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -123,6 +123,9 @@ extern void user_disable_single_step(struct task_struct *); #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1) +#define task_pt_dspregs(task) \ + ((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE \ + - sizeof(unsigned long)) - 1) #else #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \ diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 5e3ba10..20b103f 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -145,11 +145,50 @@ static int genregs_set(struct task_struct *target, return ret; } +#ifdef CONFIG_SH_DSP +static int dspregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + const struct pt_dspregs *regs = task_pt_dspregs(target); + int ret; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, + 0, sizeof(struct pt_dspregs)); + if (!ret) + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_dspregs), -1); + + return ret; +} + +static int dspregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_dspregs *regs = task_pt_dspregs(target); + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, + 0, sizeof(struct pt_dspregs)); + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_dspregs), -1); + + return ret; +} +#endif + /* * These are our native regset flavours. 
*/ enum sh_regset { REGSET_GENERAL, +#ifdef CONFIG_SH_DSP + REGSET_DSP, +#endif }; static const struct user_regset sh_regsets[] = { @@ -166,6 +205,16 @@ static const struct user_regset sh_regsets[] = { .get = genregs_get, .set = genregs_set, }, + +#ifdef CONFIG_SH_DSP + [REGSET_DSP] = { + .n = sizeof(struct pt_dspregs) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .get = dspregs_get, + .set = dspregs_set, + }, +#endif }; static const struct user_regset_view user_sh_native_view = { @@ -242,33 +291,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) 0, sizeof(struct pt_regs), (const void __user *)data); #ifdef CONFIG_SH_DSP - case PTRACE_GETDSPREGS: { - unsigned long dp; - - ret = -EIO; - dp = ((unsigned long) child) + THREAD_SIZE - - sizeof(struct pt_dspregs); - if (*((int *) (dp - 4)) == SR_FD) { - copy_to_user((void *)addr, (void *) dp, - sizeof(struct pt_dspregs)); - ret = 0; - } - break; - } - - case PTRACE_SETDSPREGS: { - unsigned long dp; - - ret = -EIO; - dp = ((unsigned long) child) + THREAD_SIZE - - sizeof(struct pt_dspregs); - if (*((int *) (dp - 4)) == SR_FD) { - copy_from_user((void *) dp, (void *)addr, - sizeof(struct pt_dspregs)); - ret = 0; - } - break; - } + case PTRACE_GETDSPREGS: + return copy_regset_to_user(child, &user_sh_native_view, + REGSET_DSP, + 0, sizeof(struct pt_dspregs), + (void __user *)data); + case PTRACE_SETDSPREGS: + return copy_regset_from_user(child, &user_sh_native_view, + REGSET_DSP, + 0, sizeof(struct pt_dspregs), + (const void __user *)data); #endif #ifdef CONFIG_BINFMT_ELF_FDPIC case PTRACE_GETFDPIC: { -- cgit v0.10.2 From f9540ececaa2cf94b6760741c82f25097e662383 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:42:43 +0900 Subject: sh: Add missing task_user_regset_view() definition. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 20b103f..92fe203 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -224,6 +224,11 @@ static const struct user_regset_view user_sh_native_view = { .n = ARRAY_SIZE(sh_regsets), }; +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_sh_native_view; +} + long arch_ptrace(struct task_struct *child, long request, long addr, long data) { struct user * dummy = NULL; -- cgit v0.10.2 From 72461997c3c66c29775afa68ca31bea16bf17f39 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 22:56:35 +0900 Subject: sh: Check SR.DSP bit for DSP regset validity. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 92fe203..0f44f2b 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -179,6 +179,14 @@ static int dspregs_set(struct task_struct *target, return ret; } + +static int dspregs_active(struct task_struct *target, + const struct user_regset *regset) +{ + struct pt_regs *regs = task_pt_regs(target); + + return regs->sr & SR_DSP ? regset->n : 0; +} #endif /* @@ -213,6 +221,7 @@ static const struct user_regset sh_regsets[] = { .align = sizeof(long), .get = dspregs_get, .set = dspregs_set, + .active = dspregs_active, }, #endif }; -- cgit v0.10.2 From 0e660d2d433393f983cd58fe8c54f831fa7c7713 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Sep 2008 23:27:46 +0900 Subject: sh: Tidy up ELF core dumps. These have been using overrides for ELF_CORE_COPY_TASK_REGS and ELF_CORE_COPY_FPREGS while the generic versions can be used instead. 
Presently the pt_regs are also duplicated across elf_core_copy_regs() and elf_core_copy_task_regs(), this switches to simply copying out through elf_core_copy_regs() instead. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 7c2363f..6b2cec8 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -198,12 +198,6 @@ do { \ #endif #define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) -struct task_struct; -extern int dump_task_regs (struct task_struct *, elf_gregset_t *); -extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); - -#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) -#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) #ifdef CONFIG_VSYSCALL /* vDSO has arch_setup_additional_pages */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 12e617b..8497de8 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -21,7 +21,6 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_STACKTRACE) += stacktrace.o -obj-$(CONFIG_ELF_CORE) += dump_task.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index 6edf53b..e987eb2 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -17,7 +17,6 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_STACKTRACE) += stacktrace.o -obj-$(CONFIG_BINFMT_ELF) += dump_task.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/dump_task.c b/arch/sh/kernel/dump_task.c deleted file mode 100644 index 1db7ce0..0000000 --- a/arch/sh/kernel/dump_task.c +++ /dev/null @@ -1,32 +0,0 @@ -#include -#include -#include - -/* - * Capture the user space registers if the task is not running (in user space) - */ -int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) -{ - struct pt_regs ptregs; - - ptregs = *task_pt_regs(tsk); - elf_core_copy_regs(regs, &ptregs); - - return 1; -} - -int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu) -{ - int fpvalid = 0; - -#if defined(CONFIG_SH_FPU) - fpvalid = !!tsk_used_math(tsk); - if (fpvalid) { - unlazy_fpu(tsk, task_pt_regs(tsk)); - memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); - } -#endif - - return fpvalid; -} - -- cgit v0.10.2 From 5a89f1adbc5ce44988aab0c370ae2f1478061307 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 13 Sep 2008 01:44:03 +0900 Subject: sh: latencytop support. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index acaba1b..18a1cc8 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -100,6 +100,10 @@ config STACKTRACE_SUPPORT config LOCKDEP_SUPPORT def_bool y +config HAVE_LATENCYTOP_SUPPORT + def_bool y + depends on !SMP + config ARCH_HAS_ILOG2_U32 def_bool n diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 54d1f61..1a2a5eb 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -3,7 +3,7 @@ * * Stack trace management functions * - * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2006 - 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -36,3 +36,24 @@ void save_stack_trace(struct stack_trace *trace) } } EXPORT_SYMBOL_GPL(save_stack_trace); + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + unsigned long *sp = (unsigned long *)tsk->thread.sp; + + while (!kstack_end(sp)) { + unsigned long addr = *sp++; + + if (__kernel_text_address(addr)) { + if (in_sched_functions(addr)) + break; + if (trace->skip > 0) + trace->skip--; + else + trace->entries[trace->nr_entries++] = addr; + if (trace->nr_entries >= trace->max_entries) + break; + } + } +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); -- cgit v0.10.2 From fdb0ac80618729e6b12121c66449b8532990eaf3 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 13 Sep 2008 19:57:04 -0700 Subject: async_tx: make async_tx_run_dependencies() easier to read * Rename 'next' to 'dep' * Move the channel switch check inside the loop to simplify termination Acked-by: Ilya Yanok Signed-off-by: Dan Williams diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index e8362c1..dcbf1be 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -115,34 +115,32 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); * (start) dependent operations on their target channel * @tx: transaction with dependencies */ -void -async_tx_run_dependencies(struct dma_async_tx_descriptor *tx) +void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx) { - struct dma_async_tx_descriptor *next = tx->next; + struct dma_async_tx_descriptor *dep = tx->next; + struct dma_async_tx_descriptor *dep_next; struct dma_chan *chan; - if (!next) + if (!dep) return; - tx->next = NULL; - chan = next->chan; + chan = dep->chan; /* keep submitting up until a channel switch is detected * in that case we will be called again as a result of * processing the interrupt from async_tx_channel_switch */ - while (next && next->chan == chan) { - struct dma_async_tx_descriptor *_next; - - spin_lock_bh(&next->lock); - next->parent = NULL; - _next = next->next; - if (_next && _next->chan == chan) - next->next = NULL; - spin_unlock_bh(&next->lock); - - next->tx_submit(next); - next = _next; + for (; dep; dep = dep_next) { + spin_lock_bh(&dep->lock); + dep->parent = NULL; + dep_next = dep->next; + if (dep_next && dep_next->chan == chan) + dep->next = NULL; /* ->next will be submitted */ + else + dep_next = NULL; /* submit current dep and terminate */ + spin_unlock_bh(&dep->lock); + + dep->tx_submit(dep); } chan->device->device_issue_pending(chan); -- cgit v0.10.2 From 89f72a0633d1d4f28c4c5c8831ec814523d7671a Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 13 Sep 2008 20:05:34 -0700 Subject: drivers/dma/ioat_dma.c: drop code after return The break after the return serves no purpose. 
Signed-off-by: Julia Lawall Reviewed-by: Richard Genoud Signed-off-by: Andrew Morton Signed-off-by: Dan Williams diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index bc8c6e3..1ef68b3 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -971,11 +971,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor( switch (ioat_chan->device->version) { case IOAT_VER_1_2: return ioat1_dma_get_next_descriptor(ioat_chan); - break; case IOAT_VER_2_0: case IOAT_VER_3_0: return ioat2_dma_get_next_descriptor(ioat_chan); - break; } return NULL; } -- cgit v0.10.2 From f06febc96ba8e0af80bcc3eaec0a109e88275fac Mon Sep 17 00:00:00 2001 From: Frank Mayhar Date: Fri, 12 Sep 2008 09:54:39 -0700 Subject: timers: fix itimer/many thread hang Overview This patch reworks the handling of POSIX CPU timers, including the ITIMER_PROF, ITIMER_VIRT timers and rlimit handling. It was put together with the help of Roland McGrath, the owner and original writer of this code. The problem we ran into, and the reason for this rework, has to do with using a profiling timer in a process with a large number of threads. It appears that the performance of the old implementation of run_posix_cpu_timers() was at least O(n*3) (where "n" is the number of threads in a process) or worse. Everything is fine with an increasing number of threads until the time taken for that routine to run becomes the same as or greater than the tick time, at which point things degrade rather quickly. This patch fixes bug 9906, "Weird hang with NPTL and SIGPROF." Code Changes This rework corrects the implementation of run_posix_cpu_timers() to make it run in constant time for a particular machine. (Performance may vary between one machine and another depending upon whether the kernel is built as single- or multiprocessor and, in the latter case, depending upon the number of running processors.) To do this, at each tick we now update fields in signal_struct as well as task_struct. The run_posix_cpu_timers() function uses those fields to make its decisions. We define a new structure, "task_cputime," to contain user, system and scheduler times and use these in appropriate places: struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; }; This is included in the structure "thread_group_cputime," which is a new substructure of signal_struct and which varies for uniprocessor versus multiprocessor kernels. For uniprocessor kernels, it uses "task_cputime" as a simple substructure, while for multiprocessor kernels it is a pointer: struct thread_group_cputime { struct task_cputime totals; }; struct thread_group_cputime { struct task_cputime *totals; }; We also add a new task_cputime substructure directly to signal_struct, to cache the earliest expiration of process-wide timers, and task_cputime also replaces the it_*_expires fields of task_struct (used for earliest expiration of thread timers). The "thread_group_cputime" structure contains process-wide timers that are updated via account_user_time() and friends. In the non-SMP case the structure is a simple aggregator; unfortunately in the SMP case that simplicity was not achievable due to cache-line contention between CPUs (in one measured case performance was actually _worse_ on a 16-cpu system than the same test on a 4-cpu system, due to this contention). For SMP, the thread_group_cputime counters are maintained as a per-cpu structure allocated using alloc_percpu(). 
The timer functions update only the timer field in the structure corresponding to the running CPU, obtained using per_cpu_ptr(). We define a set of inline functions in sched.h that we use to maintain the thread_group_cputime structure and hide the differences between UP and SMP implementations from the rest of the kernel. The thread_group_cputime_init() function initializes the thread_group_cputime structure for the given task. The thread_group_cputime_alloc() is a no-op for UP; for SMP it calls the out-of-line function thread_group_cputime_alloc_smp() to allocate and fill in the per-cpu structures and fields. The thread_group_cputime_free() function, also a no-op for UP, in SMP frees the per-cpu structures. The thread_group_cputime_clone_thread() function (also a UP no-op) for SMP calls thread_group_cputime_alloc() if the per-cpu structures haven't yet been allocated. The thread_group_cputime() function fills the task_cputime structure it is passed with the contents of the thread_group_cputime fields; in UP it's that simple but in SMP it must also safely check that tsk->signal is non-NULL (if it is it just uses the appropriate fields of task_struct) and, if so, sums the per-cpu values for each online CPU. Finally, the three functions account_group_user_time(), account_group_system_time() and account_group_exec_runtime() are used by timer functions to update the respective fields of the thread_group_cputime structure. Non-SMP operation is trivial and will not be mentioned further. The per-cpu structure is always allocated when a task creates its first new thread, via a call to thread_group_cputime_clone_thread() from copy_signal(). It is freed at process exit via a call to thread_group_cputime_free() from cleanup_signal(). All functions that formerly summed utime/stime/sum_sched_runtime values from from all threads in the thread group now use thread_group_cputime() to snapshot the values in the thread_group_cputime structure or the values in the task structure itself if the per-cpu structure hasn't been allocated. Finally, the code in kernel/posix-cpu-timers.c has changed quite a bit. The run_posix_cpu_timers() function has been split into a fast path and a slow path; the former safely checks whether there are any expired thread timers and, if not, just returns, while the slow path does the heavy lifting. With the dedicated thread group fields, timers are no longer "rebalanced" and the process_timer_rebalance() function and related code has gone away. All summing loops are gone and all code that used them now uses the thread_group_cputime() inline. When process-wide timers are set, the new task_cputime structure in signal_struct is used to cache the earliest expiration; this is checked in the fast path. Performance The fix appears not to add significant overhead to existing operations. It generally performs the same as the current code except in two cases, one in which it performs slightly worse (Case 5 below) and one in which it performs very significantly better (Case 2 below). Overall it's a wash except in those two cases. I've since done somewhat more involved testing on a dual-core Opteron system. Case 1: With no itimer running, for a test with 100,000 threads, the fixed kernel took 1428.5 seconds, 513 seconds more than the unfixed system, all of which was spent in the system. There were twice as many voluntary context switches with the fix as without it. 
Case 2: With an itimer running at .01 second ticks and 4000 threads (the most an unmodified kernel can handle), the fixed kernel ran the test in eight percent of the time (5.8 seconds as opposed to 70 seconds) and had better tick accuracy (.012 seconds per tick as opposed to .023 seconds per tick). Case 3: A 4000-thread test with an initial timer tick of .01 second and an interval of 10,000 seconds (i.e. a timer that ticks only once) had very nearly the same performance in both cases: 6.3 seconds elapsed for the fixed kernel versus 5.5 seconds for the unfixed kernel. With fewer threads (eight in these tests), the Case 1 test ran in essentially the same time on both the modified and unmodified kernels (5.2 seconds versus 5.8 seconds). The Case 2 test ran in about the same time as well, 5.9 seconds versus 5.4 seconds but again with much better tick accuracy, .013 seconds per tick versus .025 seconds per tick for the unmodified kernel. Since the fix affected the rlimit code, I also tested soft and hard CPU limits. Case 4: With a hard CPU limit of 20 seconds and eight threads (and an itimer running), the modified kernel was very slightly favored in that while it killed the process in 19.997 seconds of CPU time (5.002 seconds of wall time), only .003 seconds of that was system time, the rest was user time. The unmodified kernel killed the process in 20.001 seconds of CPU (5.014 seconds of wall time) of which .016 seconds was system time. Really, though, the results were too close to call. The results were essentially the same with no itimer running. Case 5: With a soft limit of 20 seconds and a hard limit of 2000 seconds (where the hard limit would never be reached) and an itimer running, the modified kernel exhibited worse tick accuracy than the unmodified kernel: .050 seconds/tick versus .028 seconds/tick. Otherwise, performance was almost indistinguishable. With no itimer running this test exhibited virtually identical behavior and times in both cases. In times past I did some limited performance testing. those results are below. On a four-cpu Opteron system without this fix, a sixteen-thread test executed in 3569.991 seconds, of which user was 3568.435s and system was 1.556s. On the same system with the fix, user and elapsed time were about the same, but system time dropped to 0.007 seconds. Performance with eight, four and one thread were comparable. Interestingly, the timer ticks with the fix seemed more accurate: The sixteen-thread test with the fix received 149543 ticks for 0.024 seconds per tick, while the same test without the fix received 58720 for 0.061 seconds per tick. Both cases were configured for an interval of 0.01 seconds. Again, the other tests were comparable. Each thread in this test computed the primes up to 25,000,000. I also did a test with a large number of threads, 100,000 threads, which is impossible without the fix. In this case each thread computed the primes only up to 10,000 (to make the runtime manageable). System time dominated, at 1546.968 seconds out of a total 2176.906 seconds (giving a user time of 629.938s). It received 147651 ticks for 0.015 seconds per tick, still quite accurate. There is obviously no comparable test without the fix. 
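As a rough way to exercise the scenario this patch addresses (a sketch, not part of the patch itself), the tests described above amount to many CPU-bound threads plus an ITIMER_PROF timer ticking every 10 ms, roughly Case 2. Thread count, stack size and run time below are illustrative assumptions; on a kernel without this fix the per-tick cost of run_posix_cpu_timers() grows with the number of threads:

/* Sketch of the many-threads-plus-profiling-itimer workload.
 * Build with: gcc -O2 -pthread repro.c
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

#define NTHREADS 1000			/* illustrative; the commit tests up to 100,000 */

static volatile sig_atomic_t prof_ticks;

static void on_prof(int sig)
{
	(void)sig;
	prof_ticks++;			/* racy across threads; fine for a sketch */
}

static void *spin(void *arg)
{
	volatile unsigned long x = 0;
	(void)arg;
	for (;;)
		x++;
	return NULL;
}

int main(void)
{
	struct itimerval it = {
		.it_interval = { .tv_sec = 0, .tv_usec = 10000 },	/* 10 ms, as in Case 2 */
		.it_value    = { .tv_sec = 0, .tv_usec = 10000 },
	};
	pthread_attr_t attr;
	pthread_t tid;
	time_t end;
	int i;

	signal(SIGPROF, on_prof);
	setitimer(ITIMER_PROF, &it, NULL);

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 64 * 1024);	/* keep VM use modest */
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid, &attr, spin, NULL);

	end = time(NULL) + 30;		/* run for ~30 wall-clock seconds */
	while (time(NULL) < end)
		;			/* main thread burns CPU as well */

	printf("SIGPROF ticks seen: %d\n", (int)prof_ticks);
	return 0;
}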
Signed-off-by: Frank Mayhar Cc: Roland McGrath Cc: Alexey Dobriyan Cc: Andrew Morton Signed-off-by: Ingo Molnar diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 655ed8d..a8635f6 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1333,20 +1333,15 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { + struct task_cputime cputime; + /* - * This is the record for the group leader. Add in the - * cumulative times of previous dead threads. This total - * won't include the time of each live thread whose state - * is included in the core dump. The final total reported - * to our parent process when it calls wait4 will include - * those sums as well as the little bit more time it takes - * this and each other thread to finish dying after the - * core dump synchronization phase. + * This is the record for the group leader. It shows the + * group-wide total, not its individual thread total. */ - cputime_to_timeval(cputime_add(p->utime, p->signal->utime), - &prstatus->pr_utime); - cputime_to_timeval(cputime_add(p->stime, p->signal->stime), - &prstatus->pr_stime); + thread_group_cputime(p, &cputime); + cputime_to_timeval(cputime.utime, &prstatus->pr_utime); + cputime_to_timeval(cputime.stime, &prstatus->pr_stime); } else { cputime_to_timeval(p->utime, &prstatus->pr_utime); cputime_to_timeval(p->stime, &prstatus->pr_stime); diff --git a/fs/proc/array.c b/fs/proc/array.c index 71c9be5..933953c 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -395,20 +395,20 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, /* add up live thread stats at the group level */ if (whole) { + struct task_cputime cputime; struct task_struct *t = task; do { min_flt += t->min_flt; maj_flt += t->maj_flt; - utime = cputime_add(utime, task_utime(t)); - stime = cputime_add(stime, task_stime(t)); gtime = cputime_add(gtime, task_gtime(t)); t = next_thread(t); } while (t != task); min_flt += sig->min_flt; maj_flt += sig->maj_flt; - utime = cputime_add(utime, sig->utime); - stime = cputime_add(stime, sig->stime); + thread_group_cputime(task, &cputime); + utime = cputime.utime; + stime = cputime.stime; gtime = cputime_add(gtime, sig->gtime); } diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index a7dd38f..f9d8e9e 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -115,4 +115,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, long clock_nanosleep_restart(struct restart_block *restart_block); +void update_rlimit_cpu(unsigned long rlim_new); + #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 3d9120c..26d7a5f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -425,6 +425,45 @@ struct pacct_struct { unsigned long ac_minflt, ac_majflt; }; +/** + * struct task_cputime - collected CPU time counts + * @utime: time spent in user mode, in &cputime_t units + * @stime: time spent in kernel mode, in &cputime_t units + * @sum_exec_runtime: total time spent on the CPU, in nanoseconds + * + * This structure groups together three kinds of CPU time that are + * tracked for threads and thread groups. Most things considering + * CPU time want to group these counts together and treat all three + * of them in parallel. + */ +struct task_cputime { + cputime_t utime; + cputime_t stime; + unsigned long long sum_exec_runtime; +}; +/* Alternate field names when used to cache expirations. 
*/ +#define prof_exp stime +#define virt_exp utime +#define sched_exp sum_exec_runtime + +/** + * struct thread_group_cputime - thread group interval timer counts + * @totals: thread group interval timers; substructure for + * uniprocessor kernel, per-cpu for SMP kernel. + * + * This structure contains the version of task_cputime, above, that is + * used for thread group CPU clock calculations. + */ +#ifdef CONFIG_SMP +struct thread_group_cputime { + struct task_cputime *totals; +}; +#else +struct thread_group_cputime { + struct task_cputime totals; +}; +#endif + /* * NOTE! "signal_struct" does not have it's own * locking, because a shared signal_struct always @@ -470,6 +509,17 @@ struct signal_struct { cputime_t it_prof_expires, it_virt_expires; cputime_t it_prof_incr, it_virt_incr; + /* + * Thread group totals for process CPU clocks. + * See thread_group_cputime(), et al, for details. + */ + struct thread_group_cputime cputime; + + /* Earliest-expiration cache. */ + struct task_cputime cputime_expires; + + struct list_head cpu_timers[3]; + /* job control IDs */ /* @@ -500,7 +550,7 @@ struct signal_struct { * Live threads maintain their own counters and add to these * in __exit_signal, except for the group leader. */ - cputime_t utime, stime, cutime, cstime; + cputime_t cutime, cstime; cputime_t gtime; cputime_t cgtime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; @@ -509,14 +559,6 @@ struct signal_struct { struct task_io_accounting ioac; /* - * Cumulative ns of scheduled CPU time for dead threads in the - * group, not including a zombie group leader. (This only differs - * from jiffies_to_ns(utime + stime) if sched_clock uses something - * other than jiffies.) - */ - unsigned long long sum_sched_runtime; - - /* * We don't bother to synchronize most readers of this at all, * because there is no reader checking a limit that actually needs * to get both rlim_cur and rlim_max atomically, and either one @@ -527,8 +569,6 @@ struct signal_struct { */ struct rlimit rlim[RLIM_NLIMITS]; - struct list_head cpu_timers[3]; - /* keep the process-shared keyrings here so that they do the right * thing in threads created with CLONE_THREAD */ #ifdef CONFIG_KEYS @@ -1134,8 +1174,7 @@ struct task_struct { /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ unsigned long min_flt, maj_flt; - cputime_t it_prof_expires, it_virt_expires; - unsigned long long it_sched_expires; + struct task_cputime cputime_expires; struct list_head cpu_timers[3]; /* process credentials */ @@ -1585,6 +1624,7 @@ extern unsigned long long cpu_clock(int cpu); extern unsigned long long task_sched_runtime(struct task_struct *task); +extern unsigned long long thread_group_sched_runtime(struct task_struct *task); /* sched_exec is called by processes performing an exec */ #ifdef CONFIG_SMP @@ -2082,6 +2122,197 @@ static inline int spin_needbreak(spinlock_t *lock) } /* + * Thread group CPU time accounting. 
+ */ +#ifdef CONFIG_SMP + +extern int thread_group_cputime_alloc_smp(struct task_struct *); +extern void thread_group_cputime_smp(struct task_struct *, struct task_cputime *); + +static inline void thread_group_cputime_init(struct signal_struct *sig) +{ + sig->cputime.totals = NULL; +} + +static inline int thread_group_cputime_clone_thread(struct task_struct *curr, + struct task_struct *new) +{ + if (curr->signal->cputime.totals) + return 0; + return thread_group_cputime_alloc_smp(curr); +} + +static inline void thread_group_cputime_free(struct signal_struct *sig) +{ + free_percpu(sig->cputime.totals); +} + +/** + * thread_group_cputime - Sum the thread group time fields across all CPUs. + * + * This is a wrapper for the real routine, thread_group_cputime_smp(). See + * that routine for details. + */ +static inline void thread_group_cputime( + struct task_struct *tsk, + struct task_cputime *times) +{ + thread_group_cputime_smp(tsk, times); +} + +/** + * thread_group_cputime_account_user - Maintain utime for a thread group. + * + * @tgtimes: Pointer to thread_group_cputime structure. + * @cputime: Time value by which to increment the utime field of that + * structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the utime field there. + */ +static inline void thread_group_cputime_account_user( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + if (tgtimes->totals) { + struct task_cputime *times; + + times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times->utime = cputime_add(times->utime, cputime); + put_cpu_no_resched(); + } +} + +/** + * thread_group_cputime_account_system - Maintain stime for a thread group. + * + * @tgtimes: Pointer to thread_group_cputime structure. + * @cputime: Time value by which to increment the stime field of that + * structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the stime field there. + */ +static inline void thread_group_cputime_account_system( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + if (tgtimes->totals) { + struct task_cputime *times; + + times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times->stime = cputime_add(times->stime, cputime); + put_cpu_no_resched(); + } +} + +/** + * thread_group_cputime_account_exec_runtime - Maintain exec runtime for a + * thread group. + * + * @tgtimes: Pointer to thread_group_cputime structure. + * @ns: Time value by which to increment the sum_exec_runtime field + * of that structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the sum_exec_runtime field there. 
+ */ +static inline void thread_group_cputime_account_exec_runtime( + struct thread_group_cputime *tgtimes, + unsigned long long ns) +{ + if (tgtimes->totals) { + struct task_cputime *times; + + times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times->sum_exec_runtime += ns; + put_cpu_no_resched(); + } +} + +#else /* CONFIG_SMP */ + +static inline void thread_group_cputime_init(struct signal_struct *sig) +{ + sig->cputime.totals.utime = cputime_zero; + sig->cputime.totals.stime = cputime_zero; + sig->cputime.totals.sum_exec_runtime = 0; +} + +static inline int thread_group_cputime_alloc(struct task_struct *tsk) +{ + return 0; +} + +static inline void thread_group_cputime_free(struct signal_struct *sig) +{ +} + +static inline int thread_group_cputime_clone_thread(struct task_struct *curr, + struct task_struct *tsk) +{ +} + +static inline void thread_group_cputime(struct task_struct *tsk, + struct task_cputime *cputime) +{ + *cputime = tsk->signal->cputime.totals; +} + +static inline void thread_group_cputime_account_user( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + tgtimes->totals->utime = cputime_add(tgtimes->totals->utime, cputime); +} + +static inline void thread_group_cputime_account_system( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + tgtimes->totals->stime = cputime_add(tgtimes->totals->stime, cputime); +} + +static inline void thread_group_cputime_account_exec_runtime( + struct thread_group_cputime *tgtimes, + unsigned long long ns) +{ + tgtimes->totals->sum_exec_runtime += ns; +} + +#endif /* CONFIG_SMP */ + +static inline void account_group_user_time(struct task_struct *tsk, + cputime_t cputime) +{ + struct signal_struct *sig; + + sig = tsk->signal; + if (likely(sig)) + thread_group_cputime_account_user(&sig->cputime, cputime); +} + +static inline void account_group_system_time(struct task_struct *tsk, + cputime_t cputime) +{ + struct signal_struct *sig; + + sig = tsk->signal; + if (likely(sig)) + thread_group_cputime_account_system(&sig->cputime, cputime); +} + +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) +{ + struct signal_struct *sig; + + sig = tsk->signal; + if (likely(sig)) + thread_group_cputime_account_exec_runtime(&sig->cputime, ns); +} + +/* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. * This is required every time the blocked sigset_t changes. diff --git a/include/linux/time.h b/include/linux/time.h index e15206a..1b70b3c 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -125,6 +125,9 @@ extern int timekeeping_valid_for_hres(void); extern void update_wall_time(void); extern void update_xtime_cache(u64 nsec); +struct tms; +extern void do_sys_times(struct tms *); + /** * timespec_to_ns - Convert timespec to nanoseconds * @ts: pointer to the timespec variable to be converted diff --git a/kernel/compat.c b/kernel/compat.c index 32c254a..72650e3 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -150,49 +151,23 @@ asmlinkage long compat_sys_setitimer(int which, return 0; } +static compat_clock_t clock_t_to_compat_clock_t(clock_t x) +{ + return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); +} + asmlinkage long compat_sys_times(struct compat_tms __user *tbuf) { - /* - * In the SMP world we might just be unlucky and have one of - * the times increment as we use it. Since the value is an - * atomically safe type this is just fine. 
Conceptually its - * as if the syscall took an instant longer to occur. - */ if (tbuf) { + struct tms tms; struct compat_tms tmp; - struct task_struct *tsk = current; - struct task_struct *t; - cputime_t utime, stime, cutime, cstime; - - read_lock(&tasklist_lock); - utime = tsk->signal->utime; - stime = tsk->signal->stime; - t = tsk; - do { - utime = cputime_add(utime, t->utime); - stime = cputime_add(stime, t->stime); - t = next_thread(t); - } while (t != tsk); - - /* - * While we have tasklist_lock read-locked, no dying thread - * can be updating current->signal->[us]time. Instead, - * we got their counts included in the live thread loop. - * However, another thread can come in right now and - * do a wait call that updates current->signal->c[us]time. - * To make sure we always see that pair updated atomically, - * we take the siglock around fetching them. - */ - spin_lock_irq(&tsk->sighand->siglock); - cutime = tsk->signal->cutime; - cstime = tsk->signal->cstime; - spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); - - tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime)); - tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime)); - tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime)); - tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime)); + + do_sys_times(&tms); + /* Convert our struct tms to the compat version. */ + tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); + tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); + tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); + tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime); if (copy_to_user(tbuf, &tmp, sizeof(tmp))) return -EFAULT; } diff --git a/kernel/exit.c b/kernel/exit.c index 1639564..40036ac 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -112,8 +112,6 @@ static void __exit_signal(struct task_struct *tsk) * We won't ever get here for the group leader, since it * will have been the last reference on the signal_struct. */ - sig->utime = cputime_add(sig->utime, task_utime(tsk)); - sig->stime = cputime_add(sig->stime, task_stime(tsk)); sig->gtime = cputime_add(sig->gtime, task_gtime(tsk)); sig->min_flt += tsk->min_flt; sig->maj_flt += tsk->maj_flt; @@ -122,7 +120,6 @@ static void __exit_signal(struct task_struct *tsk) sig->inblock += task_io_get_inblock(tsk); sig->oublock += task_io_get_oublock(tsk); task_io_accounting_add(&sig->ioac, &tsk->ioac); - sig->sum_sched_runtime += tsk->se.sum_exec_runtime; sig = NULL; /* Marker for below. */ } @@ -1294,6 +1291,7 @@ static int wait_task_zombie(struct task_struct *p, int options, if (likely(!traced)) { struct signal_struct *psig; struct signal_struct *sig; + struct task_cputime cputime; /* * The resource counters for the group leader are in its @@ -1309,20 +1307,23 @@ static int wait_task_zombie(struct task_struct *p, int options, * need to protect the access to p->parent->signal fields, * as other threads in the parent group can be right * here reaping other children at the same time. + * + * We use thread_group_cputime() to get times for the thread + * group, which consolidates times for all threads in the + * group including the group leader. 
*/ spin_lock_irq(&p->parent->sighand->siglock); psig = p->parent->signal; sig = p->signal; + thread_group_cputime(p, &cputime); psig->cutime = cputime_add(psig->cutime, - cputime_add(p->utime, - cputime_add(sig->utime, - sig->cutime))); + cputime_add(cputime.utime, + sig->cutime)); psig->cstime = cputime_add(psig->cstime, - cputime_add(p->stime, - cputime_add(sig->stime, - sig->cstime))); + cputime_add(cputime.stime, + sig->cstime)); psig->cgtime = cputime_add(psig->cgtime, cputime_add(p->gtime, diff --git a/kernel/fork.c b/kernel/fork.c index 7ce2ebe..a8ac2ef 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -759,15 +759,44 @@ void __cleanup_sighand(struct sighand_struct *sighand) kmem_cache_free(sighand_cachep, sighand); } + +/* + * Initialize POSIX timer handling for a thread group. + */ +static void posix_cpu_timers_init_group(struct signal_struct *sig) +{ + /* Thread group counters. */ + thread_group_cputime_init(sig); + + /* Expiration times and increments. */ + sig->it_virt_expires = cputime_zero; + sig->it_virt_incr = cputime_zero; + sig->it_prof_expires = cputime_zero; + sig->it_prof_incr = cputime_zero; + + /* Cached expiration times. */ + sig->cputime_expires.prof_exp = cputime_zero; + sig->cputime_expires.virt_exp = cputime_zero; + sig->cputime_expires.sched_exp = 0; + + /* The timer lists. */ + INIT_LIST_HEAD(&sig->cpu_timers[0]); + INIT_LIST_HEAD(&sig->cpu_timers[1]); + INIT_LIST_HEAD(&sig->cpu_timers[2]); +} + static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; int ret; if (clone_flags & CLONE_THREAD) { - atomic_inc(¤t->signal->count); - atomic_inc(¤t->signal->live); - return 0; + ret = thread_group_cputime_clone_thread(current, tsk); + if (likely(!ret)) { + atomic_inc(¤t->signal->count); + atomic_inc(¤t->signal->live); + } + return ret; } sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; @@ -795,15 +824,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->it_real_incr.tv64 = 0; sig->real_timer.function = it_real_fn; - sig->it_virt_expires = cputime_zero; - sig->it_virt_incr = cputime_zero; - sig->it_prof_expires = cputime_zero; - sig->it_prof_incr = cputime_zero; - sig->leader = 0; /* session leadership doesn't inherit */ sig->tty_old_pgrp = NULL; - sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; + sig->cutime = sig->cstime = cputime_zero; sig->gtime = cputime_zero; sig->cgtime = cputime_zero; sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; @@ -820,14 +844,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); - if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { - /* - * New sole thread in the process gets an expiry time - * of the whole CPU time limit. - */ - tsk->it_prof_expires = - secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); - } + posix_cpu_timers_init_group(sig); + acct_init_pacct(&sig->pacct); tty_audit_fork(sig); @@ -837,6 +855,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) void __cleanup_signal(struct signal_struct *sig) { + thread_group_cputime_free(sig); exit_thread_group_keys(sig); kmem_cache_free(signal_cachep, sig); } @@ -886,6 +905,19 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p) #endif /* CONFIG_MM_OWNER */ /* + * Initialize POSIX timer handling for a single task. 
+ */ +static void posix_cpu_timers_init(struct task_struct *tsk) +{ + tsk->cputime_expires.prof_exp = cputime_zero; + tsk->cputime_expires.virt_exp = cputime_zero; + tsk->cputime_expires.sched_exp = 0; + INIT_LIST_HEAD(&tsk->cpu_timers[0]); + INIT_LIST_HEAD(&tsk->cpu_timers[1]); + INIT_LIST_HEAD(&tsk->cpu_timers[2]); +} + +/* * This creates a new process as a copy of the old one, * but does not actually start it yet. * @@ -995,12 +1027,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, task_io_accounting_init(&p->ioac); acct_clear_integrals(p); - p->it_virt_expires = cputime_zero; - p->it_prof_expires = cputime_zero; - p->it_sched_expires = 0; - INIT_LIST_HEAD(&p->cpu_timers[0]); - INIT_LIST_HEAD(&p->cpu_timers[1]); - INIT_LIST_HEAD(&p->cpu_timers[2]); + posix_cpu_timers_init(p); p->lock_depth = -1; /* -1 = no lock */ do_posix_clock_monotonic_gettime(&p->start_time); @@ -1201,21 +1228,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, if (clone_flags & CLONE_THREAD) { p->group_leader = current->group_leader; list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); - - if (!cputime_eq(current->signal->it_virt_expires, - cputime_zero) || - !cputime_eq(current->signal->it_prof_expires, - cputime_zero) || - current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY || - !list_empty(¤t->signal->cpu_timers[0]) || - !list_empty(¤t->signal->cpu_timers[1]) || - !list_empty(¤t->signal->cpu_timers[2])) { - /* - * Have child wake up on its first tick to check - * for process CPU timers. - */ - p->it_prof_expires = jiffies_to_cputime(1); - } } if (likely(p->pid)) { diff --git a/kernel/itimer.c b/kernel/itimer.c index ab98274..db7c358 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -55,17 +55,15 @@ int do_getitimer(int which, struct itimerval *value) spin_unlock_irq(&tsk->sighand->siglock); break; case ITIMER_VIRTUAL: - read_lock(&tasklist_lock); spin_lock_irq(&tsk->sighand->siglock); cval = tsk->signal->it_virt_expires; cinterval = tsk->signal->it_virt_incr; if (!cputime_eq(cval, cputime_zero)) { - struct task_struct *t = tsk; - cputime_t utime = tsk->signal->utime; - do { - utime = cputime_add(utime, t->utime); - t = next_thread(t); - } while (t != tsk); + struct task_cputime cputime; + cputime_t utime; + + thread_group_cputime(tsk, &cputime); + utime = cputime.utime; if (cputime_le(cval, utime)) { /* about to fire */ cval = jiffies_to_cputime(1); } else { @@ -73,25 +71,19 @@ int do_getitimer(int which, struct itimerval *value) } } spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); cputime_to_timeval(cval, &value->it_value); cputime_to_timeval(cinterval, &value->it_interval); break; case ITIMER_PROF: - read_lock(&tasklist_lock); spin_lock_irq(&tsk->sighand->siglock); cval = tsk->signal->it_prof_expires; cinterval = tsk->signal->it_prof_incr; if (!cputime_eq(cval, cputime_zero)) { - struct task_struct *t = tsk; - cputime_t ptime = cputime_add(tsk->signal->utime, - tsk->signal->stime); - do { - ptime = cputime_add(ptime, - cputime_add(t->utime, - t->stime)); - t = next_thread(t); - } while (t != tsk); + struct task_cputime times; + cputime_t ptime; + + thread_group_cputime(tsk, ×); + ptime = cputime_add(times.utime, times.stime); if (cputime_le(cval, ptime)) { /* about to fire */ cval = jiffies_to_cputime(1); } else { @@ -99,7 +91,6 @@ int do_getitimer(int which, struct itimerval *value) } } spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); cputime_to_timeval(cval, &value->it_value); 
cputime_to_timeval(cinterval, &value->it_interval); break; @@ -185,7 +176,6 @@ again: case ITIMER_VIRTUAL: nval = timeval_to_cputime(&value->it_value); ninterval = timeval_to_cputime(&value->it_interval); - read_lock(&tasklist_lock); spin_lock_irq(&tsk->sighand->siglock); cval = tsk->signal->it_virt_expires; cinterval = tsk->signal->it_virt_incr; @@ -200,7 +190,6 @@ again: tsk->signal->it_virt_expires = nval; tsk->signal->it_virt_incr = ninterval; spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); if (ovalue) { cputime_to_timeval(cval, &ovalue->it_value); cputime_to_timeval(cinterval, &ovalue->it_interval); @@ -209,7 +198,6 @@ again: case ITIMER_PROF: nval = timeval_to_cputime(&value->it_value); ninterval = timeval_to_cputime(&value->it_interval); - read_lock(&tasklist_lock); spin_lock_irq(&tsk->sighand->siglock); cval = tsk->signal->it_prof_expires; cinterval = tsk->signal->it_prof_incr; @@ -224,7 +212,6 @@ again: tsk->signal->it_prof_expires = nval; tsk->signal->it_prof_incr = ninterval; spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); if (ovalue) { cputime_to_timeval(cval, &ovalue->it_value); cputime_to_timeval(cinterval, &ovalue->it_interval); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index c42a03a..dba1c33 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -8,6 +8,99 @@ #include #include +#ifdef CONFIG_SMP +/* + * Allocate the thread_group_cputime structure appropriately for SMP kernels + * and fill in the current values of the fields. Called from copy_signal() + * via thread_group_cputime_clone_thread() when adding a second or subsequent + * thread to a thread group. Assumes interrupts are enabled when called. + */ +int thread_group_cputime_alloc_smp(struct task_struct *tsk) +{ + struct signal_struct *sig = tsk->signal; + struct task_cputime *cputime; + + /* + * If we have multiple threads and we don't already have a + * per-CPU task_cputime struct, allocate one and fill it in with + * the times accumulated so far. + */ + if (sig->cputime.totals) + return 0; + cputime = alloc_percpu(struct task_cputime); + if (cputime == NULL) + return -ENOMEM; + read_lock(&tasklist_lock); + spin_lock_irq(&tsk->sighand->siglock); + if (sig->cputime.totals) { + spin_unlock_irq(&tsk->sighand->siglock); + read_unlock(&tasklist_lock); + free_percpu(cputime); + return 0; + } + sig->cputime.totals = cputime; + cputime = per_cpu_ptr(sig->cputime.totals, get_cpu()); + cputime->utime = tsk->utime; + cputime->stime = tsk->stime; + cputime->sum_exec_runtime = tsk->se.sum_exec_runtime; + put_cpu_no_resched(); + spin_unlock_irq(&tsk->sighand->siglock); + read_unlock(&tasklist_lock); + return 0; +} + +/** + * thread_group_cputime_smp - Sum the thread group time fields across all CPUs. + * + * @tsk: The task we use to identify the thread group. + * @times: task_cputime structure in which we return the summed fields. + * + * Walk the list of CPUs to sum the per-CPU time fields in the thread group + * time structure. 
+ */ +void thread_group_cputime_smp( + struct task_struct *tsk, + struct task_cputime *times) +{ + struct signal_struct *sig; + int i; + struct task_cputime *tot; + + sig = tsk->signal; + if (unlikely(!sig) || !sig->cputime.totals) { + times->utime = tsk->utime; + times->stime = tsk->stime; + times->sum_exec_runtime = tsk->se.sum_exec_runtime; + return; + } + times->stime = times->utime = cputime_zero; + times->sum_exec_runtime = 0; + for_each_possible_cpu(i) { + tot = per_cpu_ptr(tsk->signal->cputime.totals, i); + times->utime = cputime_add(times->utime, tot->utime); + times->stime = cputime_add(times->stime, tot->stime); + times->sum_exec_runtime += tot->sum_exec_runtime; + } +} + +#endif /* CONFIG_SMP */ + +/* + * Called after updating RLIMIT_CPU to set timer expiration if necessary. + */ +void update_rlimit_cpu(unsigned long rlim_new) +{ + cputime_t cputime; + + cputime = secs_to_cputime(rlim_new); + if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || + cputime_lt(current->signal->it_prof_expires, cputime)) { + spin_lock_irq(¤t->sighand->siglock); + set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); + spin_unlock_irq(¤t->sighand->siglock); + } +} + static int check_clock(const clockid_t which_clock) { int error = 0; @@ -158,10 +251,6 @@ static inline cputime_t virt_ticks(struct task_struct *p) { return p->utime; } -static inline unsigned long long sched_ns(struct task_struct *p) -{ - return task_sched_runtime(p); -} int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp) { @@ -211,7 +300,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, cpu->cpu = virt_ticks(p); break; case CPUCLOCK_SCHED: - cpu->sched = sched_ns(p); + cpu->sched = task_sched_runtime(p); break; } return 0; @@ -226,31 +315,20 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx, struct task_struct *p, union cpu_time_count *cpu) { - struct task_struct *t = p; - switch (clock_idx) { + struct task_cputime cputime; + + thread_group_cputime(p, &cputime); + switch (clock_idx) { default: return -EINVAL; case CPUCLOCK_PROF: - cpu->cpu = cputime_add(p->signal->utime, p->signal->stime); - do { - cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t)); - t = next_thread(t); - } while (t != p); + cpu->cpu = cputime_add(cputime.utime, cputime.stime); break; case CPUCLOCK_VIRT: - cpu->cpu = p->signal->utime; - do { - cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t)); - t = next_thread(t); - } while (t != p); + cpu->cpu = cputime.utime; break; case CPUCLOCK_SCHED: - cpu->sched = p->signal->sum_sched_runtime; - /* Add in each other live thread. */ - while ((t = next_thread(t)) != p) { - cpu->sched += t->se.sum_exec_runtime; - } - cpu->sched += sched_ns(p); + cpu->sched = thread_group_sched_runtime(p); break; } return 0; @@ -471,80 +549,11 @@ void posix_cpu_timers_exit(struct task_struct *tsk) } void posix_cpu_timers_exit_group(struct task_struct *tsk) { - cleanup_timers(tsk->signal->cpu_timers, - cputime_add(tsk->utime, tsk->signal->utime), - cputime_add(tsk->stime, tsk->signal->stime), - tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime); -} - - -/* - * Set the expiry times of all the threads in the process so one of them - * will go off before the process cumulative expiry total is reached. 
- */ -static void process_timer_rebalance(struct task_struct *p, - unsigned int clock_idx, - union cpu_time_count expires, - union cpu_time_count val) -{ - cputime_t ticks, left; - unsigned long long ns, nsleft; - struct task_struct *t = p; - unsigned int nthreads = atomic_read(&p->signal->live); - - if (!nthreads) - return; + struct task_cputime cputime; - switch (clock_idx) { - default: - BUG(); - break; - case CPUCLOCK_PROF: - left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), - nthreads); - do { - if (likely(!(t->flags & PF_EXITING))) { - ticks = cputime_add(prof_ticks(t), left); - if (cputime_eq(t->it_prof_expires, - cputime_zero) || - cputime_gt(t->it_prof_expires, ticks)) { - t->it_prof_expires = ticks; - } - } - t = next_thread(t); - } while (t != p); - break; - case CPUCLOCK_VIRT: - left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), - nthreads); - do { - if (likely(!(t->flags & PF_EXITING))) { - ticks = cputime_add(virt_ticks(t), left); - if (cputime_eq(t->it_virt_expires, - cputime_zero) || - cputime_gt(t->it_virt_expires, ticks)) { - t->it_virt_expires = ticks; - } - } - t = next_thread(t); - } while (t != p); - break; - case CPUCLOCK_SCHED: - nsleft = expires.sched - val.sched; - do_div(nsleft, nthreads); - nsleft = max_t(unsigned long long, nsleft, 1); - do { - if (likely(!(t->flags & PF_EXITING))) { - ns = t->se.sum_exec_runtime + nsleft; - if (t->it_sched_expires == 0 || - t->it_sched_expires > ns) { - t->it_sched_expires = ns; - } - } - t = next_thread(t); - } while (t != p); - break; - } + thread_group_cputime(tsk, &cputime); + cleanup_timers(tsk->signal->cpu_timers, + cputime.utime, cputime.stime, cputime.sum_exec_runtime); } static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) @@ -608,29 +617,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) default: BUG(); case CPUCLOCK_PROF: - if (cputime_eq(p->it_prof_expires, + if (cputime_eq(p->cputime_expires.prof_exp, cputime_zero) || - cputime_gt(p->it_prof_expires, + cputime_gt(p->cputime_expires.prof_exp, nt->expires.cpu)) - p->it_prof_expires = nt->expires.cpu; + p->cputime_expires.prof_exp = + nt->expires.cpu; break; case CPUCLOCK_VIRT: - if (cputime_eq(p->it_virt_expires, + if (cputime_eq(p->cputime_expires.virt_exp, cputime_zero) || - cputime_gt(p->it_virt_expires, + cputime_gt(p->cputime_expires.virt_exp, nt->expires.cpu)) - p->it_virt_expires = nt->expires.cpu; + p->cputime_expires.virt_exp = + nt->expires.cpu; break; case CPUCLOCK_SCHED: - if (p->it_sched_expires == 0 || - p->it_sched_expires > nt->expires.sched) - p->it_sched_expires = nt->expires.sched; + if (p->cputime_expires.sched_exp == 0 || + p->cputime_expires.sched_exp > + nt->expires.sched) + p->cputime_expires.sched_exp = + nt->expires.sched; break; } } else { /* - * For a process timer, we must balance - * all the live threads' expirations. + * For a process timer, set the cached expiration time. 
*/ switch (CPUCLOCK_WHICH(timer->it_clock)) { default: @@ -641,7 +653,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) cputime_lt(p->signal->it_virt_expires, timer->it.cpu.expires.cpu)) break; - goto rebalance; + p->signal->cputime_expires.virt_exp = + timer->it.cpu.expires.cpu; + break; case CPUCLOCK_PROF: if (!cputime_eq(p->signal->it_prof_expires, cputime_zero) && @@ -652,13 +666,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) if (i != RLIM_INFINITY && i <= cputime_to_secs(timer->it.cpu.expires.cpu)) break; - goto rebalance; + p->signal->cputime_expires.prof_exp = + timer->it.cpu.expires.cpu; + break; case CPUCLOCK_SCHED: - rebalance: - process_timer_rebalance( - timer->it.cpu.task, - CPUCLOCK_WHICH(timer->it_clock), - timer->it.cpu.expires, now); + p->signal->cputime_expires.sched_exp = + timer->it.cpu.expires.sched; break; } } @@ -969,13 +982,13 @@ static void check_thread_timers(struct task_struct *tsk, struct signal_struct *const sig = tsk->signal; maxfire = 20; - tsk->it_prof_expires = cputime_zero; + tsk->cputime_expires.prof_exp = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { - tsk->it_prof_expires = t->expires.cpu; + tsk->cputime_expires.prof_exp = t->expires.cpu; break; } t->firing = 1; @@ -984,13 +997,13 @@ static void check_thread_timers(struct task_struct *tsk, ++timers; maxfire = 20; - tsk->it_virt_expires = cputime_zero; + tsk->cputime_expires.virt_exp = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { - tsk->it_virt_expires = t->expires.cpu; + tsk->cputime_expires.virt_exp = t->expires.cpu; break; } t->firing = 1; @@ -999,13 +1012,13 @@ static void check_thread_timers(struct task_struct *tsk, ++timers; maxfire = 20; - tsk->it_sched_expires = 0; + tsk->cputime_expires.sched_exp = 0; while (!list_empty(timers)) { struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) { - tsk->it_sched_expires = t->expires.sched; + tsk->cputime_expires.sched_exp = t->expires.sched; break; } t->firing = 1; @@ -1055,10 +1068,10 @@ static void check_process_timers(struct task_struct *tsk, { int maxfire; struct signal_struct *const sig = tsk->signal; - cputime_t utime, stime, ptime, virt_expires, prof_expires; + cputime_t utime, ptime, virt_expires, prof_expires; unsigned long long sum_sched_runtime, sched_expires; - struct task_struct *t; struct list_head *timers = sig->cpu_timers; + struct task_cputime cputime; /* * Don't sample the current process CPU clocks if there are no timers. @@ -1074,18 +1087,10 @@ static void check_process_timers(struct task_struct *tsk, /* * Collect the current process totals. 
*/ - utime = sig->utime; - stime = sig->stime; - sum_sched_runtime = sig->sum_sched_runtime; - t = tsk; - do { - utime = cputime_add(utime, t->utime); - stime = cputime_add(stime, t->stime); - sum_sched_runtime += t->se.sum_exec_runtime; - t = next_thread(t); - } while (t != tsk); - ptime = cputime_add(utime, stime); - + thread_group_cputime(tsk, &cputime); + utime = cputime.utime; + ptime = cputime_add(utime, cputime.stime); + sum_sched_runtime = cputime.sum_exec_runtime; maxfire = 20; prof_expires = cputime_zero; while (!list_empty(timers)) { @@ -1193,60 +1198,18 @@ static void check_process_timers(struct task_struct *tsk, } } - if (!cputime_eq(prof_expires, cputime_zero) || - !cputime_eq(virt_expires, cputime_zero) || - sched_expires != 0) { - /* - * Rebalance the threads' expiry times for the remaining - * process CPU timers. - */ - - cputime_t prof_left, virt_left, ticks; - unsigned long long sched_left, sched; - const unsigned int nthreads = atomic_read(&sig->live); - - if (!nthreads) - return; - - prof_left = cputime_sub(prof_expires, utime); - prof_left = cputime_sub(prof_left, stime); - prof_left = cputime_div_non_zero(prof_left, nthreads); - virt_left = cputime_sub(virt_expires, utime); - virt_left = cputime_div_non_zero(virt_left, nthreads); - if (sched_expires) { - sched_left = sched_expires - sum_sched_runtime; - do_div(sched_left, nthreads); - sched_left = max_t(unsigned long long, sched_left, 1); - } else { - sched_left = 0; - } - t = tsk; - do { - if (unlikely(t->flags & PF_EXITING)) - continue; - - ticks = cputime_add(cputime_add(t->utime, t->stime), - prof_left); - if (!cputime_eq(prof_expires, cputime_zero) && - (cputime_eq(t->it_prof_expires, cputime_zero) || - cputime_gt(t->it_prof_expires, ticks))) { - t->it_prof_expires = ticks; - } - - ticks = cputime_add(t->utime, virt_left); - if (!cputime_eq(virt_expires, cputime_zero) && - (cputime_eq(t->it_virt_expires, cputime_zero) || - cputime_gt(t->it_virt_expires, ticks))) { - t->it_virt_expires = ticks; - } - - sched = t->se.sum_exec_runtime + sched_left; - if (sched_expires && (t->it_sched_expires == 0 || - t->it_sched_expires > sched)) { - t->it_sched_expires = sched; - } - } while ((t = next_thread(t)) != tsk); - } + if (!cputime_eq(prof_expires, cputime_zero) && + (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) || + cputime_gt(sig->cputime_expires.prof_exp, prof_expires))) + sig->cputime_expires.prof_exp = prof_expires; + if (!cputime_eq(virt_expires, cputime_zero) && + (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) || + cputime_gt(sig->cputime_expires.virt_exp, virt_expires))) + sig->cputime_expires.virt_exp = virt_expires; + if (sched_expires != 0 && + (sig->cputime_expires.sched_exp == 0 || + sig->cputime_expires.sched_exp > sched_expires)) + sig->cputime_expires.sched_exp = sched_expires; } /* @@ -1314,6 +1277,78 @@ out: ++timer->it_requeue_pending; } +/** + * task_cputime_zero - Check a task_cputime struct for all zero fields. + * + * @cputime: The struct to compare. + * + * Checks @cputime to see if all fields are zero. Returns true if all fields + * are zero, false if any field is nonzero. + */ +static inline int task_cputime_zero(const struct task_cputime *cputime) +{ + if (cputime_eq(cputime->utime, cputime_zero) && + cputime_eq(cputime->stime, cputime_zero) && + cputime->sum_exec_runtime == 0) + return 1; + return 0; +} + +/** + * task_cputime_expired - Compare two task_cputime entities. + * + * @sample: The task_cputime structure to be checked for expiration. 
+ * @expires: Expiration times, against which @sample will be checked. + * + * Checks @sample against @expires to see if any field of @sample has expired. + * Returns true if any field of the former is greater than the corresponding + * field of the latter if the latter field is set. Otherwise returns false. + */ +static inline int task_cputime_expired(const struct task_cputime *sample, + const struct task_cputime *expires) +{ + if (!cputime_eq(expires->utime, cputime_zero) && + cputime_ge(sample->utime, expires->utime)) + return 1; + if (!cputime_eq(expires->stime, cputime_zero) && + cputime_ge(cputime_add(sample->utime, sample->stime), + expires->stime)) + return 1; + if (expires->sum_exec_runtime != 0 && + sample->sum_exec_runtime >= expires->sum_exec_runtime) + return 1; + return 0; +} + +/** + * fastpath_timer_check - POSIX CPU timers fast path. + * + * @tsk: The task (thread) being checked. + * @sig: The signal pointer for that task. + * + * If there are no timers set return false. Otherwise snapshot the task and + * thread group timers, then compare them with the corresponding expiration + # times. Returns true if a timer has expired, else returns false. + */ +static inline int fastpath_timer_check(struct task_struct *tsk, + struct signal_struct *sig) +{ + struct task_cputime task_sample = { + .utime = tsk->utime, + .stime = tsk->stime, + .sum_exec_runtime = tsk->se.sum_exec_runtime + }; + struct task_cputime group_sample; + + if (task_cputime_zero(&tsk->cputime_expires) && + task_cputime_zero(&sig->cputime_expires)) + return 0; + if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) + return 1; + thread_group_cputime(tsk, &group_sample); + return task_cputime_expired(&group_sample, &sig->cputime_expires); +} + /* * This is called from the timer interrupt handler. The irq handler has * already updated our counts. We need to check if any timers fire now. @@ -1323,30 +1358,29 @@ void run_posix_cpu_timers(struct task_struct *tsk) { LIST_HEAD(firing); struct k_itimer *timer, *next; + struct signal_struct *sig; + struct sighand_struct *sighand; + unsigned long flags; BUG_ON(!irqs_disabled()); -#define UNEXPIRED(clock) \ - (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \ - cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires)) - - if (UNEXPIRED(prof) && UNEXPIRED(virt) && - (tsk->it_sched_expires == 0 || - tsk->se.sum_exec_runtime < tsk->it_sched_expires)) - return; - -#undef UNEXPIRED - + /* Pick up tsk->signal and make sure it's valid. */ + sig = tsk->signal; /* - * Double-check with locks held. + * The fast path checks that there are no expired thread or thread + * group timers. If that's so, just return. Also check that + * tsk->signal is non-NULL; this probably can't happen but cover the + * possibility anyway. */ - read_lock(&tasklist_lock); - if (likely(tsk->signal != NULL)) { - spin_lock(&tsk->sighand->siglock); - + if (unlikely(!sig) || !fastpath_timer_check(tsk, sig)) { + return; + } + sighand = lock_task_sighand(tsk, &flags); + if (likely(sighand)) { /* - * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N] - * all the timers that are firing, and put them on the firing list. + * Here we take off tsk->signal->cpu_timers[N] and + * tsk->cpu_timers[N] all the timers that are firing, and + * put them on the firing list. 
*/ check_thread_timers(tsk, &firing); check_process_timers(tsk, &firing); @@ -1359,9 +1393,8 @@ void run_posix_cpu_timers(struct task_struct *tsk) * that gets the timer lock before we do will give it up and * spin until we've taken care of that timer below. */ - spin_unlock(&tsk->sighand->siglock); } - read_unlock(&tasklist_lock); + unlock_task_sighand(tsk, &flags); /* * Now that all the timers on our list have the firing flag, @@ -1389,10 +1422,9 @@ void run_posix_cpu_timers(struct task_struct *tsk) /* * Set one of the process-wide special case CPU timers. - * The tasklist_lock and tsk->sighand->siglock must be held by the caller. - * The oldval argument is null for the RLIMIT_CPU timer, where *newval is - * absolute; non-null for ITIMER_*, where *newval is relative and we update - * it to be absolute, *oldval is absolute and we update it to be relative. + * The tsk->sighand->siglock must be held by the caller. + * The *newval argument is relative and we update it to be absolute, *oldval + * is absolute and we update it to be relative. */ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, cputime_t *newval, cputime_t *oldval) @@ -1435,13 +1467,14 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, cputime_ge(list_first_entry(head, struct cpu_timer_list, entry)->expires.cpu, *newval)) { - /* - * Rejigger each thread's expiry time so that one will - * notice before we hit the process-cumulative expiry time. - */ - union cpu_time_count expires = { .sched = 0 }; - expires.cpu = *newval; - process_timer_rebalance(tsk, clock_idx, expires, now); + switch (clock_idx) { + case CPUCLOCK_PROF: + tsk->signal->cputime_expires.prof_exp = *newval; + break; + case CPUCLOCK_VIRT: + tsk->signal->cputime_expires.virt_exp = *newval; + break; + } } } diff --git a/kernel/sched.c b/kernel/sched.c index cc1f81b..c51b5d2 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4037,23 +4037,56 @@ DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); /* + * Return any ns on the sched_clock that have not yet been banked in + * @p in case that task is currently running. + * + * Called with task_rq_lock() held on @rq. + */ +static unsigned long long task_delta_exec(struct task_struct *p, struct rq *rq) +{ + if (task_current(rq, p)) { + u64 delta_exec; + + update_rq_clock(rq); + delta_exec = rq->clock - p->se.exec_start; + if ((s64)delta_exec > 0) + return delta_exec; + } + return 0; +} + +/* * Return p->sum_exec_runtime plus any more ns on the sched_clock * that have not yet been banked in case the task is currently running. */ unsigned long long task_sched_runtime(struct task_struct *p) { unsigned long flags; - u64 ns, delta_exec; + u64 ns; struct rq *rq; rq = task_rq_lock(p, &flags); - ns = p->se.sum_exec_runtime; - if (task_current(rq, p)) { - update_rq_clock(rq); - delta_exec = rq->clock - p->se.exec_start; - if ((s64)delta_exec > 0) - ns += delta_exec; - } + ns = p->se.sum_exec_runtime + task_delta_exec(p, rq); + task_rq_unlock(rq, &flags); + + return ns; +} + +/* + * Return sum_exec_runtime for the thread group plus any more ns on the + * sched_clock that have not yet been banked in case the task is currently + * running. 
+ */ +unsigned long long thread_group_sched_runtime(struct task_struct *p) +{ + unsigned long flags; + u64 ns; + struct rq *rq; + struct task_cputime totals; + + rq = task_rq_lock(p, &flags); + thread_group_cputime(p, &totals); + ns = totals.sum_exec_runtime + task_delta_exec(p, rq); task_rq_unlock(rq, &flags); return ns; @@ -4070,6 +4103,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime) cputime64_t tmp; p->utime = cputime_add(p->utime, cputime); + account_group_user_time(p, cputime); /* Add user time to cpustat. */ tmp = cputime_to_cputime64(cputime); @@ -4094,6 +4128,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime) tmp = cputime_to_cputime64(cputime); p->utime = cputime_add(p->utime, cputime); + account_group_user_time(p, cputime); p->gtime = cputime_add(p->gtime, cputime); cpustat->user = cputime64_add(cpustat->user, tmp); @@ -4129,6 +4164,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, } p->stime = cputime_add(p->stime, cputime); + account_group_system_time(p, cputime); /* Add system time to cpustat. */ tmp = cputime_to_cputime64(cputime); @@ -4170,6 +4206,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal) if (p == rq->idle) { p->stime = cputime_add(p->stime, steal); + account_group_system_time(p, steal); if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index fb8994c..99aa31a 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -507,6 +507,7 @@ static void update_curr(struct cfs_rq *cfs_rq) struct task_struct *curtask = task_of(curr); cpuacct_charge(curtask, delta_exec); + account_group_exec_runtime(curtask, delta_exec); } } diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 5523107..8375e69 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -483,6 +483,8 @@ static void update_curr_rt(struct rq *rq) schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec)); curr->se.sum_exec_runtime += delta_exec; + account_group_exec_runtime(curr, delta_exec); + curr->se.exec_start = rq->clock; cpuacct_charge(curr, delta_exec); @@ -1412,7 +1414,7 @@ static void watchdog(struct rq *rq, struct task_struct *p) p->rt.timeout++; next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); if (p->rt.timeout > next) - p->it_sched_expires = p->se.sum_exec_runtime; + p->cputime_expires.sched_exp = p->se.sum_exec_runtime; } } diff --git a/kernel/signal.c b/kernel/signal.c index e661b01..6eea582 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) struct siginfo info; unsigned long flags; struct sighand_struct *psig; + struct task_cputime cputime; int ret = sig; BUG_ON(sig == -1); @@ -1368,10 +1369,9 @@ int do_notify_parent(struct task_struct *tsk, int sig) info.si_uid = tsk->uid; - info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, - tsk->signal->utime)); - info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, - tsk->signal->stime)); + thread_group_cputime(tsk, &cputime); + info.si_utime = cputime_to_jiffies(cputime.utime); + info.si_stime = cputime_to_jiffies(cputime.stime); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) diff --git a/kernel/sys.c b/kernel/sys.c index 038a7bc..d046a7a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -853,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid) return old_fsgid; } +void do_sys_times(struct tms *tms) +{ + struct task_cputime cputime; + 
cputime_t cutime, cstime; + + spin_lock_irq(¤t->sighand->siglock); + thread_group_cputime(current, &cputime); + cutime = current->signal->cutime; + cstime = current->signal->cstime; + spin_unlock_irq(¤t->sighand->siglock); + tms->tms_utime = cputime_to_clock_t(cputime.utime); + tms->tms_stime = cputime_to_clock_t(cputime.stime); + tms->tms_cutime = cputime_to_clock_t(cutime); + tms->tms_cstime = cputime_to_clock_t(cstime); +} + asmlinkage long sys_times(struct tms __user * tbuf) { - /* - * In the SMP world we might just be unlucky and have one of - * the times increment as we use it. Since the value is an - * atomically safe type this is just fine. Conceptually its - * as if the syscall took an instant longer to occur. - */ if (tbuf) { struct tms tmp; - struct task_struct *tsk = current; - struct task_struct *t; - cputime_t utime, stime, cutime, cstime; - - spin_lock_irq(&tsk->sighand->siglock); - utime = tsk->signal->utime; - stime = tsk->signal->stime; - t = tsk; - do { - utime = cputime_add(utime, t->utime); - stime = cputime_add(stime, t->stime); - t = next_thread(t); - } while (t != tsk); - - cutime = tsk->signal->cutime; - cstime = tsk->signal->cstime; - spin_unlock_irq(&tsk->sighand->siglock); - - tmp.tms_utime = cputime_to_clock_t(utime); - tmp.tms_stime = cputime_to_clock_t(stime); - tmp.tms_cutime = cputime_to_clock_t(cutime); - tmp.tms_cstime = cputime_to_clock_t(cstime); + + do_sys_times(&tmp); if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) return -EFAULT; } @@ -1445,7 +1435,6 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) { struct rlimit new_rlim, *old_rlim; - unsigned long it_prof_secs; int retval; if (resource >= RLIM_NLIMITS) @@ -1491,18 +1480,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) if (new_rlim.rlim_cur == RLIM_INFINITY) goto out; - it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); - if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) { - unsigned long rlim_cur = new_rlim.rlim_cur; - cputime_t cputime; - - cputime = secs_to_cputime(rlim_cur); - read_lock(&tasklist_lock); - spin_lock_irq(¤t->sighand->siglock); - set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); - spin_unlock_irq(¤t->sighand->siglock); - read_unlock(&tasklist_lock); - } + update_rlimit_cpu(new_rlim.rlim_cur); out: return 0; } @@ -1540,11 +1518,8 @@ out: * */ -static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r, - cputime_t *utimep, cputime_t *stimep) +static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r) { - *utimep = cputime_add(*utimep, t->utime); - *stimep = cputime_add(*stimep, t->stime); r->ru_nvcsw += t->nvcsw; r->ru_nivcsw += t->nivcsw; r->ru_minflt += t->min_flt; @@ -1558,12 +1533,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) struct task_struct *t; unsigned long flags; cputime_t utime, stime; + struct task_cputime cputime; memset((char *) r, 0, sizeof *r); utime = stime = cputime_zero; if (who == RUSAGE_THREAD) { - accumulate_thread_rusage(p, r, &utime, &stime); + accumulate_thread_rusage(p, r); goto out; } @@ -1586,8 +1562,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) break; case RUSAGE_SELF: - utime = cputime_add(utime, p->signal->utime); - stime = cputime_add(stime, p->signal->stime); + thread_group_cputime(p, &cputime); + utime = cputime_add(utime, cputime.utime); + stime = 
cputime_add(stime, cputime.stime); r->ru_nvcsw += p->signal->nvcsw; r->ru_nivcsw += p->signal->nivcsw; r->ru_minflt += p->signal->min_flt; @@ -1596,7 +1573,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) r->ru_oublock += p->signal->oublock; t = p; do { - accumulate_thread_rusage(t, r, &utime, &stime); + accumulate_thread_rusage(t, r); t = next_thread(t); } while (t != p); break; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 03fc6a8..6964978 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -75,6 +75,7 @@ #include #include #include +#include #include "avc.h" #include "objsec.h" @@ -2321,13 +2322,7 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm) initrlim = init_task.signal->rlim+i; rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur); } - if (current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { - /* - * This will cause RLIMIT_CPU calculations - * to be refigured. - */ - current->it_prof_expires = jiffies_to_cputime(1); - } + update_rlimit_cpu(rlim->rlim_cur); } /* Wake up the parent if it is waiting so that it can -- cgit v0.10.2 From 430b5294bd72c085c730e1e4b86580f164d976bf Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Sep 2008 16:33:01 +0200 Subject: timers: fix itimer/many thread hang, fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix: kernel/fork.c:843: error: ‘struct signal_struct’ has no member named ‘sum_sched_runtime’ kernel/irq/handle.c:117: warning: ‘sparse_irq_lock’ defined but not used Signed-off-by: Ingo Molnar diff --git a/kernel/fork.c b/kernel/fork.c index a8ac2ef..1181b9a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -834,7 +834,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; task_io_accounting_init(&sig->ioac); - sig->sum_sched_runtime = 0; INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); INIT_LIST_HEAD(&sig->cpu_timers[2]); -- cgit v0.10.2 From 0a8eaa4f9b58759595a1bfe13a1295fdc25ba026 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Sep 2008 17:03:52 +0200 Subject: timers: fix itimer/many thread hang, fix #2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix the UP build: In file included from arch/x86/kernel/asm-offsets_32.c:9, from arch/x86/kernel/asm-offsets.c:3: include/linux/sched.h: In function ‘thread_group_cputime_clone_thread’: include/linux/sched.h:2272: warning: no return statement in function returning non-void include/linux/sched.h: In function ‘thread_group_cputime_account_user’: include/linux/sched.h:2284: error: invalid type argument of ‘->’ (have ‘struct task_cputime’) include/linux/sched.h:2284: error: invalid type argument of ‘->’ (have ‘struct task_cputime’) include/linux/sched.h: In function ‘thread_group_cputime_account_system’: include/linux/sched.h:2291: error: invalid type argument of ‘->’ (have ‘struct task_cputime’) include/linux/sched.h:2291: error: invalid type argument of ‘->’ (have ‘struct task_cputime’) include/linux/sched.h: In function ‘thread_group_cputime_account_exec_runtime’: include/linux/sched.h:2298: error: invalid type argument of ‘->’ (have ‘struct task_cputime’) distcc[14501] ERROR: compile arch/x86/kernel/asm-offsets.c on a/30 failed make[1]: *** [arch/x86/kernel/asm-offsets.s] Error 1 Signed-off-by: Ingo Molnar diff 
--git a/include/linux/sched.h b/include/linux/sched.h index 26d7a5f..ed355f0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2251,6 +2251,7 @@ static inline void thread_group_cputime_free(struct signal_struct *sig) static inline int thread_group_cputime_clone_thread(struct task_struct *curr, struct task_struct *tsk) { + return 0; } static inline void thread_group_cputime(struct task_struct *tsk, @@ -2263,21 +2264,21 @@ static inline void thread_group_cputime_account_user( struct thread_group_cputime *tgtimes, cputime_t cputime) { - tgtimes->totals->utime = cputime_add(tgtimes->totals->utime, cputime); + tgtimes->totals.utime = cputime_add(tgtimes->totals.utime, cputime); } static inline void thread_group_cputime_account_system( struct thread_group_cputime *tgtimes, cputime_t cputime) { - tgtimes->totals->stime = cputime_add(tgtimes->totals->stime, cputime); + tgtimes->totals.stime = cputime_add(tgtimes->totals.stime, cputime); } static inline void thread_group_cputime_account_exec_runtime( struct thread_group_cputime *tgtimes, unsigned long long ns) { - tgtimes->totals->sum_exec_runtime += ns; + tgtimes->totals.sum_exec_runtime += ns; } #endif /* CONFIG_SMP */ -- cgit v0.10.2 From 5ce73a4a5a4893a1aa4cdeed1b1a5a6de42c43b6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 14 Sep 2008 17:11:46 +0200 Subject: timers: fix itimer/many thread hang, cleanups Signed-off-by: Ingo Molnar diff --git a/include/linux/sched.h b/include/linux/sched.h index ed355f0..7ce8d4e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -430,7 +430,7 @@ struct pacct_struct { * @utime: time spent in user mode, in &cputime_t units * @stime: time spent in kernel mode, in &cputime_t units * @sum_exec_runtime: total time spent on the CPU, in nanoseconds - * + * * This structure groups together three kinds of CPU time that are * tracked for threads and thread groups. Most things considering * CPU time want to group these counts together and treat all three diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index dba1c33..9a7ea04 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -94,7 +94,7 @@ void update_rlimit_cpu(unsigned long rlim_new) cputime = secs_to_cputime(rlim_new); if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || - cputime_lt(current->signal->it_prof_expires, cputime)) { + cputime_lt(current->signal->it_prof_expires, cputime)) { spin_lock_irq(¤t->sighand->siglock); set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(¤t->sighand->siglock); @@ -1372,9 +1372,9 @@ void run_posix_cpu_timers(struct task_struct *tsk) * tsk->signal is non-NULL; this probably can't happen but cover the * possibility anyway. */ - if (unlikely(!sig) || !fastpath_timer_check(tsk, sig)) { + if (unlikely(!sig) || !fastpath_timer_check(tsk, sig)) return; - } + sighand = lock_task_sighand(tsk, &flags); if (likely(sighand)) { /* -- cgit v0.10.2 From 8afbc114542a6810b0a2e658abda6e911121cd22 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 25 Aug 2008 12:01:31 +0300 Subject: [MTD] [NAND] OMAP2: add retry after read timeout Very occasionally, (about one in a million) read operations are ongoing after the timeout has expired. So, retry three times while the ongoing bit remains set. 
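The retry behaviour described above boils down to the following pattern; this is a generic userspace sketch under stated assumptions, not the driver change itself (the actual omap2 OneNAND code appears in the diff below), and poll_done()/still_ongoing() are hypothetical stand-ins for the real interrupt and controller-status register reads.

/*
 * Sketch of "extend the timeout while the device still reports an
 * ongoing operation, up to a bounded number of retries".
 * poll_done() and still_ongoing() are placeholder stubs, not the
 * real OneNAND register accessors.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TIMEOUT_MS  20
#define MAX_RETRIES 3

static bool poll_done(void)     { return false; }	/* stub */
static bool still_ongoing(void) { return false; }	/* stub */

static long long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;
}

/* Returns 0 on completion, -1 on a genuine timeout. */
static int wait_with_retry(void)
{
	long long deadline = now_ms() + TIMEOUT_MS;
	int retries = 0;

	for (;;) {
		if (now_ms() < deadline) {
			if (poll_done())	/* busy-polls, as the driver does */
				return 0;
		} else if (still_ongoing() && retries++ < MAX_RETRIES) {
			/* Still busy: give it another TIMEOUT_MS window. */
			deadline = now_ms() + TIMEOUT_MS;
		} else {
			return -1;		/* timed out for real */
		}
	}
}

int main(void)
{
	printf("wait_with_retry() -> %d\n", wait_with_retry());
	return 0;
}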
Signed-off-by: Adrian Hunter Signed-off-by: David Woodhouse diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 34b4253..8387e05 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -187,16 +187,36 @@ retry: } } } else { + int retry_cnt = 0; + /* Turn interrupts off */ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); syscfg &= ~ONENAND_SYS_CFG1_IOBE; write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); timeout = jiffies + msecs_to_jiffies(20); - while (time_before(jiffies, timeout)) { - intr = read_reg(c, ONENAND_REG_INTERRUPT); - if (intr & ONENAND_INT_MASTER) + while (1) { + if (time_before(jiffies, timeout)) { + intr = read_reg(c, ONENAND_REG_INTERRUPT); + if (intr & ONENAND_INT_MASTER) + break; + } else { + /* Timeout after 20ms */ + ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); + if (ctrl & ONENAND_CTRL_ONGO) { + /* + * The operation seems to be still going + * so give it some more time. + */ + retry_cnt += 1; + if (retry_cnt < 3) { + timeout = jiffies + + msecs_to_jiffies(20); + continue; + } + } break; + } } } -- cgit v0.10.2 From ef89a8801321e0d0665c327c9d77d602ef764c87 Mon Sep 17 00:00:00 2001 From: Karl Beldan Date: Mon, 15 Sep 2008 14:37:29 +0200 Subject: [MTD] [NAND] nand_base.c: reset chip first Some chips require a RESET after power-up (e.g. Micron MT29FxGxxxxx). The first command sent is NAND_CMD_READID. Issue a NAND_CMD_RESET in nand_scan_ident before reading the device id. Tested with an MT29F4G08AAC. Signed-off-by: Karl Beldan Signed-off-by: David Woodhouse diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d303db3..0a9c9cd 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2318,6 +2318,12 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, /* Select the device */ chip->select_chip(mtd, 0); + /* + * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) + * after power-up + */ + chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + /* Send the command for reading device ID */ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); @@ -2488,6 +2494,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips) /* Check for a chip array */ for (i = 1; i < maxchips; i++) { chip->select_chip(mtd, i); + /* See comment in nand_get_flash_type for reset */ + chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); /* Send the command for reading device ID */ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); /* Read manufacturer and device IDs */ -- cgit v0.10.2 From b3d765f5df5707e2b3676768b6877db5d8db76a2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Sep 2008 23:12:11 +0900 Subject: sh: Fix up fpu emu build. The addition of the kprobes code pushed down a variable declaration, clean it up. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 862667a..35b901e 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -742,15 +742,13 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, struct pt_regs __regs) { struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - unsigned long error_code; + unsigned long inst; struct task_struct *tsk = current; if (kprobe_handle_illslot(regs->pc) == 0) return; #ifdef CONFIG_SH_FPU_EMU - unsigned short inst = 0; - get_user(inst, (unsigned short *)regs->pc + 1); if (!do_fpu_inst(inst, regs)) { get_user(inst, (unsigned short *)regs->pc); @@ -761,12 +759,12 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, /* not a FPU inst. 
*/ #endif - lookup_exception_vector(error_code); + lookup_exception_vector(inst); local_irq_enable(); CHK_REMOTE_DEBUG(regs); force_sig(SIGILL, tsk); - die_if_no_fixup("illegal slot instruction", regs, error_code); + die_if_no_fixup("illegal slot instruction", regs, inst); } asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, -- cgit v0.10.2 From b85641bdde340f683e5baa7688832e185548c9bd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Sep 2008 23:13:27 +0900 Subject: sh: Make memory hot-add and hot-remove depend on MMU. Cleans up link numerous build issues with page migration and so on when enabled on nommu builds. Signed-off-by: Paul Mundt diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index f8e6dc5..555ec97 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -132,11 +132,11 @@ config ARCH_SELECT_MEMORY_MODEL config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y - depends on SPARSEMEM + depends on SPARSEMEM && MMU config ARCH_ENABLE_MEMORY_HOTREMOVE def_bool y - depends on SPARSEMEM + depends on SPARSEMEM && MMU config ARCH_MEMORY_PROBE def_bool y -- cgit v0.10.2 From 8a80a5e9e89cf3aacf8165dd34b40c7c3fe91b4d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Sep 2008 23:14:36 +0900 Subject: sh: Fix up signal_64 conflicting handle_signal() definition. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 1d62dfe..37bd381 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -43,6 +43,10 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) +static void +handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, + sigset_t *oldset, struct pt_regs * regs); + /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by -- cgit v0.10.2 From 81b669952ed5fe0d6f65f8b9a97d1fdeac93ff10 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Sep 2008 23:24:02 +0900 Subject: sh: Consolidate struct sh_cpuinfo definitions across _32/_64 split. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 58e2be5..693364a 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -3,6 +3,7 @@ #include #include +#include #ifndef __ASSEMBLY__ /* @@ -43,8 +44,45 @@ enum cpu_type { CPU_SH_NONE }; +/* + * TLB information structure + * + * Defined for both I and D tlb, per-processor. 
+ */ +struct tlb_info { + unsigned long long next; + unsigned long long first; + unsigned long long last; + + unsigned int entries; + unsigned int step; + + unsigned long flags; +}; + +struct sh_cpuinfo { + unsigned int type; + int cut_major, cut_minor; + unsigned long loops_per_jiffy; + unsigned long asid_cache; + + struct cache_info icache; /* Primary I-cache */ + struct cache_info dcache; /* Primary D-cache */ + struct cache_info scache; /* Secondary cache */ + + /* TLB info */ + struct tlb_info itlb; + struct tlb_info dtlb; + + unsigned long flags; +} __attribute__ ((aligned(L1_CACHE_BYTES))); + +extern struct sh_cpuinfo cpu_data[]; +#define boot_cpu_data cpu_data[0] +#define current_cpu_data cpu_data[smp_processor_id()] +#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] + /* Forward decl */ -struct sh_cpuinfo; struct seq_operations; extern struct pt_regs fake_swapper_regs; diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 1cd3a14..a46a020 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -13,7 +13,6 @@ #include #include #include -#include #include /* @@ -27,24 +26,6 @@ #define CCN_CVR 0xff000040 #define CCN_PRR 0xff000044 -struct sh_cpuinfo { - unsigned int type; - int cut_major, cut_minor; - unsigned long loops_per_jiffy; - unsigned long asid_cache; - - struct cache_info icache; /* Primary I-cache */ - struct cache_info dcache; /* Primary D-cache */ - struct cache_info scache; /* Secondary cache */ - - unsigned long flags; -} __attribute__ ((aligned(L1_CACHE_BYTES))); - -extern struct sh_cpuinfo cpu_data[]; -#define boot_cpu_data cpu_data[0] -#define current_cpu_data cpu_data[smp_processor_id()] -#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] - asmlinkage void __init sh_cpu_init(void); /* diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index ae19839..b0b4824 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -36,46 +35,6 @@ __asm__("gettr tr0, %1\n\t" \ : "1" (__dummy)); \ pc; }) -/* - * TLB information structure - * - * Defined for both I and D tlb, per-processor. - */ -struct tlb_info { - unsigned long long next; - unsigned long long first; - unsigned long long last; - - unsigned int entries; - unsigned int step; - - unsigned long flags; -}; - -struct sh_cpuinfo { - enum cpu_type type; - unsigned long loops_per_jiffy; - unsigned long asid_cache; - - unsigned int cpu_clock, master_clock, bus_clock, module_clock; - - /* Cache info */ - struct cache_info icache; - struct cache_info dcache; - struct cache_info scache; - - /* TLB info */ - struct tlb_info itlb; - struct tlb_info dtlb; - - unsigned long flags; -}; - -extern struct sh_cpuinfo cpu_data[]; -#define boot_cpu_data cpu_data[0] -#define current_cpu_data cpu_data[smp_processor_id()] -#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] - #endif /* -- cgit v0.10.2 From b406efefd5246a67c691fd79871e65ce8f3f91ff Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Sep 2008 23:24:59 +0900 Subject: sh: Fix up headers_check regression. linux/mmzone.h isn't exported, kill it off from asm/setup.h and simply deal with it in the places that have a dependency instead. 
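In practice that just means any sh file that was picking up the mmzone declarations implicitly through <asm/setup.h> now includes them itself, along these lines (illustrative only; the affected users are not shown in this hunk):

    #include <linux/mmzone.h>   /* previously implicit via <asm/setup.h> */
    #include <asm/setup.h>
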
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h index 1b7856f..d450bcf 100644 --- a/arch/sh/include/asm/setup.h +++ b/arch/sh/include/asm/setup.h @@ -1,12 +1,9 @@ #ifndef _SH_SETUP_H #define _SH_SETUP_H -#include - #define COMMAND_LINE_SIZE 256 #ifdef __KERNEL__ - /* * This is set up by the setup-routine at boot-time */ -- cgit v0.10.2 From 2194478157127d52338be96ac9436dc54005816a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Sep 2008 23:26:44 +0900 Subject: sh: HAVE_IOREMAP_PROT depends on MMU. HAVE_IOREMAP_PROT enables an unconditional reference to generic_access_phys(), which remains undefined in the nommu case. As there's no point in supporting this there anyways, simply fix up the dependency. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 18a1cc8..adef42c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -12,7 +12,7 @@ config SUPERH select HAVE_IDE select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT - select HAVE_IOREMAP_PROT + select HAVE_IOREMAP_PROT if MMU help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast -- cgit v0.10.2 From ccbe329bcd87924baed96474ec0a6725e3957897 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:03 +0100 Subject: bitmap: add bitmap_copy_le() bitmap_copy_le() copies a bitmap, putting the bits into little-endian order (i.e., each unsigned long word in the bitmap is put into little-endian order). The UWB stack used bitmaps to manage Medium Access Slot availability, and these bitmaps need to be written to the hardware in LE order. Signed-off-by: David Vrabel diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 89781fd..5379913 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -130,6 +130,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); +extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); #define BITMAP_LAST_WORD_MASK(nbits) \ ( \ diff --git a/lib/bitmap.c b/lib/bitmap.c index 06fb57c..c2006bf 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -1007,3 +1007,25 @@ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) return 0; } EXPORT_SYMBOL(bitmap_allocate_region); + +/** + * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. + * @dst: destination buffer + * @src: bitmap to copy + * @nbits: number of bits in the bitmap + * + * Require nbits % BITS_PER_LONG == 0. + */ +void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) +{ + unsigned long *d = dst; + int i; + + for (i = 0; i < nbits/BITS_PER_LONG; i++) { + if (BITS_PER_LONG == 64) + d[i] = cpu_to_le64(src[i]); + else + d[i] = cpu_to_le32(src[i]); + } +} +EXPORT_SYMBOL(bitmap_copy_le); -- cgit v0.10.2 From 99d368bc9e279a2a5e56f3afe32166260e90caa7 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:04 +0100 Subject: uwb: add initial documentation Documentation (and example utilities) for the UWB (and WUSB) stacks. Some of the documentation may be out-of-date. 
Signed-off-by: David Vrabel diff --git a/Documentation/usb/WUSB-Design-overview.txt b/Documentation/usb/WUSB-Design-overview.txt new file mode 100644 index 0000000..4c3d62c --- /dev/null +++ b/Documentation/usb/WUSB-Design-overview.txt @@ -0,0 +1,448 @@ + +Linux UWB + Wireless USB + WiNET + + (C) 2005-2006 Intel Corporation + Inaky Perez-Gonzalez + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License version + 2 as published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + +Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for +updated content. + + * Design-overview.txt-1.8 + +This code implements a Ultra Wide Band stack for Linux, as well as +drivers for the the USB based UWB radio controllers defined in the +Wireless USB 1.0 specification (including Wireless USB host controller +and an Intel WiNET controller). + + 1. Introduction + 1. HWA: Host Wire adapters, your Wireless USB dongle + + 2. DWA: Device Wired Adaptor, a Wireless USB hub for wired + devices + 3. WHCI: Wireless Host Controller Interface, the PCI WUSB host + adapter + 2. The UWB stack + 1. Devices and hosts: the basic structure + + 2. Host Controller life cycle + + 3. On the air: beacons and enumerating the radio neighborhood + + 4. Device lists + 5. Bandwidth allocation + + 3. Wireless USB Host Controller drivers + + 4. Glossary + + + Introduction + +UWB is a wide-band communication protocol that is to serve also as the +low-level protocol for others (much like TCP sits on IP). Currently +these others are Wireless USB and TCP/IP, but seems Bluetooth and +Firewire/1394 are coming along. + +UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of +~-41dB (or 0.074 uW/MHz--geography specific data is still being +negotiated w/ regulators, so watch for changes). That band is divided in +a bunch of ~1.5 GHz wide channels (or band groups) composed of three +subbands/subchannels (528 MHz each). Each channel is independent of each +other, so you could consider them different "busses". Initially this +driver considers them all a single one. + +Radio time is divided in 65536 us long /superframes/, each one divided +in 256 256us long /MASs/ (Media Allocation Slots), which are the basic +time/media allocation units for transferring data. At the beginning of +each superframe there is a Beacon Period (BP), where every device +transmit its beacon on a single MAS. The length of the BP depends on how +many devices are present and the length of their beacons. + +Devices have a MAC (fixed, 48 bit address) and a device (changeable, 16 +bit address) and send periodic beacons to advertise themselves and pass +info on what they are and do. They advertise their capabilities and a +bunch of other stuff. + +The different logical parts of this driver are: + + * + + *UWB*: the Ultra-Wide-Band stack -- manages the radio and + associated spectrum to allow for devices sharing it. Allows to + control bandwidth assingment, beaconing, scanning, etc + + * + + *WUSB*: the layer that sits on top of UWB to provide Wireless USB. 
+ The Wireless USB spec defines means to control a UWB radio and to + do the actual WUSB. + + + HWA: Host Wire adapters, your Wireless USB dongle + +WUSB also defines a device called a Host Wire Adaptor (HWA), which in +mere terms is a USB dongle that enables your PC to have UWB and Wireless +USB. The Wireless USB Host Controller in a HWA looks to the host like a +[Wireless] USB controller connected via USB (!) + +The HWA itself is broken in two or three main interfaces: + + * + + *RC*: Radio control -- this implements an interface to the + Ultra-Wide-Band radio controller. The driver for this implements a + USB-based UWB Radio Controller to the UWB stack. + + * + + *HC*: the wireless USB host controller. It looks like a USB host + whose root port is the radio and the WUSB devices connect to it. + To the system it looks like a separate USB host. The driver (will) + implement a USB host controller (similar to UHCI, OHCI or EHCI) + for which the root hub is the radio...To reiterate: it is a USB + controller that is connected via USB instead of PCI. + + * + + *WINET*: some HW provide a WiNET interface (IP over UWB). This + package provides a driver for it (it looks like a network + interface, winetX). The driver detects when there is a link up for + their type and kick into gear. + + + DWA: Device Wired Adaptor, a Wireless USB hub for wired devices + +These are the complement to HWAs. They are a USB host for connecting +wired devices, but it is connected to your PC connected via Wireless +USB. To the system it looks like yet another USB host. To the untrained +eye, it looks like a hub that connects upstream wirelessly. + +We still offer no support for this; however, it should share a lot of +code with the HWA-RC driver; there is a bunch of factorization work that +has been done to support that in upcoming releases. + + + WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter + +This is your usual PCI device that implements WHCI. Similar in concept +to EHCI, it allows your wireless USB devices (including DWAs) to connect +to your host via a PCI interface. As in the case of the HWA, it has a +Radio Control interface and the WUSB Host Controller interface per se. + +There is still no driver support for this, but will be in upcoming +releases. + + + The UWB stack + +The main mission of the UWB stack is to keep a tally of which devices +are in radio proximity to allow drivers to connect to them. As well, it +provides an API for controlling the local radio controllers (RCs from +now on), such as to start/stop beaconing, scan, allocate bandwidth, etc. + + + Devices and hosts: the basic structure + +The main building block here is the UWB device (struct uwb_dev). For +each device that pops up in radio presence (ie: the UWB host receives a +beacon from it) you get a struct uwb_dev that will show up in +/sys/class/uwb and in /sys/bus/uwb/devices. + +For each RC that is detected, a new struct uwb_rc is created. In turn, a +RC is also a device, so they also show in /sys/class/uwb and +/sys/bus/uwb/devices, but at the same time, only radio controllers show +up in /sys/class/uwb_rc. + + * + + [*] The reason for RCs being also devices is that not only we can + see them while enumerating the system device tree, but also on the + radio (their beacons and stuff), so the handling has to be + likewise to that of a device. + +Each RC driver is implemented by a separate driver that plugs into the +interface that the UWB stack provides through a struct uwb_rc_ops. 
The +spec creators have been nice enough to make the message format the same +for HWA and WHCI RCs, so the driver is really a very thin transport that +moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/] +and sends the replies and notifications back to the API +[/uwb_rc_neh_grok()/]. Notifications are handled to the UWB daemon, that +is chartered, among other things, to keep the tab of how the UWB radio +neighborhood looks, creating and destroying devices as they show up or +dissapear. + +Command execution is very simple: a command block is sent and a event +block or reply is expected back. For sending/receiving command/events, a +handle called /neh/ (Notification/Event Handle) is opened with +/uwb_rc_neh_open()/. + +The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for +the USB connected HWA. Eventually, drivers/whci-rc.c will do the same +for the PCI connected WHCI controller. + + + Host Controller life cycle + +So let's say we connect a dongle to the system: it is detected and +firmware uploaded if needed [for Intel's i1480 +/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated. +Now we have a real HWA device connected and +/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, that will set up the +Wire-Adaptor environment and then suck it into the UWB stack's vision of +the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/]. + + * + + [*] The stack should put a new RC to scan for devices + [/uwb_rc_scan()/] so it finds what's available around and tries to + connect to them, but this is policy stuff and should be driven + from user space. As of now, the operator is expected to do it + manually; see the release notes for documentation on the procedure. + +When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/ +takes time of tearing everything down safely (or not...). + + + On the air: beacons and enumerating the radio neighborhood + +So assuming we have devices and we have agreed for a channel to connect +on (let's say 9), we put the new RC to beacon: + + * + + $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon + +Now it is visible. If there were other devices in the same radio channel +and beacon group (that's what the zero is for), the dongle's radio +control interface will send beacon notifications on its +notification/event endpoint (NEEP). The beacon notifications are part of +the event stream that is funneled into the API with +/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB +daemon through a notification list. + +UWBD wakes up and scans the event list; finds a beacon and adds it to +the BEACON CACHE (/uwb_beca/). If he receives a number of beacons from +the same device, he considers it to be 'onair' and creates a new device +[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons +are received in some time, the device is considered gone and wiped out +[uwbd calls periodically /uwb/beacon.c:uwb_beca_purge()/ that will purge +the beacon cache of dead devices]. + + + Device lists + +All UWB devices are kept in the list of the struct bus_type uwb_bus. + + + Bandwidth allocation + +The UWB stack maintains a local copy of DRP availability through +processing of incoming *DRP Availability Change* notifications. This +local copy is currently used to present the current bandwidth +availability to the user through the sysfs file +/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth +availability information will be used by the bandwidth reservation +routines. 
+ +The bandwidth reservation routines are in progress and are thus not +present in the current release. When completed they will enable a user +to initiate DRP reservation requests through interaction with sysfs. DRP +reservation requests from remote UWB devices will also be handled. The +bandwidth management done by the UWB stack will include callbacks to the +higher layers will enable the higher layers to use the reservations upon +completion. [Note: The bandwidth reservation work is in progress and +subject to change.] + + + Wireless USB Host Controller drivers + +*WARNING* This section needs a lot of work! + +As explained above, there are three different types of HCs in the WUSB +world: HWA-HC, DWA-HC and WHCI-HC. + +HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB +connected controllers), and their transfer management system is almost +identical. So is their notification delivery system. + +HWA-HC and WHCI-HC share that they are both WUSB host controllers, so +they have to deal with WUSB device life cycle and maintenance, wireless +root-hub + +HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has +three endpoints (Notifications, Data Transfer In and Data Transfer +Out--known as NEP, DTI and DTO in the code). + +We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster +ID and tell the HC to use all that. Then we start it. This means the HC +starts sending MMCs. + + * + + The MMCs are blocks of data defined somewhere in the WUSB1.0 spec + that define a stream in the UWB channel time allocated for sending + WUSB IEs (host to device commands/notifications) and Device + Notifications (device initiated to host). Each host defines a + unique Wireless USB cluster through MMCs. Devices can connect to a + single cluster at the time. The IEs are Information Elements, and + among them are the bandwidth allocations that tell each device + when can they transmit or receive. + +Now it all depends on external stimuli. + +*New device connection* + +A new device pops up, it scans the radio looking for MMCs that give out +the existence of Wireless USB channels. Once one (or more) are found, +selects which one to connect to. Sends a /DN_Connect/ (device +notification connect) during the DNTS (Device Notification Time +Slot--announced in the MMCs + +HC picks the /DN_Connect/ out (nep module sends to notif.c for delivery +into /devconnect/). This process starts the authentication process for +the device. First we allocate a /fake port/ and assign an +unauthenticated address (128 to 255--what we really do is +0x80 | fake_port_idx). We fiddle with the fake port status and /khubd/ +sees a new connection, so he moves on to enable the fake port with a reset. + +So now we are in the reset path -- we know we have a non-yet enumerated +device with an unauthorized address; we ask user space to authenticate +(FIXME: not yet done, similar to bluetooth pairing), then we do the key +exchange (FIXME: not yet done) and issue a /set address 0/ to bring the +device to the default state. Device is authenticated. + +From here, the USB stack takes control through the usb_hcd ops. khubd +has seen the port status changes, as we have been toggling them. It will +start enumerating and doing transfers through usb_hcd->urb_enqueue() to +read descriptors and move our data. + +*Device life cycle and keep alives* + +Everytime there is a succesful transfer to/from a device, we update a +per-device activity timestamp. 
If not, every now and then we check and +if the activity timestamp gets old, we ping the device by sending it a +Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this +arrives to us as a notification through +devconnect.c:wusb_handle_dn_alive(). If a device times out, we +disconnect it from the system (cleaning up internal information and +toggling the bits in the fake hub port, which kicks khubd into removing +the rest of the stuff). + +This is done through devconnect:__wusb_check_devs(), which will scan the +device list looking for whom needs refreshing. + +If the device wants to disconnect, it will either die (ugly) or send a +/DN_Disconnect/ that will prompt a disconnection from the system. + +*Sending and receiving data* + +Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is +/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and +DWAs. + +Each HC has a number of rpipes and buffers that can be assigned to them; +when doing a data transfer (xfer), first the rpipe has to be aimed and +prepared (buffers assigned), then we can start queueing requests for +data in or out. + +Data buffers have to be segmented out before sending--so we send first a +header (segment request) and then if there is any data, a data buffer +immediately after to the DTI interface (yep, even the request). If our +buffer is bigger than the max segment size, then we just do multiple +requests. + +[This sucks, because doing USB scatter gatter in Linux is resource +intensive, if any...not that the current approach is not. It just has to +be cleaned up a lot :)]. + +If reading, we don't send data buffers, just the segment headers saying +we want to read segments. + +When the xfer is executed, we receive a notification that says data is +ready in the DTI endpoint (handled through +xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a +descriptor that gives us the status of the transfer, its identification +(given when we issued it) and the segment number. If it was a data read, +we issue another URB to read into the destination buffer the chunk of +data coming out of the remote endpoint. Done, wait for the next guy. The +callbacks for the URBs issued from here are the ones that will declare +the xfer complete at some point and call it's callback. + +Seems simple, but the implementation is not trivial. + + * + + *WARNING* Old!! + +The main xfer descriptor, wa_xfer (equivalent to a URB) contains an +array of segments, tallys on segments and buffers and callback +information. Buried in there is a lot of URBs for executing the segments +and buffer transfers. + +For OUT xfers, there is an array of segments, one URB for each, another +one of buffer URB. When submitting, we submit URBs for segment request +1, buffer 1, segment 2, buffer 2...etc. Then we wait on the DTI for xfer +result data; when all the segments are complete, we call the callback to +finalize the transfer. + +For IN xfers, we only issue URBs for the segments we want to read and +then wait for the xfer result data. + +*URB mapping into xfers* + +This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an +rpipe to the endpoint where we have to transmit, create a transfer +context (wa_xfer) and submit it. When the xfer is done, our callback is +called and we assign the status bits and release the xfer resources. + +In dequeue() we are basically cancelling/aborting the transfer. 
We issue +a xfer abort request to the HC, cancell all the URBs we had submitted +and not yet done and when all that is done, the xfer callback will be +called--this will call the URB callback. + + + Glossary + +*DWA* -- Device Wire Adapter + +USB host, wired for downstream devices, upstream connects wirelessly +with Wireless USB. + +*EVENT* -- Response to a command on the NEEP + +*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB + +*NEH* -- Notification/Event Handle + +Handle/file descriptor for receiving notifications or events. The WA +code requires you to get one of this to listen for notifications or +events on the NEEP. + +*NEEP* -- Notification/Event EndPoint + +Stuff related to the management of the first endpoint of a HWA USB +dongle that is used to deliver an stream of events and notifications to +the host. + +*NOTIFICATION* -- Message coming in the NEEP as response to something. + +*RC* -- Radio Control + +Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by +InakyPerezGonzalez) + diff --git a/Documentation/usb/wusb-cbaf b/Documentation/usb/wusb-cbaf new file mode 100644 index 0000000..a385478 --- /dev/null +++ b/Documentation/usb/wusb-cbaf @@ -0,0 +1,133 @@ +#! /bin/bash +# + +set -e + +progname=$(basename $0) +function help +{ + cat < $uwb_rc/uwb_rc/beacon + echo I: started beaconing on ch 13 in host $(basename $uwb_rc) + fi + echo $host_CHID | cat > $dev/wusb_chid + echo I: started host $(basename $dev) + done + ;; + set-chid) + shift + for dev in ${2:-$devs} + do + echo "${2:-$host_CHID}" "${3:-$host_band_group}" "${4:-$host_name}" \ + | cat > $dev/wusb_host_info + done + ;; + get-cdid) + for dev in ${2:-$devs} + do + cat $dev/wusb_cdid + done + ;; + set-cc) + for dev in ${2:-$devs} + do + shift + CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)" + CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)" + cat > $dev/wusb_cc <&2 + help 1>&2 + result=1 +esac +exit $result -- cgit v0.10.2 From 34e95e41f1fd751e33a7eb3fa66594903b81f13d Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:05 +0100 Subject: uwb: add the uwb include files Signed-off-by: David Vrabel diff --git a/include/linux/uwb.h b/include/linux/uwb.h new file mode 100644 index 0000000..0cd3593 --- /dev/null +++ b/include/linux/uwb.h @@ -0,0 +1,761 @@ +/* + * Ultra Wide Band + * UWB API + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: doc: overview of the API, different parts and pointers + */ + +#ifndef __LINUX__UWB_H__ +#define __LINUX__UWB_H__ + +#include +#include +#include +#include +#include +#include + +struct uwb_dev; +struct uwb_beca_e; +struct uwb_rc; +struct uwb_rsv; +struct uwb_dbg; + +/** + * struct uwb_dev - a UWB Device + * @rc: UWB Radio Controller that discovered the device (kind of its + * parent). 
+ * @bce: a beacon cache entry for this device; or NULL if the device + * is a local radio controller. + * @mac_addr: the EUI-48 address of this device. + * @dev_addr: the current DevAddr used by this device. + * @beacon_slot: the slot number the beacon is using. + * @streams: bitmap of streams allocated to reservations targeted at + * this device. For an RC, this is the streams allocated for + * reservations targeted at DevAddrs. + * + * A UWB device may either by a neighbor or part of a local radio + * controller. + */ +struct uwb_dev { + struct mutex mutex; + struct list_head list_node; + struct device dev; + struct uwb_rc *rc; /* radio controller */ + struct uwb_beca_e *bce; /* Beacon Cache Entry */ + + struct uwb_mac_addr mac_addr; + struct uwb_dev_addr dev_addr; + int beacon_slot; + DECLARE_BITMAP(streams, UWB_NUM_STREAMS); +}; +#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) + +/** + * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs + * + * RC[CE]Bs have a 'context ID' field that matches the command with + * the event received to confirm it. + * + * Maximum number of context IDs + */ +enum { UWB_RC_CTX_MAX = 256 }; + + +/** Notification chain head for UWB generated events to listeners */ +struct uwb_notifs_chain { + struct list_head list; + struct mutex mutex; +}; + +/** + * struct uwb_mas_bm - a bitmap of all MAS in a superframe + * @bm: a bitmap of length #UWB_NUM_MAS + */ +struct uwb_mas_bm { + DECLARE_BITMAP(bm, UWB_NUM_MAS); +}; + +/** + * uwb_rsv_state - UWB Reservation state. + * + * NONE - reservation is not active (no DRP IE being transmitted). + * + * Owner reservation states: + * + * INITIATED - owner has sent an initial DRP request. + * PENDING - target responded with pending Reason Code. + * MODIFIED - reservation manager is modifying an established + * reservation with a different MAS allocation. + * ESTABLISHED - the reservation has been successfully negotiated. + * + * Target reservation states: + * + * DENIED - request is denied. + * ACCEPTED - request is accepted. + * PENDING - PAL has yet to make a decision to whether to accept or + * deny. + * + * FIXME: further target states TBD. + */ +enum uwb_rsv_state { + UWB_RSV_STATE_NONE, + UWB_RSV_STATE_O_INITIATED, + UWB_RSV_STATE_O_PENDING, + UWB_RSV_STATE_O_MODIFIED, + UWB_RSV_STATE_O_ESTABLISHED, + UWB_RSV_STATE_T_ACCEPTED, + UWB_RSV_STATE_T_DENIED, + UWB_RSV_STATE_T_PENDING, + + UWB_RSV_STATE_LAST, +}; + +enum uwb_rsv_target_type { + UWB_RSV_TARGET_DEV, + UWB_RSV_TARGET_DEVADDR, +}; + +/** + * struct uwb_rsv_target - the target of a reservation. + * + * Reservations unicast and targeted at a single device + * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a + * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR). + */ +struct uwb_rsv_target { + enum uwb_rsv_target_type type; + union { + struct uwb_dev *dev; + struct uwb_dev_addr devaddr; + }; +}; + +/* + * Number of streams reserved for reservations targeted at DevAddrs. 
+ */ +#define UWB_NUM_GLOBAL_STREAMS 1 + +typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); + +/** + * struct uwb_rsv - a DRP reservation + * + * Data structure management: + * + * @rc: the radio controller this reservation is for + * (as target or owner) + * @rc_node: a list node for the RC + * @pal_node: a list node for the PAL + * + * Owner and target parameters: + * + * @owner: the UWB device owning this reservation + * @target: the target UWB device + * @type: reservation type + * + * Owner parameters: + * + * @max_mas: maxiumum number of MAS + * @min_mas: minimum number of MAS + * @sparsity: owner selected sparsity + * @is_multicast: true iff multicast + * + * @callback: callback function when the reservation completes + * @pal_priv: private data for the PAL making the reservation + * + * Reservation status: + * + * @status: negotiation status + * @stream: stream index allocated for this reservation + * @mas: reserved MAS + * @drp_ie: the DRP IE + * @ie_valid: true iff the DRP IE matches the reservation parameters + * + * DRP reservations are uniquely identified by the owner, target and + * stream index. However, when using a DevAddr as a target (e.g., for + * a WUSB cluster reservation) the responses may be received from + * devices with different DevAddrs. In this case, reservations are + * uniquely identified by just the stream index. A number of stream + * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this. + */ +struct uwb_rsv { + struct uwb_rc *rc; + struct list_head rc_node; + struct list_head pal_node; + + struct uwb_dev *owner; + struct uwb_rsv_target target; + enum uwb_drp_type type; + int max_mas; + int min_mas; + int sparsity; + bool is_multicast; + + uwb_rsv_cb_f callback; + void *pal_priv; + + enum uwb_rsv_state state; + u8 stream; + struct uwb_mas_bm mas; + struct uwb_ie_drp *drp_ie; + bool ie_valid; + struct timer_list timer; + bool expired; +}; + +static const +struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } }; + +static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas) +{ + bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS); +} + +/** + * struct uwb_drp_avail - a radio controller's view of MAS usage + * @global: MAS unused by neighbors (excluding reservations targetted + * or owned by the local radio controller) or the beaon period + * @local: MAS unused by local established reservations + * @pending: MAS unused by local pending reservations + * @ie: DRP Availability IE to be included in the beacon + * @ie_valid: true iff @ie is valid and does not need to regenerated from + * @global and @local + * + * Each radio controller maintains a view of MAS usage or + * availability. MAS available for a new reservation are determined + * from the intersection of @global, @local, and @pending. + * + * The radio controller must transmit a DRP Availability IE that's the + * intersection of @global and @local. + * + * A set bit indicates the MAS is unused and available. + * + * rc->rsvs_mutex should be held before accessing this data structure. + * + * [ECMA-368] section 17.4.3. 
+ */ +struct uwb_drp_avail { + DECLARE_BITMAP(global, UWB_NUM_MAS); + DECLARE_BITMAP(local, UWB_NUM_MAS); + DECLARE_BITMAP(pending, UWB_NUM_MAS); + struct uwb_ie_drp_avail ie; + bool ie_valid; +}; + + +const char *uwb_rsv_state_str(enum uwb_rsv_state state); +const char *uwb_rsv_type_str(enum uwb_drp_type type); + +struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, + void *pal_priv); +void uwb_rsv_destroy(struct uwb_rsv *rsv); + +int uwb_rsv_establish(struct uwb_rsv *rsv); +int uwb_rsv_modify(struct uwb_rsv *rsv, + int max_mas, int min_mas, int sparsity); +void uwb_rsv_terminate(struct uwb_rsv *rsv); + +void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); + +/** + * Radio Control Interface instance + * + * + * Life cycle rules: those of the UWB Device. + * + * @index: an index number for this radio controller, as used in the + * device name. + * @version: version of protocol supported by this device + * @priv: Backend implementation; rw with uwb_dev.dev.sem taken. + * @cmd: Backend implementation to execute commands; rw and call + * only with uwb_dev.dev.sem taken. + * @reset: Hardware reset of radio controller and any PAL controllers. + * @filter: Backend implementation to manipulate data to and from device + * to be compliant to specification assumed by driver (WHCI + * 0.95). + * + * uwb_dev.dev.mutex is used to execute commands and update + * the corresponding structures; can't use a spinlock + * because rc->cmd() can sleep. + * @ies: This is a dynamically allocated array cacheing the + * IEs (settable by the host) that the beacon of this + * radio controller is currently sending. + * + * In reality, we store here the full command we set to + * the radio controller (which is basically a command + * prefix followed by all the IEs the beacon currently + * contains). This way we don't have to realloc and + * memcpy when setting it. + * + * We set this up in uwb_rc_ie_setup(), where we alloc + * this struct, call get_ie() [so we know which IEs are + * currently being sent, if any]. + * + * @ies_capacity:Amount of space (in bytes) allocated in @ies. The + * amount used is given by sizeof(*ies) plus ies->wIELength + * (which is a little endian quantity all the time). 
+ * @ies_mutex: protect the IE cache + * @dbg: information for the debug interface + */ +struct uwb_rc { + struct uwb_dev uwb_dev; + int index; + u16 version; + + struct module *owner; + void *priv; + int (*start)(struct uwb_rc *rc); + void (*stop)(struct uwb_rc *rc); + int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t); + int (*reset)(struct uwb_rc *rc); + int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); + int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t, + size_t *, size_t *); + + spinlock_t neh_lock; /* protects neh_* and ctx_* */ + struct list_head neh_list; /* Open NE handles */ + unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)]; + u8 ctx_roll; + + int beaconing; /* Beaconing state [channel number] */ + int scanning; + enum uwb_scan_type scan_type:3; + unsigned ready:1; + struct uwb_notifs_chain notifs_chain; + + struct uwb_drp_avail drp_avail; + struct list_head reservations; + struct mutex rsvs_mutex; + struct workqueue_struct *rsv_workq; + struct work_struct rsv_update_work; + + struct mutex ies_mutex; + struct uwb_rc_cmd_set_ie *ies; + size_t ies_capacity; + + spinlock_t pal_lock; + struct list_head pals; + + struct uwb_dbg *dbg; +}; + + +/** + * struct uwb_pal - a UWB PAL + * @new_rsv: called when a peer requests a reservation (may be NULL if + * the PAL cannot accept reservation requests). + * + * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB + * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). + * + * The PALs using a radio controller must register themselves to + * permit the UWB stack to coordinate usage of the radio between the + * various PALs or to allow PALs to response to certain requests from + * peers. + * + * A struct uwb_pal should be embedded in a containing structure + * belonging to the PAL and initialized with uwb_pal_init()). Fields + * should be set appropriately by the PAL before registering the PAL + * with uwb_pal_register(). + */ +struct uwb_pal { + struct list_head node; + + void (*new_rsv)(struct uwb_rsv *rsv); +}; + +void uwb_pal_init(struct uwb_pal *pal); +int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal); +void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal); + +/* + * General public API + * + * This API can be used by UWB device drivers or by those implementing + * UWB Radio Controllers + */ +struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, + const struct uwb_dev_addr *devaddr); +struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *); +static inline void uwb_dev_get(struct uwb_dev *uwb_dev) +{ + get_device(&uwb_dev->dev); +} +static inline void uwb_dev_put(struct uwb_dev *uwb_dev) +{ + put_device(&uwb_dev->dev); +} +struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev); + +/** + * Callback function for 'uwb_{dev,rc}_foreach()'. + * + * @dev: Linux device instance + * 'uwb_dev = container_of(dev, struct uwb_dev, dev)' + * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'. + * + * @returns: 0 to continue the iterations, any other val to stop + * iterating and return the value to the caller of + * _foreach(). 
+ */ +typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv); +int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv); + +struct uwb_rc *uwb_rc_alloc(void); +struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *); +struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *); +void uwb_rc_put(struct uwb_rc *rc); + +typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg, + struct uwb_rceb *reply, ssize_t reply_size); + +int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + uwb_rc_cmd_cb_f cb, void *arg); +ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + struct uwb_rceb *reply, size_t reply_size); +ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + struct uwb_rceb **preply); +ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **); +int uwb_bg_joined(struct uwb_rc *rc); + +size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); + +int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *); +int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *); +int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *); +int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *); +int __uwb_mac_addr_assigned_check(struct device *, void *); +int __uwb_dev_addr_assigned_check(struct device *, void *); + +/* Print in @buf a pretty repr of @addr */ +static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size, + const struct uwb_dev_addr *addr) +{ + return __uwb_addr_print(buf, buf_size, addr->data, 0); +} + +/* Print in @buf a pretty repr of @addr */ +static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size, + const struct uwb_mac_addr *addr) +{ + return __uwb_addr_print(buf, buf_size, addr->data, 1); +} + +/* @returns 0 if device addresses @addr2 and @addr1 are equal */ +static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1, + const struct uwb_dev_addr *addr2) +{ + return memcmp(addr1, addr2, sizeof(*addr1)); +} + +/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */ +static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1, + const struct uwb_mac_addr *addr2) +{ + return memcmp(addr1, addr2, sizeof(*addr1)); +} + +/* @returns !0 if a MAC @addr is a broadcast address */ +static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr) +{ + struct uwb_mac_addr bcast = { + .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } + }; + return !uwb_mac_addr_cmp(addr, &bcast); +} + +/* @returns !0 if a MAC @addr is all zeroes*/ +static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr) +{ + struct uwb_mac_addr unset = { + .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } + }; + return !uwb_mac_addr_cmp(addr, &unset); +} + +/* @returns !0 if the address is in use. */ +static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc, + struct uwb_dev_addr *addr) +{ + return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr); +} + +/* + * UWB Radio Controller API + * + * This API is used (in addition to the general API) to implement UWB + * Radio Controllers. 
+ */ +void uwb_rc_init(struct uwb_rc *); +int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv); +void uwb_rc_rm(struct uwb_rc *); +void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); +void uwb_rc_neh_error(struct uwb_rc *, int); +void uwb_rc_reset_all(struct uwb_rc *rc); + +/** + * uwb_rsv_is_owner - is the owner of this reservation the RC? + * @rsv: the reservation + */ +static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) +{ + return rsv->owner == &rsv->rc->uwb_dev; +} + +/** + * Events generated by UWB that can be passed to any listeners + * + * Higher layers can register callback functions with the radio + * controller using uwb_notifs_register(). The radio controller + * maintains a list of all registered handlers and will notify all + * nodes when an event occurs. + */ +enum uwb_notifs { + UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */ + UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */ + UWB_NOTIF_ONAIR, + UWB_NOTIF_OFFAIR, +}; + +/* Callback function registered with UWB */ +struct uwb_notifs_handler { + struct list_head list_node; + void (*cb)(void *, struct uwb_dev *, enum uwb_notifs); + void *data; +}; + +int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *); +int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *); + + +/** + * UWB radio controller Event Size Entry (for creating entry tables) + * + * WUSB and WHCI define events and notifications, and they might have + * fixed or variable size. + * + * Each event/notification has a size which is not necessarily known + * in advance based on the event code. As well, vendor specific + * events/notifications will have a size impossible to determine + * unless we know about the device's specific details. + * + * It was way too smart of the spec writers not to think that it would + * be impossible for a generic driver to skip over vendor specific + * events/notifications if there are no LENGTH fields in the HEADER of + * each message...the transaction size cannot be counted on as the + * spec does not forbid to pack more than one event in a single + * transaction. + * + * Thus, we guess sizes with tables (or for events, when you know the + * size ahead of time you can use uwb_rc_neh_extra_size*()). We + * register tables with the known events and their sizes, and then we + * traverse those tables. For those with variable length, we provide a + * way to lookup the size inside the event/notification's + * payload. This allows device-specific event size tables to be + * registered. + * + * @size: Size of the payload + * + * @offset: if != 0, at offset @offset-1 starts a field with a length + * that has to be added to @size. The format of the field is + * given by @type. + * + * @type: Type and length of the offset field. Most common is LE 16 + * bits (that's why that is zero); others are there mostly to + * cover for bugs and weirdos. 
+ */ +struct uwb_est_entry { + size_t size; + unsigned offset; + enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type; +}; + +int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product, + const struct uwb_est_entry *, size_t entries); +int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product, + const struct uwb_est_entry *, size_t entries); +ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, + size_t len); + +/* -- Misc */ + +enum { + EDC_MAX_ERRORS = 10, + EDC_ERROR_TIMEFRAME = HZ, +}; + +/* error density counter */ +struct edc { + unsigned long timestart; + u16 errorcount; +}; + +static inline +void edc_init(struct edc *edc) +{ + edc->timestart = jiffies; +} + +/* Called when an error occured. + * This is way to determine if the number of acceptable errors per time + * period has been exceeded. It is not accurate as there are cases in which + * this scheme will not work, for example if there are periodic occurences + * of errors that straddle updates to the start time. This scheme is + * sufficient for our usage. + * + * @returns 1 if maximum acceptable errors per timeframe has been exceeded. + */ +static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) +{ + unsigned long now; + + now = jiffies; + if (now - err_hist->timestart > timeframe) { + err_hist->errorcount = 1; + err_hist->timestart = now; + } else if (++err_hist->errorcount > max_err) { + err_hist->errorcount = 0; + err_hist->timestart = now; + return 1; + } + return 0; +} + + +/* Information Element handling */ + +/* For representing the state of writing to a buffer when iterating */ +struct uwb_buf_ctx { + char *buf; + size_t bytes, size; +}; + +typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *, + size_t, void *); +struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); +ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, + const void *buf, size_t size); +int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *, + size_t, void *); +int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); +struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); + + +/* + * Transmission statistics + * + * UWB uses LQI and RSSI (one byte values) for reporting radio signal + * strength and line quality indication. We do quick and dirty + * averages of those. They are signed values, btw. + * + * For 8 bit quantities, we keep the min, the max, an accumulator + * (@sigma) and a # of samples. When @samples gets to 255, we compute + * the average (@sigma / @samples), place it in @sigma and reset + * @samples to 1 (so we use it as the first sample). + * + * Now, statistically speaking, probably I am kicking the kidneys of + * some books I have in my shelves collecting dust, but I just want to + * get an approx, not the Nobel. + * + * LOCKING: there is no locking per se, but we try to keep a lockless + * schema. Only _add_samples() modifies the values--as long as you + * have other locking on top that makes sure that no two calls of + * _add_sample() happen at the same time, then we are fine. Now, for + * resetting the values we just set @samples to 0 and that makes the + * next _add_sample() to start with defaults. Reading the values in + * _show() currently can race, so you need to make sure the calls are + * under the same lock that protects calls to _add_sample(). FIXME: + * currently unlocked (It is not ultraprecise but does the trick. Bite + * me). 
+ */ +struct stats { + s8 min, max; + s16 sigma; + atomic_t samples; +}; + +static inline +void stats_init(struct stats *stats) +{ + atomic_set(&stats->samples, 0); + wmb(); +} + +static inline +void stats_add_sample(struct stats *stats, s8 sample) +{ + s8 min, max; + s16 sigma; + unsigned samples = atomic_read(&stats->samples); + if (samples == 0) { /* it was zero before, so we initialize */ + min = 127; + max = -128; + sigma = 0; + } else { + min = stats->min; + max = stats->max; + sigma = stats->sigma; + } + + if (sample < min) /* compute new values */ + min = sample; + else if (sample > max) + max = sample; + sigma += sample; + + stats->min = min; /* commit */ + stats->max = max; + stats->sigma = sigma; + if (atomic_add_return(1, &stats->samples) > 255) { + /* wrapped around! reset */ + stats->sigma = sigma / 256; + atomic_set(&stats->samples, 1); + } +} + +static inline ssize_t stats_show(struct stats *stats, char *buf) +{ + int min, max, avg; + int samples = atomic_read(&stats->samples); + if (samples == 0) + min = max = avg = 0; + else { + min = stats->min; + max = stats->max; + avg = stats->sigma / samples; + } + return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg); +} + +static inline ssize_t stats_store(struct stats *stats, const char *buf, + size_t size) +{ + stats_init(stats); + return size; +} + +#endif /* #ifndef __LINUX__UWB_H__ */ diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h new file mode 100644 index 0000000..1141f41 --- /dev/null +++ b/include/linux/uwb/debug-cmd.h @@ -0,0 +1,57 @@ +/* + * Ultra Wide Band + * Debug interface commands + * + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __LINUX__UWB__DEBUG_CMD_H__ +#define __LINUX__UWB__DEBUG_CMD_H__ + +#include + +/* + * Debug interface commands + * + * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation. + * + * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation. + */ + +enum uwb_dbg_cmd_type { + UWB_DBG_CMD_RSV_ESTABLISH = 1, + UWB_DBG_CMD_RSV_TERMINATE = 2, +}; + +struct uwb_dbg_cmd_rsv_establish { + __u8 target[6]; + __u8 type; + __u16 max_mas; + __u16 min_mas; + __u8 sparsity; +}; + +struct uwb_dbg_cmd_rsv_terminate { + int index; +}; + +struct uwb_dbg_cmd { + __u32 type; + union { + struct uwb_dbg_cmd_rsv_establish rsv_establish; + struct uwb_dbg_cmd_rsv_terminate rsv_terminate; + }; +}; + +#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */ diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h new file mode 100644 index 0000000..a86a73f --- /dev/null +++ b/include/linux/uwb/debug.h @@ -0,0 +1,82 @@ +/* + * Ultra Wide Band + * Debug Support + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: doc + * Invoke like: + * + * #define D_LOCAL 4 + * #include + * + * At the end of your include files. + */ +#include + +struct device; +extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize); + +/* Master debug switch; !0 enables, 0 disables */ +#define D_MASTER (!0) + +/* Local (per-file) debug switch; #define before #including */ +#ifndef D_LOCAL +#define D_LOCAL 0 +#endif + +#undef __d_printf +#undef d_fnstart +#undef d_fnend +#undef d_printf +#undef d_dump + +#define __d_printf(l, _tag, _dev, f, a...) \ +do { \ + struct device *__dev = (_dev); \ + if (D_MASTER && D_LOCAL >= (l)) { \ + char __head[64] = ""; \ + if (_dev != NULL) { \ + if ((unsigned long)__dev < 4096) \ + printk(KERN_ERR "E: Corrupt dev %p\n", \ + __dev); \ + else \ + snprintf(__head, sizeof(__head), \ + "%s %s: ", \ + dev_driver_string(__dev), \ + __dev->bus_id); \ + } \ + printk(KERN_ERR "%s%s" _tag ": " f, __head, \ + __func__, ## a); \ + } \ +} while (0 && _dev) + +#define d_fnstart(l, _dev, f, a...) \ + __d_printf(l, " FNSTART", _dev, f, ## a) +#define d_fnend(l, _dev, f, a...) \ + __d_printf(l, " FNEND", _dev, f, ## a) +#define d_printf(l, _dev, f, a...) \ + __d_printf(l, "", _dev, f, ## a) +#define d_dump(l, _dev, ptr, size) \ +do { \ + struct device *__dev = _dev; \ + if (D_MASTER && D_LOCAL >= (l)) \ + dump_bytes(__dev, ptr, size); \ +} while (0 && _dev) +#define d_test(l) (D_MASTER && D_LOCAL >= (l)) diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h new file mode 100644 index 0000000..198c15f --- /dev/null +++ b/include/linux/uwb/spec.h @@ -0,0 +1,727 @@ +/* + * Ultra Wide Band + * UWB Standard definitions + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * All these definitions are based on the ECMA-368 standard. + * + * Note all definitions are Little Endian in the wire, and we will + * convert them to host order before operating on the bitfields (that + * yes, we use extensively). + */ + +#ifndef __LINUX__UWB_SPEC_H__ +#define __LINUX__UWB_SPEC_H__ + +#include +#include + +#define i1480_FW 0x00000303 +/* #define i1480_FW 0x00000302 */ + +/** + * Number of Medium Access Slots in a superframe. + * + * UWB divides time in SuperFrames, each one divided in 256 pieces, or + * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the + * basic bandwidth allocation unit in UWB. 
+ */ +enum { UWB_NUM_MAS = 256 }; + +/** + * Number of Zones in superframe. + * + * UWB divides the superframe into zones with numbering starting from BPST. + * See MBOA MAC[16.8.6] + */ +enum { UWB_NUM_ZONES = 16 }; + +/* + * Number of MAS in a zone. + */ +#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) + +/* + * Number of streams per DRP reservation between a pair of devices. + * + * [ECMA-368] section 16.8.6. + */ +enum { UWB_NUM_STREAMS = 8 }; + +/* + * mMasLength + * + * The length of a MAS in microseconds. + * + * [ECMA-368] section 17.16. + */ +enum { UWB_MAS_LENGTH_US = 256 }; + +/* + * mBeaconSlotLength + * + * The length of the beacon slot in microseconds. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; + +/* + * mMaxLostBeacons + * + * The number beacons missing in consecutive superframes before a + * device can be considered as unreachable. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_MAX_LOST_BEACONS = 3 }; + +/* + * Length of a superframe in microseconds. + */ +#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) + +/** + * UWB MAC address + * + * It is *imperative* that this struct is exactly 6 packed bytes (as + * it is also used to define headers sent down and up the wire/radio). + */ +struct uwb_mac_addr { + u8 data[6]; +} __attribute__((packed)); + + +/** + * UWB device address + * + * It is *imperative* that this struct is exactly 6 packed bytes (as + * it is also used to define headers sent down and up the wire/radio). + */ +struct uwb_dev_addr { + u8 data[2]; +} __attribute__((packed)); + + +/** + * Types of UWB addresses + * + * Order matters (by size). + */ +enum uwb_addr_type { + UWB_ADDR_DEV = 0, + UWB_ADDR_MAC = 1, +}; + + +/** Size of a char buffer for printing a MAC/device address */ +enum { UWB_ADDR_STRSIZE = 32 }; + + +/** UWB WiMedia protocol IDs. 
*/ +enum uwb_prid { + UWB_PRID_WLP_RESERVED = 0x0000, + UWB_PRID_WLP = 0x0001, + UWB_PRID_WUSB_BOT = 0x0010, + UWB_PRID_WUSB = 0x0010, + UWB_PRID_WUSB_TOP = 0x001F, +}; + + +/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */ +enum uwb_phy_rate { + UWB_PHY_RATE_53 = 0, + UWB_PHY_RATE_80, + UWB_PHY_RATE_106, + UWB_PHY_RATE_160, + UWB_PHY_RATE_200, + UWB_PHY_RATE_320, + UWB_PHY_RATE_400, + UWB_PHY_RATE_480, + UWB_PHY_RATE_INVALID +}; + + +/** + * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78]) + */ +enum uwb_scan_type { + UWB_SCAN_ONLY = 0, + UWB_SCAN_OUTSIDE_BP, + UWB_SCAN_WHILE_INACTIVE, + UWB_SCAN_DISABLED, + UWB_SCAN_ONLY_STARTTIME, + UWB_SCAN_TOP +}; + + +/** ACK Policy types (MBOA MAC[7.2.1.3]) */ +enum uwb_ack_pol { + UWB_ACK_NO = 0, + UWB_ACK_INM = 1, + UWB_ACK_B = 2, + UWB_ACK_B_REQ = 3, +}; + + +/** DRP reservation types ([ECMA-368 table 106) */ +enum uwb_drp_type { + UWB_DRP_TYPE_ALIEN_BP = 0, + UWB_DRP_TYPE_HARD, + UWB_DRP_TYPE_SOFT, + UWB_DRP_TYPE_PRIVATE, + UWB_DRP_TYPE_PCA, +}; + + +/** DRP Reason Codes ([ECMA-368] table 107) */ +enum uwb_drp_reason { + UWB_DRP_REASON_ACCEPTED = 0, + UWB_DRP_REASON_CONFLICT, + UWB_DRP_REASON_PENDING, + UWB_DRP_REASON_DENIED, + UWB_DRP_REASON_MODIFIED, +}; + +/** + * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) + */ +enum uwb_drp_notif_reason { + UWB_DRP_NOTIF_DRP_IE_RCVD = 0, + UWB_DRP_NOTIF_CONFLICT, + UWB_DRP_NOTIF_TERMINATE, +}; + + +/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */ +struct uwb_drp_alloc { + __le16 zone_bm; + __le16 mas_bm; +} __attribute__((packed)); + + +/** General MAC Header format (ECMA-368[16.2]) */ +struct uwb_mac_frame_hdr { + __le16 Frame_Control; + struct uwb_dev_addr DestAddr; + struct uwb_dev_addr SrcAddr; + __le16 Sequence_Control; + __le16 Access_Information; +} __attribute__((packed)); + + +/** + * uwb_beacon_frame - a beacon frame including MAC headers + * + * [ECMA] section 16.3. 
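+ *
+ * Note: given a beacon-received notification, the sender's BPST offset
+ * can be recovered as wBPSTOffset - Beacon_Slot_Number *
+ * UWB_BEACON_SLOT_LENGTH_US; this is how the BPST sysfs attribute in
+ * lc-dev.c computes it.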
+ */ +struct uwb_beacon_frame { + struct uwb_mac_frame_hdr hdr; + struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */ + u8 Beacon_Slot_Number; + u8 Device_Control; + u8 IEData[]; +} __attribute__((packed)); + + +/** Information Element codes (MBOA MAC[T54]) */ +enum uwb_ie { + UWB_PCA_AVAILABILITY = 2, + UWB_IE_DRP_AVAILABILITY = 8, + UWB_IE_DRP = 9, + UWB_BP_SWITCH_IE = 11, + UWB_MAC_CAPABILITIES_IE = 12, + UWB_PHY_CAPABILITIES_IE = 13, + UWB_APP_SPEC_PROBE_IE = 15, + UWB_IDENTIFICATION_IE = 19, + UWB_MASTER_KEY_ID_IE = 20, + UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ + UWB_APP_SPEC_IE = 255, +}; + + +/** + * Header common to all Information Elements (IEs) + */ +struct uwb_ie_hdr { + u8 element_id; /* enum uwb_ie */ + u8 length; +} __attribute__((packed)); + + +/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */ +struct uwb_ie_drp { + struct uwb_ie_hdr hdr; + __le16 drp_control; + struct uwb_dev_addr dev_addr; + struct uwb_drp_alloc allocs[]; +} __attribute__((packed)); + +static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 0) & 0x7; +} + +static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 3) & 0x7; +} + +static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 6) & 0x7; +} + +static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 9) & 0x1; +} + +static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 10) & 0x1; +} + +static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 11) & 0x1; +} + +static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie) +{ + return (le16_to_cpu(ie->drp_control) >> 12) & 0x1; +} + +static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x7 << 0)) | (type << 0); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie, + enum uwb_drp_reason reason_code) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (ie->drp_control & ~(0x7 << 6)) | (reason_code << 6); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 9)) | (status << 9); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11); + ie->drp_control = cpu_to_le16(drp_control); +} + +static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe) +{ + u16 drp_control = le16_to_cpu(ie->drp_control); + 
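+	/* Clear bit 12 of the host-order control word and set it from
+	   @unsafe, then store it back as little-endian, as the other
+	   uwb_ie_drp_set_*() helpers above do for their fields. */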
drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12); + ie->drp_control = cpu_to_le16(drp_control); +} + +/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */ +struct uwb_ie_drp_avail { + struct uwb_ie_hdr hdr; + DECLARE_BITMAP(bmp, UWB_NUM_MAS); +} __attribute__((packed)); + +/** + * The Vendor ID is set to an OUI that indicates the vendor of the device. + * ECMA-368 [16.8.10] + */ +struct uwb_vendor_id { + u8 data[3]; +} __attribute__((packed)); + +/** + * The device type ID + * FIXME: clarify what this means + * ECMA-368 [16.8.10] + */ +struct uwb_device_type_id { + u8 data[3]; +} __attribute__((packed)); + + +/** + * UWB device information types + * ECMA-368 [16.8.10] + */ +enum uwb_dev_info_type { + UWB_DEV_INFO_VENDOR_ID = 0, + UWB_DEV_INFO_VENDOR_TYPE, + UWB_DEV_INFO_NAME, +}; + +/** + * UWB device information found in Identification IE + * ECMA-368 [16.8.10] + */ +struct uwb_dev_info { + u8 type; /* enum uwb_dev_info_type */ + u8 length; + u8 data[]; +} __attribute__((packed)); + +/** + * UWB Identification IE + * ECMA-368 [16.8.10] + */ +struct uwb_identification_ie { + struct uwb_ie_hdr hdr; + struct uwb_dev_info info[]; +} __attribute__((packed)); + +/* + * UWB Radio Controller + * + * These definitions are common to the Radio Control layers as + * exported by the WUSB1.0 HWA and WHCI interfaces. + */ + +/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */ +struct uwb_rccb { + u8 bCommandType; /* enum hwa_cet */ + __le16 wCommand; /* Command code */ + u8 bCommandContext; /* Context ID */ +} __attribute__((packed)); + + +/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */ +struct uwb_rceb { + u8 bEventType; /* enum hwa_cet */ + __le16 wEvent; /* Event code */ + u8 bEventContext; /* Context ID */ +} __attribute__((packed)); + + +enum { + UWB_RC_CET_GENERAL = 0, /* General Command/Event type */ + UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */ +}; + +/* Commands to the radio controller */ +enum uwb_rc_cmd { + UWB_RC_CMD_CHANNEL_CHANGE = 16, + UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */ + UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */ + UWB_RC_CMD_RESET = 19, + UWB_RC_CMD_SCAN = 20, /* Scan management */ + UWB_RC_CMD_SET_BEACON_FILTER = 21, + UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */ + UWB_RC_CMD_SET_IE = 23, /* Information Element management */ + UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24, + UWB_RC_CMD_SET_TX_POWER = 25, + UWB_RC_CMD_SLEEP = 26, + UWB_RC_CMD_START_BEACON = 27, + UWB_RC_CMD_STOP_BEACON = 28, + UWB_RC_CMD_BP_MERGE = 29, + UWB_RC_CMD_SEND_COMMAND_FRAME = 30, + UWB_RC_CMD_SET_ASIE_NOTIF = 31, +}; + +/* Notifications from the radio controller */ +enum uwb_rc_evt { + UWB_RC_EVT_IE_RCV = 0, + UWB_RC_EVT_BEACON = 1, + UWB_RC_EVT_BEACON_SIZE = 2, + UWB_RC_EVT_BPOIE_CHANGE = 3, + UWB_RC_EVT_BP_SLOT_CHANGE = 4, + UWB_RC_EVT_BP_SWITCH_IE_RCV = 5, + UWB_RC_EVT_DEV_ADDR_CONFLICT = 6, + UWB_RC_EVT_DRP_AVAIL = 7, + UWB_RC_EVT_DRP = 8, + UWB_RC_EVT_BP_SWITCH_STATUS = 9, + UWB_RC_EVT_CMD_FRAME_RCV = 10, + UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11, + /* Events (command responses) use the same code as the command */ + UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535, +}; + +enum uwb_rc_extended_type_1_cmd { + UWB_RC_SET_DAA_ENERGY_MASK = 32, + UWB_RC_SET_NOTIFICATION_FILTER_EX = 33, +}; + +enum uwb_rc_extended_type_1_evt { + UWB_RC_DAA_ENERGY_DETECTED = 0, +}; + +/* Radio Control Result Code. [WHCI] table 3-3. 
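+ *
+ * These codes come back in the bResultCode/result member of the confirm
+ * and notification structures defined below.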
*/ +enum { + UWB_RC_RES_SUCCESS = 0, + UWB_RC_RES_FAIL, + UWB_RC_RES_FAIL_HARDWARE, + UWB_RC_RES_FAIL_NO_SLOTS, + UWB_RC_RES_FAIL_BEACON_TOO_LARGE, + UWB_RC_RES_FAIL_INVALID_PARAMETER, + UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL, + UWB_RC_RES_FAIL_INVALID_IE_DATA, + UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED, + UWB_RC_RES_FAIL_CANCELLED, + UWB_RC_RES_FAIL_INVALID_STATE, + UWB_RC_RES_FAIL_INVALID_SIZE, + UWB_RC_RES_FAIL_ACK_NOT_RECEIVED, + UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF, + UWB_RC_RES_FAIL_TIME_OUT = 255, +}; + +/* Confirm event. [WHCI] section 3.1.3.1 etc. */ +struct uwb_rc_evt_confirm { + struct uwb_rceb rceb; + u8 bResultCode; +} __attribute__((packed)); + +/* Device Address Management event. [WHCI] section 3.1.3.2. */ +struct uwb_rc_evt_dev_addr_mgmt { + struct uwb_rceb rceb; + u8 baAddr[6]; + u8 bResultCode; +} __attribute__((packed)); + + +/* Get IE Event. [WHCI] section 3.1.3.3. */ +struct uwb_rc_evt_get_ie { + struct uwb_rceb rceb; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Set DRP IE Event. [WHCI] section 3.1.3.7. */ +struct uwb_rc_evt_set_drp_ie { + struct uwb_rceb rceb; + __le16 wRemainingSpace; + u8 bResultCode; +} __attribute__((packed)); + +/* Set IE Event. [WHCI] section 3.1.3.8. */ +struct uwb_rc_evt_set_ie { + struct uwb_rceb rceb; + __le16 RemainingSpace; + u8 bResultCode; +} __attribute__((packed)); + +/* Scan command. [WHCI] 3.1.3.5. */ +struct uwb_rc_cmd_scan { + struct uwb_rccb rccb; + u8 bChannelNumber; + u8 bScanState; + __le16 wStartTime; +} __attribute__((packed)); + +/* Set DRP IE command. [WHCI] section 3.1.3.7. */ +struct uwb_rc_cmd_set_drp_ie { + struct uwb_rccb rccb; + __le16 wIELength; + struct uwb_ie_drp IEData[]; +} __attribute__((packed)); + +/* Set IE command. [WHCI] section 3.1.3.8. */ +struct uwb_rc_cmd_set_ie { + struct uwb_rccb rccb; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */ +struct uwb_rc_evt_set_daa_energy_mask { + struct uwb_rceb rceb; + __le16 wLength; + u8 result; +} __attribute__((packed)); + +/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */ +struct uwb_rc_evt_set_notification_filter_ex { + struct uwb_rceb rceb; + __le16 wLength; + u8 result; +} __attribute__((packed)); + +/* IE Received notification. [WHCI] section 3.1.4.1. */ +struct uwb_rc_evt_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr SrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* Type of the received beacon. [WHCI] section 3.1.4.2. */ +enum uwb_rc_beacon_type { + UWB_RC_BEACON_TYPE_SCAN = 0, + UWB_RC_BEACON_TYPE_NEIGHBOR, + UWB_RC_BEACON_TYPE_OL_ALIEN, + UWB_RC_BEACON_TYPE_NOL_ALIEN, +}; + +/* Beacon received notification. [WHCI] 3.1.4.2. */ +struct uwb_rc_evt_beacon { + struct uwb_rceb rceb; + u8 bChannelNumber; + u8 bBeaconType; + __le16 wBPSTOffset; + u8 bLQI; + u8 bRSSI; + __le16 wBeaconInfoLength; + u8 BeaconInfo[]; +} __attribute__((packed)); + + +/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */ +struct uwb_rc_evt_beacon_size { + struct uwb_rceb rceb; + __le16 wNewBeaconSize; +} __attribute__((packed)); + + +/* BPOIE Change notification. [WHCI] section 3.1.4.4. */ +struct uwb_rc_evt_bpoie_change { + struct uwb_rceb rceb; + __le16 wBPOIELength; + u8 BPOIE[]; +} __attribute__((packed)); + + +/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. 
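+ *
+ * The single slot_info byte carries the new slot number in bits 0-6 and
+ * a "no slot" flag in bit 7; see the two accessors that follow.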
*/ +struct uwb_rc_evt_bp_slot_change { + struct uwb_rceb rceb; + u8 slot_info; +} __attribute__((packed)); + +static inline int uwb_rc_evt_bp_slot_change_slot_num( + const struct uwb_rc_evt_bp_slot_change *evt) +{ + return evt->slot_info & 0x7f; +} + +static inline int uwb_rc_evt_bp_slot_change_no_slot( + const struct uwb_rc_evt_bp_slot_change *evt) +{ + return (evt->slot_info & 0x80) >> 7; +} + +/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */ +struct uwb_rc_evt_bp_switch_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr wSrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */ +struct uwb_rc_evt_dev_addr_conflict { + struct uwb_rceb rceb; +} __attribute__((packed)); + +/* DRP notification. [WHCI] section 3.1.4.9. */ +struct uwb_rc_evt_drp { + struct uwb_rceb rceb; + struct uwb_dev_addr src_addr; + u8 reason; + u8 beacon_slot_number; + __le16 ie_length; + u8 ie_data[]; +} __attribute__((packed)); + +static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt) +{ + return evt->reason & 0x0f; +} + + +/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */ +struct uwb_rc_evt_drp_avail { + struct uwb_rceb rceb; + DECLARE_BITMAP(bmp, UWB_NUM_MAS); +} __attribute__((packed)); + +/* BP switch status notification. [WHCI] section 3.1.4.10. */ +struct uwb_rc_evt_bp_switch_status { + struct uwb_rceb rceb; + u8 status; + u8 slot_offset; + __le16 bpst_offset; + u8 move_countdown; +} __attribute__((packed)); + +/* Command Frame Received notification. [WHCI] section 3.1.4.11. */ +struct uwb_rc_evt_cmd_frame_rcv { + struct uwb_rceb rceb; + __le16 receive_time; + struct uwb_dev_addr wSrcAddr; + struct uwb_dev_addr wDstAddr; + __le16 control; + __le16 reserved; + __le16 dataLength; + u8 data[]; +} __attribute__((packed)); + +/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */ +struct uwb_rc_evt_channel_change_ie_rcv { + struct uwb_rceb rceb; + struct uwb_dev_addr wSrcAddr; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */ +struct uwb_rc_evt_daa_energy_detected { + struct uwb_rceb rceb; + __le16 wLength; + u8 bandID; + u8 reserved; + u8 toneBmp[16]; +} __attribute__((packed)); + + +/** + * Radio Control Interface Class Descriptor + * + * WUSB 1.0 [8.6.1.2] + */ +struct uwb_rc_control_intf_class_desc { + u8 bLength; + u8 bDescriptorType; + __le16 bcdRCIVersion; +} __attribute__((packed)); + +#endif /* #ifndef __LINUX__UWB_SPEC_H__ */ diff --git a/include/linux/wlp.h b/include/linux/wlp.h new file mode 100644 index 0000000..033545e --- /dev/null +++ b/include/linux/wlp.h @@ -0,0 +1,735 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * + * Copyright (C) 2005-2006 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * + * - Does not (yet) include support for WLP control frames + * WLP Draft 0.99 [6.5]. + * + * A visual representation of the data structures. + * + * wssidB wssidB + * ^ ^ + * | | + * wssidA wssidA + * wlp interface { ^ ^ + * ... | | + * ... ... wssid wssid ... + * wlp --- ... | | + * }; neighbors --> neighbA --> neighbB + * ... + * wss + * ... + * eda cache --> neighborA --> neighborB --> neighborC ... + */ + +#ifndef __LINUX__WLP_H_ +#define __LINUX__WLP_H_ + +#include +#include +#include +#include + +/** + * WLP Protocol ID + * WLP Draft 0.99 [6.2] + * + * The MUX header for all WLP frames + */ +#define WLP_PROTOCOL_ID 0x0100 + +/** + * WLP Version + * WLP version placed in the association frames (WLP 0.99 [6.6]) + */ +#define WLP_VERSION 0x10 + +/** + * Bytes needed to print UUID as string + */ +#define WLP_WSS_UUID_STRSIZE 48 + +/** + * Bytes needed to print nonce as string + */ +#define WLP_WSS_NONCE_STRSIZE 48 + + +/** + * Size used for WLP name size + * + * The WSS name is set to 65 bytes, 1 byte larger than the maximum + * allowed by the WLP spec. This is to have a null terminated string + * for display to the user. A maximum of 64 bytes will still be used + * when placing the WSS name field in association frames. + */ +#define WLP_WSS_NAME_SIZE 65 + +/** + * Number of bytes added by WLP to data frame + * + * A data frame transmitted from a host will be placed in a Standard or + * Abbreviated WLP frame. These have an extra 4 bytes of header (struct + * wlp_frame_std_abbrv_hdr). + * When the stack sends this data frame for transmission it needs to ensure + * there is enough headroom for this header. + */ +#define WLP_DATA_HLEN 4 + +/** + * State of device regarding WLP Service Set + * + * WLP_WSS_STATE_NONE: the host does not participate in any WSS + * WLP_WSS_STATE_PART_ENROLLED: used as part of the enrollment sequence + * ("Partial Enroll"). This state is used to + * indicate the first part of enrollment that is + * unsecure. If the WSS is unsecure then the + * state will promptly go to WLP_WSS_STATE_ENROLLED, + * if the WSS is not secure then the enrollment + * procedure is a few more steps before we are + * enrolled. 
+ * WLP_WSS_STATE_ENROLLED: the host is enrolled in a WSS + * WLP_WSS_STATE_ACTIVE: WSS is activated + * WLP_WSS_STATE_CONNECTED: host is connected to neighbor in WSS + * + */ +enum wlp_wss_state { + WLP_WSS_STATE_NONE = 0, + WLP_WSS_STATE_PART_ENROLLED, + WLP_WSS_STATE_ENROLLED, + WLP_WSS_STATE_ACTIVE, + WLP_WSS_STATE_CONNECTED, +}; + +/** + * WSS Secure status + * WLP 0.99 Table 6 + * + * Set to one if the WSS is secure, zero if it is not secure + */ +enum wlp_wss_sec_status { + WLP_WSS_UNSECURE = 0, + WLP_WSS_SECURE, +}; + +/** + * WLP frame type + * WLP Draft 0.99 [6.2 Table 1] + */ +enum wlp_frame_type { + WLP_FRAME_STANDARD = 0, + WLP_FRAME_ABBREVIATED, + WLP_FRAME_CONTROL, + WLP_FRAME_ASSOCIATION, +}; + +/** + * WLP Association Message Type + * WLP Draft 0.99 [6.6.1.2 Table 8] + */ +enum wlp_assoc_type { + WLP_ASSOC_D1 = 2, + WLP_ASSOC_D2 = 3, + WLP_ASSOC_M1 = 4, + WLP_ASSOC_M2 = 5, + WLP_ASSOC_M3 = 7, + WLP_ASSOC_M4 = 8, + WLP_ASSOC_M5 = 9, + WLP_ASSOC_M6 = 10, + WLP_ASSOC_M7 = 11, + WLP_ASSOC_M8 = 12, + WLP_ASSOC_F0 = 14, + WLP_ASSOC_E1 = 32, + WLP_ASSOC_E2 = 33, + WLP_ASSOC_C1 = 34, + WLP_ASSOC_C2 = 35, + WLP_ASSOC_C3 = 36, + WLP_ASSOC_C4 = 37, +}; + +/** + * WLP Attribute Type + * WLP Draft 0.99 [6.6.1 Table 6] + */ +enum wlp_attr_type { + WLP_ATTR_AUTH = 0x1005, /* Authenticator */ + WLP_ATTR_DEV_NAME = 0x1011, /* Device Name */ + WLP_ATTR_DEV_PWD_ID = 0x1012, /* Device Password ID */ + WLP_ATTR_E_HASH1 = 0x1014, /* E-Hash1 */ + WLP_ATTR_E_HASH2 = 0x1015, /* E-Hash2 */ + WLP_ATTR_E_SNONCE1 = 0x1016, /* E-SNonce1 */ + WLP_ATTR_E_SNONCE2 = 0x1017, /* E-SNonce2 */ + WLP_ATTR_ENCR_SET = 0x1018, /* Encrypted Settings */ + WLP_ATTR_ENRL_NONCE = 0x101A, /* Enrollee Nonce */ + WLP_ATTR_KEYWRAP_AUTH = 0x101E, /* Key Wrap Authenticator */ + WLP_ATTR_MANUF = 0x1021, /* Manufacturer */ + WLP_ATTR_MSG_TYPE = 0x1022, /* Message Type */ + WLP_ATTR_MODEL_NAME = 0x1023, /* Model Name */ + WLP_ATTR_MODEL_NR = 0x1024, /* Model Number */ + WLP_ATTR_PUB_KEY = 0x1032, /* Public Key */ + WLP_ATTR_REG_NONCE = 0x1039, /* Registrar Nonce */ + WLP_ATTR_R_HASH1 = 0x103D, /* R-Hash1 */ + WLP_ATTR_R_HASH2 = 0x103E, /* R-Hash2 */ + WLP_ATTR_R_SNONCE1 = 0x103F, /* R-SNonce1 */ + WLP_ATTR_R_SNONCE2 = 0x1040, /* R-SNonce2 */ + WLP_ATTR_SERIAL = 0x1042, /* Serial number */ + WLP_ATTR_UUID_E = 0x1047, /* UUID-E */ + WLP_ATTR_UUID_R = 0x1048, /* UUID-R */ + WLP_ATTR_PRI_DEV_TYPE = 0x1054, /* Primary Device Type */ + WLP_ATTR_SEC_DEV_TYPE = 0x1055, /* Secondary Device Type */ + WLP_ATTR_PORT_DEV = 0x1056, /* Portable Device */ + WLP_ATTR_APP_EXT = 0x1058, /* Application Extension */ + WLP_ATTR_WLP_VER = 0x2000, /* WLP Version */ + WLP_ATTR_WSSID = 0x2001, /* WSSID */ + WLP_ATTR_WSS_NAME = 0x2002, /* WSS Name */ + WLP_ATTR_WSS_SEC_STAT = 0x2003, /* WSS Secure Status */ + WLP_ATTR_WSS_BCAST = 0x2004, /* WSS Broadcast Address */ + WLP_ATTR_WSS_M_KEY = 0x2005, /* WSS Master Key */ + WLP_ATTR_ACC_ENRL = 0x2006, /* Accepting Enrollment */ + WLP_ATTR_WSS_INFO = 0x2007, /* WSS Information */ + WLP_ATTR_WSS_SEL_MTHD = 0x2008, /* WSS Selection Method */ + WLP_ATTR_ASSC_MTHD_LIST = 0x2009, /* Association Methods List */ + WLP_ATTR_SEL_ASSC_MTHD = 0x200A, /* Selected Association Method */ + WLP_ATTR_ENRL_HASH_COMM = 0x200B, /* Enrollee Hash Commitment */ + WLP_ATTR_WSS_TAG = 0x200C, /* WSS Tag */ + WLP_ATTR_WSS_VIRT = 0x200D, /* WSS Virtual EUI-48 */ + WLP_ATTR_WLP_ASSC_ERR = 0x200E, /* WLP Association Error */ + WLP_ATTR_VNDR_EXT = 0x200F, /* Vendor Extension */ +}; + +/** + * WLP Category ID of primary/secondary device + * 
WLP Draft 0.99 [6.6.1.8 Table 12] + */ +enum wlp_dev_category_id { + WLP_DEV_CAT_COMPUTER = 1, + WLP_DEV_CAT_INPUT, + WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER, + WLP_DEV_CAT_CAMERA, + WLP_DEV_CAT_STORAGE, + WLP_DEV_CAT_INFRASTRUCTURE, + WLP_DEV_CAT_DISPLAY, + WLP_DEV_CAT_MULTIM, + WLP_DEV_CAT_GAMING, + WLP_DEV_CAT_TELEPHONE, + WLP_DEV_CAT_OTHER = 65535, +}; + +/** + * WLP WSS selection method + * WLP Draft 0.99 [6.6.1.6 Table 10] + */ +enum wlp_wss_sel_mthd { + WLP_WSS_ENRL_SELECT = 1, /* Enrollee selects */ + WLP_WSS_REG_SELECT, /* Registrar selects */ +}; + +/** + * WLP association error values + * WLP Draft 0.99 [6.6.1.5 Table 9] + */ +enum wlp_assc_error { + WLP_ASSOC_ERROR_NONE, + WLP_ASSOC_ERROR_AUTH, /* Authenticator Failure */ + WLP_ASSOC_ERROR_ROGUE, /* Rogue activity suspected */ + WLP_ASSOC_ERROR_BUSY, /* Device busy */ + WLP_ASSOC_ERROR_LOCK, /* Setup Locked */ + WLP_ASSOC_ERROR_NOT_READY, /* Registrar not ready */ + WLP_ASSOC_ERROR_INV, /* Invalid WSS selection */ + WLP_ASSOC_ERROR_MSG_TIME, /* Message timeout */ + WLP_ASSOC_ERROR_ENR_TIME, /* Enrollment session timeout */ + WLP_ASSOC_ERROR_PW, /* Device password invalid */ + WLP_ASSOC_ERROR_VER, /* Unsupported version */ + WLP_ASSOC_ERROR_INT, /* Internal error */ + WLP_ASSOC_ERROR_UNDEF, /* Undefined error */ + WLP_ASSOC_ERROR_NUM, /* Numeric comparison failure */ + WLP_ASSOC_ERROR_WAIT, /* Waiting for user input */ +}; + +/** + * WLP Parameters + * WLP 0.99 [7.7] + */ +enum wlp_parameters { + WLP_PER_MSG_TIMEOUT = 15, /* Seconds to wait for response to + association message. */ +}; + +/** + * WLP IE + * + * The WLP IE should be included in beacons by all devices. + * + * The driver can set only a few of the fields in this information element, + * most fields are managed by the device self. When the driver needs to set + * a field it will only provide values for the fields of interest, the rest + * will be filled with zeroes. The fields of interest are: + * + * Element ID + * Length + * Capabilities (only to include WSSID Hash list length) + * WSSID Hash List fields + * + * WLP 0.99 [6.7] + * + * Only the fields that will be used are detailed in this structure, rest + * are not detailed or marked as "notused". + */ +struct wlp_ie { + struct uwb_ie_hdr hdr; + __le16 capabilities; + __le16 cycle_param; + __le16 acw_anchor_addr; + u8 wssid_hash_list[]; +} __attribute__((packed)); + +static inline int wlp_ie_hash_length(struct wlp_ie *ie) +{ + return (le16_to_cpu(ie->capabilities) >> 12) & 0xf; +} + +static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length) +{ + u16 caps = le16_to_cpu(ie->capabilities); + caps = (caps & ~(0xf << 12)) | (hash_length << 12); + ie->capabilities = cpu_to_le16(caps); +} + +/** + * WLP nonce + * WLP Draft 0.99 [6.6.1 Table 6] + * + * A 128-bit random number often used (E-SNonce1, E-SNonce2, Enrollee + * Nonce, Registrar Nonce, R-SNonce1, R-SNonce2). It is passed to HW so + * it is packed. + */ +struct wlp_nonce { + u8 data[16]; +} __attribute__((packed)); + +/** + * WLP UUID + * WLP Draft 0.99 [6.6.1 Table 6] + * + * Universally Unique Identifier (UUID) encoded as an octet string in the + * order the octets are shown in string representation in RFC4122. A UUID + * is often used (UUID-E, UUID-R, WSSID). It is passed to HW so it is packed. 
+ */ +struct wlp_uuid { + u8 data[16]; +} __attribute__((packed)); + + +/** + * Primary and secondary device type attributes + * WLP Draft 0.99 [6.6.1.8] + */ +struct wlp_dev_type { + enum wlp_dev_category_id category:16; + u8 OUI[3]; + u8 OUIsubdiv; + __le16 subID; +} __attribute__((packed)); + +/** + * WLP frame header + * WLP Draft 0.99 [6.2] + */ +struct wlp_frame_hdr { + __le16 mux_hdr; /* WLP_PROTOCOL_ID */ + enum wlp_frame_type type:8; +} __attribute__((packed)); + +/** + * WLP attribute field header + * WLP Draft 0.99 [6.6.1] + * + * Header of each attribute found in an association frame + */ +struct wlp_attr_hdr { + __le16 type; + __le16 length; +} __attribute__((packed)); + +/** + * Device information commonly used together + * + * Each of these device information elements has a specified range in which it + * should fit (WLP 0.99 [Table 6]). This range provided in the spec does not + * include the termination null '\0' character (when used in the + * association protocol the attribute fields are accompanied + * with a "length" field so the full range from the spec can be used for + * the value). We thus allocate an extra byte to be able to store a string + * of max length with a terminating '\0'. + */ +struct wlp_device_info { + char name[33]; + char model_name[33]; + char manufacturer[65]; + char model_nr[33]; + char serial[33]; + struct wlp_dev_type prim_dev_type; +}; + +/** + * Macros for the WLP attributes + * + * There are quite a few attributes (total is 43). The attribute layout can be + * in one of three categories: one value, an array, an enum forced to 8 bits. + * These macros help with their definitions. + */ +#define wlp_attr(type, name) \ +struct wlp_attr_##name { \ + struct wlp_attr_hdr hdr; \ + type name; \ +} __attribute__((packed)); + +#define wlp_attr_array(type, name) \ +struct wlp_attr_##name { \ + struct wlp_attr_hdr hdr; \ + type name[]; \ +} __attribute__((packed)); + +/** + * WLP association attribute fields + * WLP Draft 0.99 [6.6.1 Table 6] + * + * Attributes appear in same order as the Table in the spec + * FIXME Does not define all attributes yet + */ + +/* Device name: Friendly name of sending device */ +wlp_attr_array(u8, dev_name) + +/* Enrollee Nonce: Random number generated by enrollee for an enrollment + * session */ +wlp_attr(struct wlp_nonce, enonce) + +/* Manufacturer name: Name of manufacturer of the sending device */ +wlp_attr_array(u8, manufacturer) + +/* WLP Message Type */ +wlp_attr(u8, msg_type) + +/* WLP Model name: Model name of sending device */ +wlp_attr_array(u8, model_name) + +/* WLP Model number: Model number of sending device */ +wlp_attr_array(u8, model_nr) + +/* Registrar Nonce: Random number generated by registrar for an enrollment + * session */ +wlp_attr(struct wlp_nonce, rnonce) + +/* Serial number of device */ +wlp_attr_array(u8, serial) + +/* UUID of enrollee */ +wlp_attr(struct wlp_uuid, uuid_e) + +/* UUID of registrar */ +wlp_attr(struct wlp_uuid, uuid_r) + +/* WLP Primary device type */ +wlp_attr(struct wlp_dev_type, prim_dev_type) + +/* WLP Secondary device type */ +wlp_attr(struct wlp_dev_type, sec_dev_type) + +/* WLP protocol version */ +wlp_attr(u8, version) + +/* WLP service set identifier */ +wlp_attr(struct wlp_uuid, wssid) + +/* WLP WSS name */ +wlp_attr_array(u8, wss_name) + +/* WLP WSS Secure Status */ +wlp_attr(u8, wss_sec_status) + +/* WSS Broadcast Address */ +wlp_attr(struct uwb_mac_addr, wss_bcast) + +/* WLP Accepting Enrollment */ +wlp_attr(u8, accept_enrl) + +/** + * WSS information attributes + * WLP 
Draft 0.99 [6.6.3 Table 15] + */ +struct wlp_wss_info { + struct wlp_attr_wssid wssid; + struct wlp_attr_wss_name name; + struct wlp_attr_accept_enrl accept; + struct wlp_attr_wss_sec_status sec_stat; + struct wlp_attr_wss_bcast bcast; +} __attribute__((packed)); + +/* WLP WSS Information */ +wlp_attr_array(struct wlp_wss_info, wss_info) + +/* WLP WSS Selection method */ +wlp_attr(u8, wss_sel_mthd) + +/* WLP WSS tag */ +wlp_attr(u8, wss_tag) + +/* WSS Virtual Address */ +wlp_attr(struct uwb_mac_addr, wss_virt) + +/* WLP association error */ +wlp_attr(u8, wlp_assc_err) + +/** + * WLP standard and abbreviated frames + * + * WLP Draft 0.99 [6.3] and [6.4] + * + * The difference between the WLP standard frame and the WLP + * abbreviated frame is that the standard frame includes the src + * and dest addresses from the Ethernet header, the abbreviated frame does + * not. + * The src/dest (as well as the type/length and client data) are already + * defined as part of the Ethernet header, we do not do this here. + * From this perspective the standard and abbreviated frames appear the + * same - they will be treated differently though. + * + * The size of this header is also captured in WLP_DATA_HLEN to enable + * interfaces to prepare their headroom. + */ +struct wlp_frame_std_abbrv_hdr { + struct wlp_frame_hdr hdr; + u8 tag; +} __attribute__((packed)); + +/** + * WLP association frames + * + * WLP Draft 0.99 [6.6] + */ +struct wlp_frame_assoc { + struct wlp_frame_hdr hdr; + enum wlp_assoc_type type:8; + struct wlp_attr_version version; + struct wlp_attr_msg_type msg_type; + u8 attr[]; +} __attribute__((packed)); + +/* Ethernet to dev address mapping */ +struct wlp_eda { + spinlock_t lock; + struct list_head cache; /* Eth<->Dev Addr cache */ +}; + +/** + * WSS information temporary storage + * + * This information is only stored temporarily during discovery. It should + * not be stored unless the device is enrolled in the advertised WSS. This + * is done mainly because we follow the letter of the spec in this regard. + * See WLP 0.99 [7.2.3]. + * When the device does become enrolled in a WSS the WSS information will + * be stored as part of the more comprehensive struct wlp_wss. + */ +struct wlp_wss_tmp_info { + char name[WLP_WSS_NAME_SIZE]; + u8 accept_enroll; + u8 sec_status; + struct uwb_mac_addr bcast; +}; + +struct wlp_wssid_e { + struct list_head node; + struct wlp_uuid wssid; + struct wlp_wss_tmp_info *info; +}; + +/** + * A cache entry of WLP neighborhood + * + * @node: head of list is wlp->neighbors + * @wssid: list of wssids of this neighbor, element is wlp_wssid_e + * @info: temporary storage for information learned during discovery. This + * storage is used together with the wssid_e temporary storage + * during discovery. + */ +struct wlp_neighbor_e { + struct list_head node; + struct wlp_uuid uuid; + struct uwb_dev *uwb_dev; + struct list_head wssid; /* Elements are wlp_wssid_e */ + struct wlp_device_info *info; +}; + +struct wlp; +/** + * Information for an association session in progress. + * + * @exp_message: The type of the expected message. Both this message and a + * F0 message (which can be sent in response to any + * association frame) will be accepted as a valid message for + * this session. + * @cb: The function that will be called upon receipt of this + * message. + * @cb_priv: Private data of callback + * @data: Data used in association process (always a sk_buff?) + * @neighbor: Address of neighbor with which association session is in + * progress. 
+ */ +struct wlp_session { + enum wlp_assoc_type exp_message; + void (*cb)(struct wlp *); + void *cb_priv; + void *data; + struct uwb_dev_addr neighbor_addr; +}; + +/** + * WLP Service Set + * + * @mutex: used to protect entire WSS structure. + * + * @name: The WSS name is set to 65 bytes, 1 byte larger than the maximum + * allowed by the WLP spec. This is to have a null terminated string + * for display to the user. A maximum of 64 bytes will still be used + * when placing the WSS name field in association frames. + * + * @accept_enroll: Accepting enrollment: Set to one if registrar is + * accepting enrollment in WSS, or zero otherwise. + * + * Global and local information for each WSS in which we are enrolled. + * WLP 0.99 Section 7.2.1 and Section 7.2.2 + */ +struct wlp_wss { + struct mutex mutex; + struct kobject kobj; + /* Global properties. */ + struct wlp_uuid wssid; + u8 hash; + char name[WLP_WSS_NAME_SIZE]; + struct uwb_mac_addr bcast; + u8 secure_status:1; + u8 master_key[16]; + /* Local properties. */ + u8 tag; + struct uwb_mac_addr virtual_addr; + /* Extra */ + u8 accept_enroll:1; + enum wlp_wss_state state; +}; + +/** + * WLP main structure + * @mutex: protect changes to WLP structure. We only allow changes to the + * uuid, so currently this mutex only protects this field. + */ +struct wlp { + struct mutex mutex; + struct uwb_rc *rc; /* UWB radio controller */ + struct uwb_pal pal; + struct wlp_eda eda; + struct wlp_uuid uuid; + struct wlp_session *session; + struct wlp_wss wss; + struct mutex nbmutex; /* Neighbor mutex protects neighbors list */ + struct list_head neighbors; /* Elements are wlp_neighbor_e */ + struct uwb_notifs_handler uwb_notifs_handler; + struct wlp_device_info *dev_info; + void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info); + int (*xmit_frame)(struct wlp *, struct sk_buff *, + struct uwb_dev_addr *); + void (*stop_queue)(struct wlp *); + void (*start_queue)(struct wlp *); +}; + +/* sysfs */ + + +struct wlp_wss_attribute { + struct attribute attr; + ssize_t (*show)(struct wlp_wss *wss, char *buf); + ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count); +}; + +#define WSS_ATTR(_name, _mode, _show, _store) \ +static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ + _show, _store) + +extern int wlp_setup(struct wlp *, struct uwb_rc *); +extern void wlp_remove(struct wlp *); +extern ssize_t wlp_neighborhood_show(struct wlp *, char *); +extern int wlp_wss_setup(struct net_device *, struct wlp_wss *); +extern void wlp_wss_remove(struct wlp_wss *); +extern ssize_t wlp_wss_activate_show(struct wlp_wss *, char *); +extern ssize_t wlp_wss_activate_store(struct wlp_wss *, const char *, size_t); +extern ssize_t wlp_eda_show(struct wlp *, char *); +extern ssize_t wlp_eda_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_uuid_show(struct wlp *, char *); +extern ssize_t wlp_uuid_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_dev_name_show(struct wlp *, char *); +extern ssize_t wlp_dev_name_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_dev_manufacturer_show(struct wlp *, char *); +extern ssize_t wlp_dev_manufacturer_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_dev_model_name_show(struct wlp *, char *); +extern ssize_t wlp_dev_model_name_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_dev_model_nr_show(struct wlp *, char *); +extern ssize_t wlp_dev_model_nr_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_dev_serial_show(struct 
wlp *, char *); +extern ssize_t wlp_dev_serial_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_dev_prim_category_show(struct wlp *, char *); +extern ssize_t wlp_dev_prim_category_store(struct wlp *, const char *, + size_t); +extern ssize_t wlp_dev_prim_OUI_show(struct wlp *, char *); +extern ssize_t wlp_dev_prim_OUI_store(struct wlp *, const char *, size_t); +extern ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *, char *); +extern ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *, const char *, + size_t); +extern ssize_t wlp_dev_prim_subcat_show(struct wlp *, char *); +extern ssize_t wlp_dev_prim_subcat_store(struct wlp *, const char *, + size_t); +extern int wlp_receive_frame(struct device *, struct wlp *, struct sk_buff *, + struct uwb_dev_addr *); +extern int wlp_prepare_tx_frame(struct device *, struct wlp *, + struct sk_buff *, struct uwb_dev_addr *); +void wlp_reset_all(struct wlp *wlp); + +/** + * Initialize WSS + */ +static inline +void wlp_wss_init(struct wlp_wss *wss) +{ + mutex_init(&wss->mutex); +} + +static inline +void wlp_init(struct wlp *wlp) +{ + INIT_LIST_HEAD(&wlp->neighbors); + mutex_init(&wlp->mutex); + mutex_init(&wlp->nbmutex); + wlp_wss_init(&wlp->wss); +} + + +#endif /* #ifndef __LINUX__WLP_H_ */ -- cgit v0.10.2 From 183b9b592a622a7719ee38e275fd7ff3aaf74d0d Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:06 +0100 Subject: uwb: add the UWB stack (core files) UWB device and radio controller device and event management. Signed-off-by: David Vrabel diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c new file mode 100644 index 0000000..7eee8e4 --- /dev/null +++ b/drivers/uwb/driver.c @@ -0,0 +1,142 @@ +/* + * Ultra Wide Band + * Driver initialization, etc + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * + * Life cycle: FIXME: explain + * + * UWB radio controller: + * + * 1. alloc a uwb_rc, zero it + * 2. call uwb_rc_init() on it to set it up + ops (won't do any + * kind of allocation) + * 3. register (now it is owned by the UWB stack--deregister before + * freeing/destroying). + * 4. It lives on it's own now (UWB stack handles)--when it + * disconnects, call unregister() + * 5. free it. + * + * Make sure you have a reference to the uwb_rc before calling + * any of the UWB API functions. + * + * TODO: + * + * 1. Locking and life cycle management is crappy still. All entry + * points to the UWB HCD API assume you have a reference on the + * uwb_rc structure and that it won't go away. They mutex lock it + * before doing anything. 
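+ *
+ * A minimal sketch of the sequence above (uwb_rc_alloc() in lc-rc.c
+ * wraps steps 1 and 2; the register/unregister entry points are only
+ * referred to generically here, see lc-rc.c for the real names):
+ *
+ *	struct uwb_rc *rc = uwb_rc_alloc();	[kzalloc + uwb_rc_init()]
+ *	...fill in the HCD ops...
+ *	register rc; use it; on disconnect, unregister it, then free it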
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "uwb-internal.h" + + +/* UWB stack attributes (or 'global' constants) */ + + +/** + * If a beacon dissapears for longer than this, then we consider the + * device who was represented by that beacon to be gone. + * + * ECMA-368[17.2.3, last para] establishes that a device must not + * consider a device to be its neighbour if he doesn't receive a beacon + * for more than mMaxLostBeacons. mMaxLostBeacons is defined in + * ECMA-368[17.16] as 3; because we can get only one beacon per + * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time + * for jitter and stuff and make it 500 ms. + */ +unsigned long beacon_timeout_ms = 500; + +static +ssize_t beacon_timeout_ms_show(struct class *class, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms); +} + +static +ssize_t beacon_timeout_ms_store(struct class *class, + const char *buf, size_t size) +{ + unsigned long bt; + ssize_t result; + result = sscanf(buf, "%lu", &bt); + if (result != 1) + return -EINVAL; + beacon_timeout_ms = bt; + return size; +} + +static struct class_attribute uwb_class_attrs[] = { + __ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO, + beacon_timeout_ms_show, beacon_timeout_ms_store), + __ATTR_NULL, +}; + +/** Device model classes */ +struct class uwb_rc_class = { + .name = "uwb_rc", + .class_attrs = uwb_class_attrs, +}; + + +static int __init uwb_subsys_init(void) +{ + int result = 0; + + result = uwb_est_create(); + if (result < 0) { + printk(KERN_ERR "uwb: Can't initialize EST subsystem\n"); + goto error_est_init; + } + + result = class_register(&uwb_rc_class); + if (result < 0) + goto error_uwb_rc_class_register; + uwbd_start(); + return 0; + +error_uwb_rc_class_register: + uwb_est_destroy(); +error_est_init: + return result; +} +module_init(uwb_subsys_init); + +static void __exit uwb_subsys_exit(void) +{ + uwbd_stop(); + class_unregister(&uwb_rc_class); + uwb_est_destroy(); + return; +} +module_exit(uwb_subsys_exit); + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Ultra Wide Band core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c new file mode 100644 index 0000000..a6cb8ad --- /dev/null +++ b/drivers/uwb/lc-dev.c @@ -0,0 +1,492 @@ +/* + * Ultra Wide Band + * Life cycle of devices + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * + * FIXME: docs + */ + +#include +#include +#include +#include +#include +#include "uwb-internal.h" + +#define D_LOCAL 1 +#include + + +/* We initialize addresses to 0xff (invalid, as it is bcast) */ +static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) +{ + memset(&addr->data, 0xff, sizeof(addr->data)); +} + +static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr) +{ + memset(&addr->data, 0xff, sizeof(addr->data)); +} + +/* @returns !0 if a device @addr is a broadcast address */ +static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr) +{ + static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } }; + return !uwb_dev_addr_cmp(addr, &bcast); +} + +/* + * Add callback @new to be called when an event occurs in @rc. + */ +int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new) +{ + if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) + return -ERESTARTSYS; + list_add(&new->list_node, &rc->notifs_chain.list); + mutex_unlock(&rc->notifs_chain.mutex); + return 0; +} +EXPORT_SYMBOL_GPL(uwb_notifs_register); + +/* + * Remove event handler (callback) + */ +int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry) +{ + if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) + return -ERESTARTSYS; + list_del(&entry->list_node); + mutex_unlock(&rc->notifs_chain.mutex); + return 0; +} +EXPORT_SYMBOL_GPL(uwb_notifs_deregister); + +/* + * Notify all event handlers of a given event on @rc + * + * We are called with a valid reference to the device, or NULL if the + * event is not for a particular event (e.g., a BG join event). + */ +void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event) +{ + struct uwb_notifs_handler *handler; + if (mutex_lock_interruptible(&rc->notifs_chain.mutex)) + return; + if (!list_empty(&rc->notifs_chain.list)) { + list_for_each_entry(handler, &rc->notifs_chain.list, list_node) { + handler->cb(handler->data, uwb_dev, event); + } + } + mutex_unlock(&rc->notifs_chain.mutex); +} + +/* + * Release the backing device of a uwb_dev that has been dynamically allocated. + */ +static void uwb_dev_sys_release(struct device *dev) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + + d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev); + uwb_bce_put(uwb_dev->bce); + d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev); + memset(uwb_dev, 0x69, sizeof(*uwb_dev)); + kfree(uwb_dev); + d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev); +} + +/* + * Initialize a UWB device instance + * + * Alloc, zero and call this function. 
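+ *
+ * A minimal usage sketch, mirroring what uwbd_dev_onair() in this file
+ * does:
+ *
+ *	uwb_dev = kzalloc(sizeof(*uwb_dev), GFP_KERNEL);
+ *	if (uwb_dev == NULL)
+ *		return;
+ *	uwb_dev_init(uwb_dev);		[refcount is now 1, caller owns it]
+ *	...set mac_addr, dev_addr and the device name...
+ *	uwb_dev_add(uwb_dev, parent_dev, parent_rc);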
+ */ +void uwb_dev_init(struct uwb_dev *uwb_dev) +{ + mutex_init(&uwb_dev->mutex); + device_initialize(&uwb_dev->dev); + uwb_dev->dev.release = uwb_dev_sys_release; + uwb_dev_addr_init(&uwb_dev->dev_addr); + uwb_mac_addr_init(&uwb_dev->mac_addr); + bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS); +} + +static ssize_t uwb_dev_EUI_48_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + char addr[UWB_ADDR_STRSIZE]; + + uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr); + return sprintf(buf, "%s\n", addr); +} +static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL); + +static ssize_t uwb_dev_DevAddr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + char addr[UWB_ADDR_STRSIZE]; + + uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr); + return sprintf(buf, "%s\n", addr); +} +static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL); + +/* + * Show the BPST of this device. + * + * Calculated from the receive time of the device's beacon and it's + * slot number. + */ +static ssize_t uwb_dev_BPST_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_beca_e *bce; + struct uwb_beacon_frame *bf; + u16 bpst; + + bce = uwb_dev->bce; + mutex_lock(&bce->mutex); + bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo; + bpst = bce->be->wBPSTOffset + - (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US); + mutex_unlock(&bce->mutex); + + return sprintf(buf, "%d\n", bpst); +} +static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL); + +/* + * Show the IEs a device is beaconing + * + * We need to access the beacon cache, so we just lock it really + * quick, print the IEs and unlock. + * + * We have a reference on the cache entry, so that should be + * quite safe. 
+ */ +static ssize_t uwb_dev_IEs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + + return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE); +} +static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL); + +static ssize_t uwb_dev_LQE_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_beca_e *bce = uwb_dev->bce; + size_t result; + + mutex_lock(&bce->mutex); + result = stats_show(&uwb_dev->bce->lqe_stats, buf); + mutex_unlock(&bce->mutex); + return result; +} + +static ssize_t uwb_dev_LQE_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_beca_e *bce = uwb_dev->bce; + ssize_t result; + + mutex_lock(&bce->mutex); + result = stats_store(&uwb_dev->bce->lqe_stats, buf, size); + mutex_unlock(&bce->mutex); + return result; +} +static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store); + +static ssize_t uwb_dev_RSSI_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_beca_e *bce = uwb_dev->bce; + size_t result; + + mutex_lock(&bce->mutex); + result = stats_show(&uwb_dev->bce->rssi_stats, buf); + mutex_unlock(&bce->mutex); + return result; +} + +static ssize_t uwb_dev_RSSI_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_beca_e *bce = uwb_dev->bce; + ssize_t result; + + mutex_lock(&bce->mutex); + result = stats_store(&uwb_dev->bce->rssi_stats, buf, size); + mutex_unlock(&bce->mutex); + return result; +} +static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store); + + +static struct attribute *dev_attrs[] = { + &dev_attr_EUI_48.attr, + &dev_attr_DevAddr.attr, + &dev_attr_BPST.attr, + &dev_attr_IEs.attr, + &dev_attr_LQE.attr, + &dev_attr_RSSI.attr, + NULL, +}; + +static struct attribute_group dev_attr_group = { + .attrs = dev_attrs, +}; + +static struct attribute_group *groups[] = { + &dev_attr_group, + NULL, +}; + +/** + * Device SYSFS registration + * + * + */ +static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) +{ + int result; + struct device *dev; + + d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev); + BUG_ON(parent_dev == NULL); + + dev = &uwb_dev->dev; + /* Device sysfs files are only useful for neighbor devices not + local radio controllers. */ + if (&uwb_dev->rc->uwb_dev != uwb_dev) + dev->groups = groups; + dev->parent = parent_dev; + dev_set_drvdata(dev, uwb_dev); + + result = device_add(dev); + d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result); + return result; +} + + +static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) +{ + d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev); + dev_set_drvdata(&uwb_dev->dev, NULL); + device_del(&uwb_dev->dev); + d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev); +} + + +/** + * Register and initialize a new UWB device + * + * Did you call uwb_dev_init() on it? + * + * @parent_rc: is the parent radio controller who has the link to the + * device. When registering the UWB device that is a UWB + * Radio Controller, we point back to it. + * + * If registering the device that is part of a radio, caller has set + * rc->uwb_dev->dev. 
Otherwise it is to be left NULL--a new one will + * be allocated. + */ +int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev, + struct uwb_rc *parent_rc) +{ + int result; + struct device *dev; + + BUG_ON(uwb_dev == NULL); + BUG_ON(parent_dev == NULL); + BUG_ON(parent_rc == NULL); + + mutex_lock(&uwb_dev->mutex); + dev = &uwb_dev->dev; + uwb_dev->rc = parent_rc; + result = __uwb_dev_sys_add(uwb_dev, parent_dev); + if (result < 0) + printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n", + dev_name(dev), result); + mutex_unlock(&uwb_dev->mutex); + return result; +} + + +void uwb_dev_rm(struct uwb_dev *uwb_dev) +{ + mutex_lock(&uwb_dev->mutex); + __uwb_dev_sys_rm(uwb_dev); + mutex_unlock(&uwb_dev->mutex); +} + + +static +int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev) +{ + struct uwb_dev *target_uwb_dev = __target_uwb_dev; + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + if (uwb_dev == target_uwb_dev) { + uwb_dev_get(uwb_dev); + return 1; + } else + return 0; +} + + +/** + * Given a UWB device descriptor, validate and refcount it + * + * @returns NULL if the device does not exist or is quiescing; the ptr to + * it otherwise. + */ +struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev) +{ + if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev)) + return uwb_dev; + else + return NULL; +} +EXPORT_SYMBOL_GPL(uwb_dev_try_get); + + +/** + * Remove a device from the system [grunt for other functions] + */ +int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc) +{ + struct device *dev = &uwb_dev->dev; + char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; + + d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc); + uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); + uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); + dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", + macbuf, devbuf, + rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", + rc ? dev_name(rc->uwb_dev.dev.parent) : ""); + uwb_dev_rm(uwb_dev); + uwb_dev_put(uwb_dev); /* for the creation in _onair() */ + d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc); + return 0; +} + + +/** + * A device went off the air, clean up after it! + * + * This is called by the UWB Daemon (through the beacon purge function + * uwb_bcn_cache_purge) when it is detected that a device has been in + * radio silence for a while. + * + * If this device is actually a local radio controller we don't need + * to go through the offair process, as it is not registered as that. + * + * NOTE: uwb_bcn_cache.mutex is held! + */ +void uwbd_dev_offair(struct uwb_beca_e *bce) +{ + struct uwb_dev *uwb_dev; + + uwb_dev = bce->uwb_dev; + if (uwb_dev) { + uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR); + __uwb_dev_offair(uwb_dev, uwb_dev->rc); + } +} + + +/** + * A device went on the air, start it up! + * + * This is called by the UWB Daemon when it is detected that a device + * has popped up in the radio range of the radio controller. + * + * It will just create the freaking device, register the beacon and + * stuff and yatla, done. 
+ * + * + * NOTE: uwb_beca.mutex is held, bce->mutex is held + */ +void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) +{ + int result; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_dev *uwb_dev; + char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; + + uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr); + uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr); + uwb_dev = kcalloc(1, sizeof(*uwb_dev), GFP_KERNEL); + if (uwb_dev == NULL) { + dev_err(dev, "new device %s: Cannot allocate memory\n", + macbuf); + return; + } + uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */ + uwb_dev->mac_addr = *bce->mac_addr; + uwb_dev->dev_addr = bce->dev_addr; + dev_set_name(&uwb_dev->dev, macbuf); + result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); + if (result < 0) { + dev_err(dev, "new device %s: cannot instantiate device\n", + macbuf); + goto error_dev_add; + } + /* plug the beacon cache */ + bce->uwb_dev = uwb_dev; + uwb_dev->bce = bce; + uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ + dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", + macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, + dev_name(rc->uwb_dev.dev.parent)); + uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR); + return; + +error_dev_add: + kfree(uwb_dev); + return; +} + +/** + * Iterate over the list of UWB devices, calling a @function on each + * + * See docs for bus_for_each().... + * + * @rc: radio controller for the devices. + * @function: function to call. + * @priv: data to pass to @function. + * @returns: 0 if no invocation of function() returned a value + * different to zero. That value otherwise. + */ +int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv) +{ + return device_for_each_child(&rc->uwb_dev.dev, priv, function); +} +EXPORT_SYMBOL_GPL(uwb_dev_for_each); diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c new file mode 100644 index 0000000..a21c96b --- /dev/null +++ b/drivers/uwb/lc-rc.c @@ -0,0 +1,501 @@ +/* + * Ultra Wide Band + * Life cycle of radio controllers + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * + * A UWB radio controller is also a UWB device, so it embeds one... + * + * List of RCs comes from the 'struct class uwb_rc_class'. 
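+ * (uwb_rc_find_by_index() below walks that class with class_find_device()
+ * to look a controller up by its index.)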
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define D_LOCAL 1 +#include +#include "uwb-internal.h" + +static int uwb_rc_index_match(struct device *dev, void *data) +{ + int *index = data; + struct uwb_rc *rc = dev_get_drvdata(dev); + + if (rc->index == *index) + return 1; + return 0; +} + +static struct uwb_rc *uwb_rc_find_by_index(int index) +{ + struct device *dev; + struct uwb_rc *rc = NULL; + + dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); + if (dev) + rc = dev_get_drvdata(dev); + return rc; +} + +static int uwb_rc_new_index(void) +{ + int index = 0; + + for (;;) { + if (!uwb_rc_find_by_index(index)) + return index; + if (++index < 0) + index = 0; + } +} + +/** + * Release the backing device of a uwb_rc that has been dynamically allocated. + */ +static void uwb_rc_sys_release(struct device *dev) +{ + struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); + struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); + + uwb_rc_neh_destroy(rc); + uwb_rc_ie_release(rc); + d_printf(1, dev, "freed uwb_rc %p\n", rc); + kfree(rc); +} + + +void uwb_rc_init(struct uwb_rc *rc) +{ + struct uwb_dev *uwb_dev = &rc->uwb_dev; + + uwb_dev_init(uwb_dev); + rc->uwb_dev.dev.class = &uwb_rc_class; + rc->uwb_dev.dev.release = uwb_rc_sys_release; + uwb_rc_neh_create(rc); + rc->beaconing = -1; + rc->scan_type = UWB_SCAN_DISABLED; + INIT_LIST_HEAD(&rc->notifs_chain.list); + mutex_init(&rc->notifs_chain.mutex); + uwb_drp_avail_init(rc); + uwb_rc_ie_init(rc); + uwb_rsv_init(rc); + uwb_rc_pal_init(rc); +} +EXPORT_SYMBOL_GPL(uwb_rc_init); + + +struct uwb_rc *uwb_rc_alloc(void) +{ + struct uwb_rc *rc; + rc = kzalloc(sizeof(*rc), GFP_KERNEL); + if (rc == NULL) + return NULL; + uwb_rc_init(rc); + return rc; +} +EXPORT_SYMBOL_GPL(uwb_rc_alloc); + +static struct attribute *rc_attrs[] = { + &dev_attr_mac_address.attr, + &dev_attr_scan.attr, + &dev_attr_beacon.attr, + NULL, +}; + +static struct attribute_group rc_attr_group = { + .attrs = rc_attrs, +}; + +/* + * Registration of sysfs specific stuff + */ +static int uwb_rc_sys_add(struct uwb_rc *rc) +{ + return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); +} + + +static void __uwb_rc_sys_rm(struct uwb_rc *rc) +{ + sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); +} + +/** + * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it + * @rc: the radio controller. + * + * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF + * then a random locally administered EUI-48 is generated and set on + * the device. The probability of address collisions is sufficiently + * unlikely (1/2^40 = 9.1e-13) that they're not checked for. 
+ */ +static +int uwb_rc_mac_addr_setup(struct uwb_rc *rc) +{ + int result; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_dev *uwb_dev = &rc->uwb_dev; + char devname[UWB_ADDR_STRSIZE]; + struct uwb_mac_addr addr; + + result = uwb_rc_mac_addr_get(rc, &addr); + if (result < 0) { + dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result); + return result; + } + + if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) { + addr.data[0] = 0x02; /* locally adminstered and unicast */ + get_random_bytes(&addr.data[1], sizeof(addr.data)-1); + + result = uwb_rc_mac_addr_set(rc, &addr); + if (result < 0) { + uwb_mac_addr_print(devname, sizeof(devname), &addr); + dev_err(dev, "cannot set EUI-48 address %s: %d\n", + devname, result); + return result; + } + } + uwb_dev->mac_addr = addr; + return 0; +} + + + +static int uwb_rc_setup(struct uwb_rc *rc) +{ + int result; + struct device *dev = &rc->uwb_dev.dev; + + result = uwb_rc_reset(rc); + if (result < 0) { + dev_err(dev, "cannot reset UWB radio: %d\n", result); + goto error; + } + result = uwb_rc_mac_addr_setup(rc); + if (result < 0) { + dev_err(dev, "cannot setup UWB MAC address: %d\n", result); + goto error; + } + result = uwb_rc_dev_addr_assign(rc); + if (result < 0) { + dev_err(dev, "cannot assign UWB DevAddr: %d\n", result); + goto error; + } + result = uwb_rc_ie_setup(rc); + if (result < 0) { + dev_err(dev, "cannot setup IE subsystem: %d\n", result); + goto error_ie_setup; + } + result = uwb_rc_set_identification_ie(rc); + if (result < 0) { + dev_err(dev, "cannot set Identification IE: %d\n", + result); + goto error_set_id_ie; + } + result = uwb_rsv_setup(rc); + if (result < 0) { + dev_err(dev, "cannot setup reservation subsystem: %d\n", result); + goto error_rsv_setup; + } + uwb_dbg_add_rc(rc); + return 0; + +error_rsv_setup: + uwb_rc_ie_release(rc); +error_ie_setup: +error: + return result; +} + + +/** + * Register a new UWB radio controller + * + * Did you call uwb_rc_init() on your rc? + * + * We assume that this is being called with a > 0 refcount on + * it [through ops->{get|put}_device(). We'll take our own, though. 
+ * + * @parent_dev is our real device, the one that provides the actual UWB device + */ +int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv) +{ + int result; + struct device *dev; + char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; + + rc->index = uwb_rc_new_index(); + + dev = &rc->uwb_dev.dev; + dev_set_name(dev, "uwb%d", rc->index); + + rc->priv = priv; + + result = rc->start(rc); + if (result < 0) + goto error_rc_start; + + result = uwb_rc_setup(rc); + if (result < 0) { + dev_err(dev, "cannot setup UWB radio controller: %d\n", result); + goto error_rc_setup; + } + + result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc); + if (result < 0 && result != -EADDRNOTAVAIL) + goto error_dev_add; + + result = uwb_rc_sys_add(rc); + if (result < 0) { + dev_err(parent_dev, "cannot register UWB radio controller " + "dev attributes: %d\n", result); + goto error_sys_add; + } + + uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr); + uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr); + dev_info(dev, + "new uwb radio controller (mac %s dev %s) on %s %s\n", + macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev)); + rc->ready = 1; + return 0; + +error_sys_add: + uwb_dev_rm(&rc->uwb_dev); +error_dev_add: +error_rc_setup: + rc->stop(rc); + uwbd_flush(rc); +error_rc_start: + return result; +} +EXPORT_SYMBOL_GPL(uwb_rc_add); + + +static int uwb_dev_offair_helper(struct device *dev, void *priv) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + + return __uwb_dev_offair(uwb_dev, uwb_dev->rc); +} + +/* + * Remove a Radio Controller; stop beaconing/scanning, disconnect all children + */ +void uwb_rc_rm(struct uwb_rc *rc) +{ + rc->ready = 0; + + uwb_dbg_del_rc(rc); + uwb_rsv_cleanup(rc); + uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE); + if (rc->beaconing >= 0) + uwb_rc_beacon(rc, -1, 0); + if (rc->scan_type != UWB_SCAN_DISABLED) + uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0); + uwb_rc_reset(rc); + + rc->stop(rc); + uwbd_flush(rc); + + uwb_dev_lock(&rc->uwb_dev); + rc->priv = NULL; + rc->cmd = NULL; + uwb_dev_unlock(&rc->uwb_dev); + mutex_lock(&uwb_beca.mutex); + uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); + __uwb_rc_sys_rm(rc); + mutex_unlock(&uwb_beca.mutex); + uwb_dev_rm(&rc->uwb_dev); +} +EXPORT_SYMBOL_GPL(uwb_rc_rm); + +static int find_rc_try_get(struct device *dev, void *data) +{ + struct uwb_rc *target_rc = data; + struct uwb_rc *rc = dev_get_drvdata(dev); + + if (rc == NULL) { + WARN_ON(1); + return 0; + } + if (rc == target_rc) { + if (rc->ready == 0) + return 0; + else + return 1; + } + return 0; +} + +/** + * Given a radio controller descriptor, validate and refcount it + * + * @returns NULL if the rc does not exist or is quiescing; the ptr to + * it otherwise. + */ +struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) +{ + struct device *dev; + struct uwb_rc *rc = NULL; + + dev = class_find_device(&uwb_rc_class, NULL, target_rc, + find_rc_try_get); + if (dev) { + rc = dev_get_drvdata(dev); + __uwb_rc_get(rc); + } + return rc; +} +EXPORT_SYMBOL_GPL(__uwb_rc_try_get); + +/* + * RC get for external refcount acquirers... 
+ * + * Increments the refcount of the device and it's backend modules + */ +static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc) +{ + if (rc->ready == 0) + return NULL; + uwb_dev_get(&rc->uwb_dev); + return rc; +} + +static int find_rc_grandpa(struct device *dev, void *data) +{ + struct device *grandpa_dev = data; + struct uwb_rc *rc = dev_get_drvdata(dev); + + if (rc->uwb_dev.dev.parent->parent == grandpa_dev) { + rc = uwb_rc_get(rc); + return 1; + } + return 0; +} + +/** + * Locate and refcount a radio controller given a common grand-parent + * + * @grandpa_dev Pointer to the 'grandparent' device structure. + * @returns NULL If the rc does not exist or is quiescing; the ptr to + * it otherwise, properly referenced. + * + * The Radio Control interface (or the UWB Radio Controller) is always + * an interface of a device. The parent is the interface, the + * grandparent is the device that encapsulates the interface. + * + * There is no need to lock around as the "grandpa" would be + * refcounted by the target, and to remove the referemes, the + * uwb_rc_class->sem would have to be taken--we hold it, ergo we + * should be safe. + */ +struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) +{ + struct device *dev; + struct uwb_rc *rc = NULL; + + dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev, + find_rc_grandpa); + if (dev) + rc = dev_get_drvdata(dev); + return rc; +} +EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); + +/** + * Find a radio controller by device address + * + * @returns the pointer to the radio controller, properly referenced + */ +static int find_rc_dev(struct device *dev, void *data) +{ + struct uwb_dev_addr *addr = data; + struct uwb_rc *rc = dev_get_drvdata(dev); + + if (rc == NULL) { + WARN_ON(1); + return 0; + } + if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) { + rc = uwb_rc_get(rc); + return 1; + } + return 0; +} + +struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) +{ + struct device *dev; + struct uwb_rc *rc = NULL; + + dev = class_find_device(&uwb_rc_class, NULL, (void *)addr, + find_rc_dev); + if (dev) + rc = dev_get_drvdata(dev); + + return rc; +} +EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev); + +/** + * Drop a reference on a radio controller + * + * This is the version that should be done by entities external to the + * UWB Radio Control stack (ie: clients of the API). + */ +void uwb_rc_put(struct uwb_rc *rc) +{ + __uwb_rc_put(rc); +} +EXPORT_SYMBOL_GPL(uwb_rc_put); + +/* + * + * + */ +ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size) +{ + ssize_t result; + struct uwb_rc_evt_get_ie *ie_info; + struct uwb_buf_ctx ctx; + + result = uwb_rc_get_ie(uwb_rc, &ie_info); + if (result < 0) + goto error_get_ie; + ctx.buf = buf; + ctx.size = size; + ctx.bytes = 0; + uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx, + ie_info->IEData, result - sizeof(*ie_info)); + result = ctx.bytes; + kfree(ie_info); +error_get_ie: + return result; +} + diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c new file mode 100644 index 0000000..5508993 --- /dev/null +++ b/drivers/uwb/pal.c @@ -0,0 +1,71 @@ +/* + * UWB PAL support. + * + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +#include "uwb-internal.h" + +/** + * uwb_pal_init - initialize a UWB PAL + * @pal: the PAL to initialize + */ +void uwb_pal_init(struct uwb_pal *pal) +{ + INIT_LIST_HEAD(&pal->node); +} +EXPORT_SYMBOL_GPL(uwb_pal_init); + +/** + * uwb_pal_register - register a UWB PAL + * @rc: the radio controller the PAL will be using + * @pal: the PAL + * + * The PAL must be initialized with uwb_pal_init(). + */ +int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) +{ + spin_lock(&rc->pal_lock); + list_add(&pal->node, &rc->pals); + spin_unlock(&rc->pal_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(uwb_pal_register); + +/** + * uwb_pal_register - unregister a UWB PAL + * @rc: the radio controller the PAL was using + * @pal: the PAL + */ +void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal) +{ + spin_lock(&rc->pal_lock); + list_del(&pal->node); + spin_unlock(&rc->pal_lock); +} +EXPORT_SYMBOL_GPL(uwb_pal_unregister); + +/** + * uwb_rc_pal_init - initialize the PAL related parts of a radio controller + * @rc: the radio controller + */ +void uwb_rc_pal_init(struct uwb_rc *rc) +{ + spin_lock_init(&rc->pal_lock); + INIT_LIST_HEAD(&rc->pals); +} diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h new file mode 100644 index 0000000..4f525a8 --- /dev/null +++ b/drivers/uwb/uwb-internal.h @@ -0,0 +1,306 @@ +/* + * Ultra Wide Band + * UWB internal API + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * This contains most of the internal API for UWB. This is stuff used + * across the stack that of course, is of no interest to the rest. + * + * Some parts might end up going public (like uwb_rc_*())... 
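+ *
+ * For example, the uwb_pal_*() hooks (see pal.c) are already part of
+ * the public face of the stack; a PAL implementation would attach to
+ * a radio controller with something like (sketch only, my_pal being
+ * the PAL's own embedded struct uwb_pal):
+ *
+ *	uwb_pal_init(&my_pal);
+ *	result = uwb_pal_register(rc, &my_pal);
+ *	...
+ *	uwb_pal_unregister(rc, &my_pal);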
+ */ + +#ifndef __UWB_INTERNAL_H__ +#define __UWB_INTERNAL_H__ + +#include +#include +#include +#include +#include + +struct uwb_beca_e; + +/* General device API */ +extern void uwb_dev_init(struct uwb_dev *uwb_dev); +extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *); +extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev, + struct uwb_rc *parent_rc); +extern void uwb_dev_rm(struct uwb_dev *uwb_dev); +extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *); +extern void uwbd_dev_offair(struct uwb_beca_e *); +void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event); + +/* General UWB Radio Controller Internal API */ +extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *); +static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc) +{ + uwb_dev_get(&rc->uwb_dev); + return rc; +} + +static inline void __uwb_rc_put(struct uwb_rc *rc) +{ + uwb_dev_put(&rc->uwb_dev); +} + +extern int uwb_rc_reset(struct uwb_rc *rc); +extern int uwb_rc_beacon(struct uwb_rc *rc, + int channel, unsigned bpst_offset); +extern int uwb_rc_scan(struct uwb_rc *rc, + unsigned channel, enum uwb_scan_type type, + unsigned bpst_offset); +extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); +extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t); +extern void uwb_rc_ie_init(struct uwb_rc *); +extern void uwb_rc_ie_init(struct uwb_rc *); +extern ssize_t uwb_rc_ie_setup(struct uwb_rc *); +extern void uwb_rc_ie_release(struct uwb_rc *); +extern int uwb_rc_ie_add(struct uwb_rc *, + const struct uwb_ie_hdr *, size_t); +extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); +extern int uwb_rc_set_identification_ie(struct uwb_rc *); + +extern const char *uwb_rc_strerror(unsigned code); + +/* + * Time to wait for a response to an RC command. + * + * Some commands can take a long time to response. e.g., START_BEACON + * may scan for several superframes before joining an existing beacon + * group and this can take around 600 ms. + */ +#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */ + +/* + * Notification/Event Handlers + */ + +struct uwb_rc_neh; + +void uwb_rc_neh_create(struct uwb_rc *rc); +void uwb_rc_neh_destroy(struct uwb_rc *rc); + +struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd, + u8 expected_type, u16 expected_event, + uwb_rc_cmd_cb_f cb, void *arg); +void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh); +void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh); +void uwb_rc_neh_put(struct uwb_rc_neh *neh); + +/* Event size tables */ +extern int uwb_est_create(void); +extern void uwb_est_destroy(void); + + +/* + * UWB Events & management daemon + */ + +/** + * enum uwb_event_type - types of UWB management daemon events + * + * The UWB management daemon (uwbd) can receive two types of events: + * UWB_EVT_TYPE_NOTIF - notification from the radio controller. + * UWB_EVT_TYPE_MSG - a simple message. + */ +enum uwb_event_type { + UWB_EVT_TYPE_NOTIF, + UWB_EVT_TYPE_MSG, +}; + +/** + * struct uwb_event_notif - an event for a radio controller notification + * @size: Size of the buffer (ie: Guaranteed to contain at least + * a full 'struct uwb_rceb') + * @rceb: Pointer to a kmalloced() event payload + */ +struct uwb_event_notif { + size_t size; + struct uwb_rceb *rceb; +}; + +/** + * enum uwb_event_message - an event for a message for asynchronous processing + * + * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware. 
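+ *
+ * As an illustration, code that wants the management daemon to perform
+ * such a reset asynchronously would queue it roughly like this (sketch
+ * only, using the uwb_event helpers declared below):
+ *
+ *	evt = uwb_event_alloc(sizeof(*evt), GFP_ATOMIC);
+ *	if (evt == NULL)
+ *		return;
+ *	evt->rc = __uwb_rc_get(rc);
+ *	evt->ts_jiffies = jiffies;
+ *	evt->type = UWB_EVT_TYPE_MSG;
+ *	evt->message = UWB_EVT_MSG_RESET;
+ *	uwbd_event_queue(evt);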
+ */ +enum uwb_event_message { + UWB_EVT_MSG_RESET, +}; + +/** + * UWB Event + * @rc: Radio controller that emitted the event (referenced) + * @ts_jiffies: Timestamp, when was it received + * @type: This event's type. + */ +struct uwb_event { + struct list_head list_node; + struct uwb_rc *rc; + unsigned long ts_jiffies; + enum uwb_event_type type; + union { + struct uwb_event_notif notif; + enum uwb_event_message message; + }; +}; + +extern void uwbd_start(void); +extern void uwbd_stop(void); +extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); +extern void uwbd_event_queue(struct uwb_event *); +void uwbd_flush(struct uwb_rc *rc); + +/* UWB event handlers */ +extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); +extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); +extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); +extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *); +extern int uwbd_evt_handle_rc_drp(struct uwb_event *); +extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *); + +int uwbd_msg_handle_reset(struct uwb_event *evt); + + +/* + * Address management + */ +int uwb_rc_dev_addr_assign(struct uwb_rc *rc); +int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt); + +/* + * UWB Beacon Cache + * + * Each beacon we received is kept in a cache--when we receive that + * beacon consistently, that means there is a new device that we have + * to add to the system. + */ + +extern unsigned long beacon_timeout_ms; + +/** Beacon cache list */ +struct uwb_beca { + struct list_head list; + size_t entries; + struct mutex mutex; +}; + +extern struct uwb_beca uwb_beca; + +/** + * Beacon cache entry + * + * @jiffies_refresh: last time a beacon was received that refreshed + * this cache entry. + * @uwb_dev: device connected to this beacon. 
This pointer is not + * safe, you need to get it with uwb_dev_try_get() + * + * @hits: how many time we have seen this beacon since last time we + * cleared it + */ +struct uwb_beca_e { + struct mutex mutex; + struct kref refcnt; + struct list_head node; + struct uwb_mac_addr *mac_addr; + struct uwb_dev_addr dev_addr; + u8 hits; + unsigned long ts_jiffies; + struct uwb_dev *uwb_dev; + struct uwb_rc_evt_beacon *be; + struct stats lqe_stats, rssi_stats; /* radio statistics */ +}; +struct uwb_beacon_frame; +extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, + char *, size_t); +extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *, + struct uwb_beacon_frame *, + unsigned long); + +extern void uwb_bce_kfree(struct kref *_bce); +static inline void uwb_bce_get(struct uwb_beca_e *bce) +{ + kref_get(&bce->refcnt); +} +static inline void uwb_bce_put(struct uwb_beca_e *bce) +{ + kref_put(&bce->refcnt, uwb_bce_kfree); +} +extern void uwb_beca_purge(void); +extern void uwb_beca_release(void); + +struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, + const struct uwb_dev_addr *devaddr); +struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, + const struct uwb_mac_addr *macaddr); + +/* -- UWB Sysfs representation */ +extern struct class uwb_rc_class; +extern struct device_attribute dev_attr_mac_address; +extern struct device_attribute dev_attr_beacon; +extern struct device_attribute dev_attr_scan; + +/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */ +void uwb_rsv_init(struct uwb_rc *rc); +int uwb_rsv_setup(struct uwb_rc *rc); +void uwb_rsv_cleanup(struct uwb_rc *rc); + +void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); +void uwb_rsv_remove(struct uwb_rsv *rsv); +struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, + struct uwb_ie_drp *drp_ie); +void uwb_rsv_sched_update(struct uwb_rc *rc); + +void uwb_drp_handle_timeout(struct uwb_rsv *rsv); +int uwb_drp_ie_update(struct uwb_rsv *rsv); +void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); + +void uwb_drp_avail_init(struct uwb_rc *rc); +int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); +void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); +void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); +void uwb_drp_avail_ie_update(struct uwb_rc *rc); + +/* -- PAL support */ +void uwb_rc_pal_init(struct uwb_rc *rc); + +/* -- Misc */ + +extern ssize_t uwb_mac_frame_hdr_print(char *, size_t, + const struct uwb_mac_frame_hdr *); + +/* -- Debug interface */ +void uwb_dbg_init(void); +void uwb_dbg_exit(void); +void uwb_dbg_add_rc(struct uwb_rc *rc); +void uwb_dbg_del_rc(struct uwb_rc *rc); + +/* Workarounds for version specific stuff */ + +static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) +{ + down(&uwb_dev->dev.sem); +} + +static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev) +{ + up(&uwb_dev->dev.sem); +} + +#endif /* #ifndef __UWB_INTERNAL_H__ */ diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c new file mode 100644 index 0000000..b3673d6 --- /dev/null +++ b/drivers/uwb/uwbd.c @@ -0,0 +1,427 @@ +/* + * Ultra Wide Band + * Neighborhood Management Daemon + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * This daemon takes care of maintaing information that describes the + * UWB neighborhood that the radios in this machine can see. It also + * keeps a tab of which devices are visible, makes sure each HC sits + * on a different channel to avoid interfering, etc. + * + * Different drivers (radio controller, device, any API in general) + * communicate with this daemon through an event queue. Daemon wakes + * up, takes a list of events and handles them one by one; handling + * function is extracted from a table based on the event's type and + * subtype. Events are freed only if the handling function says so. + * + * . Lock protecting the event list has to be an spinlock and locked + * with IRQSAVE because it might be called from an interrupt + * context (ie: when events arrive and the notification drops + * down from the ISR). + * + * . UWB radio controller drivers queue events to the daemon using + * uwbd_event_queue(). They just get the event, chew it to make it + * look like UWBD likes it and pass it in a buffer allocated with + * uwb_event_alloc(). + * + * EVENTS + * + * Events have a type, a subtype, a lenght, some other stuff and the + * data blob, which depends on the event. The header is 'struct + * uwb_event'; for payloads, see 'struct uwbd_evt_*'. + * + * EVENT HANDLER TABLES + * + * To find a handling function for an event, the type is used to index + * a subtype-table in the type-table. The subtype-table is indexed + * with the subtype to get the function that handles the event. Start + * with the main type-table 'uwbd_evt_type_handler'. + * + * DEVICES + * + * Devices are created when a bunch of beacons have been received and + * it is stablished that the device has stable radio presence. CREATED + * only, not configured. Devices are ONLY configured when an + * Application-Specific IE Probe is receieved, in which the device + * declares which Protocol ID it groks. Then the device is CONFIGURED + * (and the driver->probe() stuff of the device model is invoked). + * + * Devices are considered disconnected when a certain number of + * beacons are not received in an amount of time. + * + * Handler functions are called normally uwbd_evt_handle_*(). + */ + +#include +#include +#include +#include "uwb-internal.h" + +#define D_LOCAL 1 +#include + + +/** + * UWBD Event handler function signature + * + * Return !0 if the event needs not to be freed (ie the handler + * takes/took care of it). 0 means the daemon code will free the + * event. + * + * @evt->rc is already referenced and guaranteed to exist. See + * uwb_evt_handle(). 
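+ *
+ * For illustration only, a trivial handler for a hypothetical FOO
+ * notification, relying on the daemon to free the event, would be:
+ *
+ *	static int uwbd_evt_handle_rc_foo(struct uwb_event *evt)
+ *	{
+ *		dev_info(&evt->rc->uwb_dev.dev, "FOO: %zu bytes\n",
+ *			 evt->notif.size);
+ *		return 0;
+ *	}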
+ */ +typedef int (*uwbd_evt_handler_f)(struct uwb_event *); + +/** + * Properties of a UWBD event + * + * @handler: the function that will handle this event + * @name: text name of event + */ +struct uwbd_event { + uwbd_evt_handler_f handler; + const char *name; +}; + +/** Table of handlers for and properties of the UWBD Radio Control Events */ +static +struct uwbd_event uwbd_events[] = { + [UWB_RC_EVT_BEACON] = { + .handler = uwbd_evt_handle_rc_beacon, + .name = "BEACON_RECEIVED" + }, + [UWB_RC_EVT_BEACON_SIZE] = { + .handler = uwbd_evt_handle_rc_beacon_size, + .name = "BEACON_SIZE_CHANGE" + }, + [UWB_RC_EVT_BPOIE_CHANGE] = { + .handler = uwbd_evt_handle_rc_bpoie_change, + .name = "BPOIE_CHANGE" + }, + [UWB_RC_EVT_BP_SLOT_CHANGE] = { + .handler = uwbd_evt_handle_rc_bp_slot_change, + .name = "BP_SLOT_CHANGE" + }, + [UWB_RC_EVT_DRP_AVAIL] = { + .handler = uwbd_evt_handle_rc_drp_avail, + .name = "DRP_AVAILABILITY_CHANGE" + }, + [UWB_RC_EVT_DRP] = { + .handler = uwbd_evt_handle_rc_drp, + .name = "DRP" + }, + [UWB_RC_EVT_DEV_ADDR_CONFLICT] = { + .handler = uwbd_evt_handle_rc_dev_addr_conflict, + .name = "DEV_ADDR_CONFLICT", + }, +}; + + + +struct uwbd_evt_type_handler { + const char *name; + struct uwbd_event *uwbd_events; + size_t size; +}; + +#define UWBD_EVT_TYPE_HANDLER(n,a) { \ + .name = (n), \ + .uwbd_events = (a), \ + .size = sizeof(a)/sizeof((a)[0]) \ +} + + +/** Table of handlers for each UWBD Event type. */ +static +struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = { + [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events) +}; + +static const +size_t uwbd_evt_type_handlers_len = + sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]); + +static const struct uwbd_event uwbd_message_handlers[] = { + [UWB_EVT_MSG_RESET] = { + .handler = uwbd_msg_handle_reset, + .name = "reset", + }, +}; + +static DEFINE_MUTEX(uwbd_event_mutex); + +/** + * Handle an URC event passed to the UWB Daemon + * + * @evt: the event to handle + * @returns: 0 if the event can be kfreed, !0 on the contrary + * (somebody else took ownership) [coincidentally, returning + * a <0 errno code will free it :)]. + * + * Looks up the two indirection tables (one for the type, one for the + * subtype) to decide which function handles it and then calls the + * handler. + * + * The event structure passed to the event handler has the radio + * controller in @evt->rc referenced. The reference will be dropped + * once the handler returns, so if it needs it for longer (async), + * it'll need to take another one. 
+ */ +static +int uwbd_event_handle_urc(struct uwb_event *evt) +{ + int result; + struct uwbd_evt_type_handler *type_table; + uwbd_evt_handler_f handler; + u8 type, context; + u16 event; + + type = evt->notif.rceb->bEventType; + event = le16_to_cpu(evt->notif.rceb->wEvent); + context = evt->notif.rceb->bEventContext; + + if (type > uwbd_evt_type_handlers_len) { + if (printk_ratelimit()) + printk(KERN_ERR "UWBD: event type %u: unknown " + "(too high)\n", type); + return -EINVAL; + } + type_table = &uwbd_evt_type_handlers[type]; + if (type_table->uwbd_events == NULL) { + if (printk_ratelimit()) + printk(KERN_ERR "UWBD: event type %u: unknown\n", type); + return -EINVAL; + } + if (event > type_table->size) { + if (printk_ratelimit()) + printk(KERN_ERR "UWBD: event %s[%u]: " + "unknown (too high)\n", type_table->name, event); + return -EINVAL; + } + handler = type_table->uwbd_events[event].handler; + if (handler == NULL) { + if (printk_ratelimit()) + printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", + type_table->name, event); + return -EINVAL; + } + d_printf(3, NULL, "processing 0x%02x/%04x/%02x, %zu bytes\n", + type, event, context, evt->notif.size); + result = (*handler)(evt); + if (result < 0) { + if (printk_ratelimit()) + printk(KERN_ERR "UWBD: event 0x%02x/%04x/%02x, " + "table %s[%u]: handling failed: %d\n", + type, event, context, type_table->name, + event, result); + } + return result; +} + +static void uwbd_event_handle_message(struct uwb_event *evt) +{ + struct uwb_rc *rc; + int result; + + rc = evt->rc; + + if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) { + dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message); + return; + } + + /* If this is a reset event we need to drop the + * uwbd_event_mutex or it deadlocks when the reset handler + * attempts to flush the uwbd events. */ + if (evt->message == UWB_EVT_MSG_RESET) + mutex_unlock(&uwbd_event_mutex); + + result = uwbd_message_handlers[evt->message].handler(evt); + if (result < 0) + dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", + uwbd_message_handlers[evt->message].name, result); + + if (evt->message == UWB_EVT_MSG_RESET) + mutex_lock(&uwbd_event_mutex); +} + +static void uwbd_event_handle(struct uwb_event *evt) +{ + struct uwb_rc *rc; + int should_keep; + + rc = evt->rc; + + if (rc->ready) { + switch (evt->type) { + case UWB_EVT_TYPE_NOTIF: + should_keep = uwbd_event_handle_urc(evt); + if (should_keep <= 0) + kfree(evt->notif.rceb); + break; + case UWB_EVT_TYPE_MSG: + uwbd_event_handle_message(evt); + break; + default: + dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type); + break; + } + } + + __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ +} +/* The UWB Daemon */ + + +/** Daemon's PID: used to decide if we can queue or not */ +static int uwbd_pid; +/** Daemon's task struct for managing the kthread */ +static struct task_struct *uwbd_task; +/** Daemon's waitqueue for waiting for new events */ +static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq); +/** Daemon's list of events; we queue/dequeue here */ +static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list); +/** Daemon's list lock to protect concurent access */ +static DEFINE_SPINLOCK(uwbd_event_list_lock); + + +/** + * UWB Daemon + * + * Listens to all UWB notifications and takes care to track the state + * of the UWB neighboorhood for the kernel. When we do a run, we + * spinlock, move the list to a private copy and release the + * lock. Hold it as little as possible. 
Not a conflict: it is + * guaranteed we own the events in the private list. + * + * FIXME: should change so we don't have a 1HZ timer all the time, but + * only if there are devices. + */ +static int uwbd(void *unused) +{ + unsigned long flags; + struct list_head list = LIST_HEAD_INIT(list); + struct uwb_event *evt, *nxt; + int should_stop = 0; + while (1) { + wait_event_interruptible_timeout( + uwbd_wq, + !list_empty(&uwbd_event_list) + || (should_stop = kthread_should_stop()), + HZ); + if (should_stop) + break; + try_to_freeze(); + + mutex_lock(&uwbd_event_mutex); + spin_lock_irqsave(&uwbd_event_list_lock, flags); + list_splice_init(&uwbd_event_list, &list); + spin_unlock_irqrestore(&uwbd_event_list_lock, flags); + list_for_each_entry_safe(evt, nxt, &list, list_node) { + list_del(&evt->list_node); + uwbd_event_handle(evt); + kfree(evt); + } + mutex_unlock(&uwbd_event_mutex); + + uwb_beca_purge(); /* Purge devices that left */ + } + return 0; +} + + +/** Start the UWB daemon */ +void uwbd_start(void) +{ + uwbd_task = kthread_run(uwbd, NULL, "uwbd"); + if (uwbd_task == NULL) + printk(KERN_ERR "UWB: Cannot start management daemon; " + "UWB won't work\n"); + else + uwbd_pid = uwbd_task->pid; +} + +/* Stop the UWB daemon and free any unprocessed events */ +void uwbd_stop(void) +{ + unsigned long flags; + struct uwb_event *evt, *nxt; + kthread_stop(uwbd_task); + spin_lock_irqsave(&uwbd_event_list_lock, flags); + uwbd_pid = 0; + list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { + if (evt->type == UWB_EVT_TYPE_NOTIF) + kfree(evt->notif.rceb); + kfree(evt); + } + spin_unlock_irqrestore(&uwbd_event_list_lock, flags); + uwb_beca_release(); +} + +/* + * Queue an event for the management daemon + * + * When some lower layer receives an event, it uses this function to + * push it forward to the UWB daemon. + * + * Once you pass the event, you don't own it any more, but the daemon + * does. It will uwb_event_free() it when done, so make sure you + * uwb_event_alloc()ed it or bad things will happen. + * + * If the daemon is not running, we just free the event. + */ +void uwbd_event_queue(struct uwb_event *evt) +{ + unsigned long flags; + spin_lock_irqsave(&uwbd_event_list_lock, flags); + if (uwbd_pid != 0) { + list_add(&evt->list_node, &uwbd_event_list); + wake_up_all(&uwbd_wq); + } else { + __uwb_rc_put(evt->rc); + if (evt->type == UWB_EVT_TYPE_NOTIF) + kfree(evt->notif.rceb); + kfree(evt); + } + spin_unlock_irqrestore(&uwbd_event_list_lock, flags); + return; +} + +void uwbd_flush(struct uwb_rc *rc) +{ + struct uwb_event *evt, *nxt; + + mutex_lock(&uwbd_event_mutex); + + spin_lock_irq(&uwbd_event_list_lock); + list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { + if (evt->rc == rc) { + __uwb_rc_put(rc); + list_del(&evt->list_node); + if (evt->type == UWB_EVT_TYPE_NOTIF) + kfree(evt->notif.rceb); + kfree(evt); + } + } + spin_unlock_irq(&uwbd_event_list_lock); + + mutex_unlock(&uwbd_event_mutex); +} -- cgit v0.10.2 From 0612edfd95ffe92201a2267e9e1b0fc68becf76d Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:07 +0100 Subject: uwb: add the UWB stack (radio controller interface) Add the UWB radio controller interface (URCI) support. 
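The typical asynchronous command flow this interface gives drivers is,
in outline (a sketch only: cmd is a driver-filled struct uwb_rccb,
my_cb/my_arg are placeholders, and rc->cmd() is the bus specific
transmit hook provided elsewhere in this series):

	neh = uwb_rc_neh_add(rc, cmd, UWB_RC_CET_GENERAL,
			     UWB_RC_CMD_RESET, my_cb, my_arg);
	if (IS_ERR(neh))
		return PTR_ERR(neh);
	result = rc->cmd(rc, cmd, sizeof(*cmd));
	if (result < 0)
		uwb_rc_neh_rm(rc, neh);		/* command never went out */
	else
		uwb_rc_neh_arm(rc, neh);	/* wait for answer or timeout */
	uwb_rc_neh_put(neh);
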
Signed-off-by: David Vrabel diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c new file mode 100644 index 0000000..1667afb --- /dev/null +++ b/drivers/uwb/est.c @@ -0,0 +1,485 @@ +/* + * Ultra Wide Band Radio Control + * Event Size Tables management + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * + * Infrastructure, code and data tables for guessing the size of + * events received on the notification endpoints of UWB radio + * controllers. + * + * You define a table of events and for each, its size and how to get + * the extra size. + * + * ENTRY POINTS: + * + * uwb_est_{init/destroy}(): To initialize/release the EST subsystem. + * + * uwb_est_[u]register(): To un/register event size tables + * uwb_est_grow() + * + * uwb_est_find_size(): Get the size of an event + * uwb_est_get_size() + */ +#include +#define D_LOCAL 0 +#include +#include "uwb-internal.h" + + +struct uwb_est { + u16 type_event_high; + u16 vendor, product; + u8 entries; + const struct uwb_est_entry *entry; +}; + + +static struct uwb_est *uwb_est; +static u8 uwb_est_size; +static u8 uwb_est_used; +static DEFINE_RWLOCK(uwb_est_lock); + +/** + * WUSB Standard Event Size Table, HWA-RC interface + * + * Sizes for events and notifications type 0 (general), high nibble 0. 
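+ *
+ * Note the convention: .offset is stored as 1 + offsetof(...) so that
+ * an offset of 0 can mean "fixed size event"; uwb_est_get_size()
+ * undoes the +1.  E.g. for a beacon notification (assuming the default
+ * 16-bit length field type) the total size works out to, roughly:
+ *
+ *	sizeof(struct uwb_rc_evt_beacon)
+ *		+ le16_to_cpu(beacon_evt->wBeaconInfoLength)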
+ */ +static +struct uwb_est_entry uwb_est_00_00xx[] = { + [UWB_RC_EVT_IE_RCV] = { + .size = sizeof(struct uwb_rc_evt_ie_rcv), + .offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength), + }, + [UWB_RC_EVT_BEACON] = { + .size = sizeof(struct uwb_rc_evt_beacon), + .offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength), + }, + [UWB_RC_EVT_BEACON_SIZE] = { + .size = sizeof(struct uwb_rc_evt_beacon_size), + }, + [UWB_RC_EVT_BPOIE_CHANGE] = { + .size = sizeof(struct uwb_rc_evt_bpoie_change), + .offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change, + wBPOIELength), + }, + [UWB_RC_EVT_BP_SLOT_CHANGE] = { + .size = sizeof(struct uwb_rc_evt_bp_slot_change), + }, + [UWB_RC_EVT_BP_SWITCH_IE_RCV] = { + .size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv), + .offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength), + }, + [UWB_RC_EVT_DEV_ADDR_CONFLICT] = { + .size = sizeof(struct uwb_rc_evt_dev_addr_conflict), + }, + [UWB_RC_EVT_DRP_AVAIL] = { + .size = sizeof(struct uwb_rc_evt_drp_avail) + }, + [UWB_RC_EVT_DRP] = { + .size = sizeof(struct uwb_rc_evt_drp), + .offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length), + }, + [UWB_RC_EVT_BP_SWITCH_STATUS] = { + .size = sizeof(struct uwb_rc_evt_bp_switch_status), + }, + [UWB_RC_EVT_CMD_FRAME_RCV] = { + .size = sizeof(struct uwb_rc_evt_cmd_frame_rcv), + .offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength), + }, + [UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = { + .size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv), + .offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength), + }, + [UWB_RC_CMD_CHANNEL_CHANGE] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_DEV_ADDR_MGMT] = { + .size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) }, + [UWB_RC_CMD_GET_IE] = { + .size = sizeof(struct uwb_rc_evt_get_ie), + .offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength), + }, + [UWB_RC_CMD_RESET] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_SCAN] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_SET_BEACON_FILTER] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_SET_DRP_IE] = { + .size = sizeof(struct uwb_rc_evt_set_drp_ie), + }, + [UWB_RC_CMD_SET_IE] = { + .size = sizeof(struct uwb_rc_evt_set_ie), + }, + [UWB_RC_CMD_SET_NOTIFICATION_FILTER] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_SET_TX_POWER] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_SLEEP] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_START_BEACON] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_STOP_BEACON] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_BP_MERGE] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_SEND_COMMAND_FRAME] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, + [UWB_RC_CMD_SET_ASIE_NOTIF] = { + .size = sizeof(struct uwb_rc_evt_confirm), + }, +}; + +static +struct uwb_est_entry uwb_est_01_00xx[] = { + [UWB_RC_DAA_ENERGY_DETECTED] = { + .size = sizeof(struct uwb_rc_evt_daa_energy_detected), + }, + [UWB_RC_SET_DAA_ENERGY_MASK] = { + .size = sizeof(struct uwb_rc_evt_set_daa_energy_mask), + }, + [UWB_RC_SET_NOTIFICATION_FILTER_EX] = { + .size = sizeof(struct uwb_rc_evt_set_notification_filter_ex), + }, +}; + +/** + * Initialize the EST subsystem + * + * Register the standard tables also. 
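+ *
+ * A device driver with vendor specific events can register its own
+ * table the same way; in outline (all my_* names are placeholders):
+ *
+ *	static const struct uwb_est_entry my_est[] = {
+ *		[MY_EVT_FOO] = { .size = sizeof(struct my_evt_foo) },
+ *	};
+ *
+ *	uwb_est_register(my_evt_type, 0, my_vendor_id, my_product_id,
+ *			 my_est, ARRAY_SIZE(my_est));
+ *
+ * and drops it again with the matching uwb_est_unregister() call.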
+ * + * FIXME: tag init + */ +int uwb_est_create(void) +{ + int result; + + uwb_est_size = 2; + uwb_est_used = 0; + uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL); + if (uwb_est == NULL) + return -ENOMEM; + + result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff, + uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx)); + if (result < 0) + goto out; + result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff, + uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx)); +out: + return result; +} + + +/** Clean it up */ +void uwb_est_destroy(void) +{ + kfree(uwb_est); + uwb_est = NULL; + uwb_est_size = uwb_est_used = 0; +} + + +/** + * Double the capacity of the EST table + * + * @returns 0 if ok, < 0 errno no error. + */ +static +int uwb_est_grow(void) +{ + size_t actual_size = uwb_est_size * sizeof(uwb_est[0]); + void *new = kmalloc(2 * actual_size, GFP_ATOMIC); + if (new == NULL) + return -ENOMEM; + memcpy(new, uwb_est, actual_size); + memset(new + actual_size, 0, actual_size); + kfree(uwb_est); + uwb_est = new; + uwb_est_size *= 2; + return 0; +} + + +/** + * Register an event size table + * + * Makes room for it if the table is full, and then inserts it in the + * right position (entries are sorted by type, event_high, vendor and + * then product). + * + * @vendor: vendor code for matching against the device (0x0000 and + * 0xffff mean any); use 0x0000 to force all to match without + * checking possible vendor specific ones, 0xfffff to match + * after checking vendor specific ones. + * + * @product: product code from that vendor; same matching rules, use + * 0x0000 for not allowing vendor specific matches, 0xffff + * for allowing. + * + * This arragement just makes the tables sort differenty. Because the + * table is sorted by growing type-event_high-vendor-product, a zero + * vendor will match before than a 0x456a vendor, that will match + * before a 0xfffff vendor. + * + * @returns 0 if ok, < 0 errno on error (-ENOENT if not found). + */ +/* FIXME: add bus type to vendor/product code */ +int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product, + const struct uwb_est_entry *entry, size_t entries) +{ + unsigned long flags; + unsigned itr; + u16 type_event_high; + int result = 0; + + write_lock_irqsave(&uwb_est_lock, flags); + if (uwb_est_used == uwb_est_size) { + result = uwb_est_grow(); + if (result < 0) + goto out; + } + /* Find the right spot to insert it in */ + type_event_high = type << 8 | event_high; + for (itr = 0; itr < uwb_est_used; itr++) + if (uwb_est[itr].type_event_high < type + && uwb_est[itr].vendor < vendor + && uwb_est[itr].product < product) + break; + + /* Shift others to make room for the new one? */ + if (itr < uwb_est_used) + memmove(&uwb_est[itr+1], &uwb_est[itr], uwb_est_used - itr); + uwb_est[itr].type_event_high = type << 8 | event_high; + uwb_est[itr].vendor = vendor; + uwb_est[itr].product = product; + uwb_est[itr].entry = entry; + uwb_est[itr].entries = entries; + uwb_est_used++; +out: + write_unlock_irqrestore(&uwb_est_lock, flags); + return result; +} +EXPORT_SYMBOL_GPL(uwb_est_register); + + +/** + * Unregister an event size table + * + * This just removes the specified entry and moves the ones after it + * to fill in the gap. This is needed to keep the list sorted; no + * reallocation is done to reduce the size of the table. + * + * We unregister by all the data we used to register instead of by + * pointer to the @entry array because we might have used the same + * table for a bunch of IDs (for example). 
+ * + * @returns 0 if ok, < 0 errno on error (-ENOENT if not found). + */ +int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product, + const struct uwb_est_entry *entry, size_t entries) +{ + unsigned long flags; + unsigned itr; + struct uwb_est est_cmp = { + .type_event_high = type << 8 | event_high, + .vendor = vendor, + .product = product, + .entry = entry, + .entries = entries + }; + write_lock_irqsave(&uwb_est_lock, flags); + for (itr = 0; itr < uwb_est_used; itr++) + if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp))) + goto found; + write_unlock_irqrestore(&uwb_est_lock, flags); + return -ENOENT; + +found: + if (itr < uwb_est_used - 1) /* Not last one? move ones above */ + memmove(&uwb_est[itr], &uwb_est[itr+1], uwb_est_used - itr - 1); + uwb_est_used--; + write_unlock_irqrestore(&uwb_est_lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(uwb_est_unregister); + + +/** + * Get the size of an event from a table + * + * @rceb: pointer to the buffer with the event + * @rceb_size: size of the area pointed to by @rceb in bytes. + * @returns: > 0 Size of the event + * -ENOSPC An area big enough was not provided to look + * ahead into the event's guts and guess the size. + * -EINVAL Unknown event code (wEvent). + * + * This will look at the received RCEB and guess what is the total + * size. For variable sized events, it will look further ahead into + * their length field to see how much data should be read. + * + * Note this size is *not* final--the neh (Notification/Event Handle) + * might specificy an extra size to add. + */ +static +ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est, + u8 event_low, const struct uwb_rceb *rceb, + size_t rceb_size) +{ + unsigned offset; + ssize_t size; + struct device *dev = &uwb_rc->uwb_dev.dev; + const struct uwb_est_entry *entry; + + size = -ENOENT; + if (event_low >= est->entries) { /* in range? */ + if (printk_ratelimit()) + dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: " + "event %u out of range\n", + est, est->type_event_high, est->vendor, + est->product, est->entries, + event_low); + goto out; + } + size = -ENOENT; + entry = &est->entry[event_low]; + if (entry->size == 0 && entry->offset == 0) { /* unknown? */ + if (printk_ratelimit()) + dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: " + "event %u unknown\n", + est, est->type_event_high, est->vendor, + est->product, est->entries, event_low); + goto out; + } + offset = entry->offset; /* extra fries with that? */ + if (offset == 0) + size = entry->size; + else { + /* Ops, got an extra size field at 'offset'--read it */ + const void *ptr = rceb; + size_t type_size = 0; + offset--; + size = -ENOSPC; /* enough data for more? */ + switch (entry->type) { + case UWB_EST_16: type_size = sizeof(__le16); break; + case UWB_EST_8: type_size = sizeof(u8); break; + default: BUG(); + } + if (offset + type_size >= rceb_size) { + if (printk_ratelimit()) + dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: " + "not enough data to read extra size\n", + est, est->type_event_high, est->vendor, + est->product, est->entries); + goto out; + } + size = entry->size; + ptr += offset; + switch (entry->type) { + case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break; + case UWB_EST_8: size += *(u8 *)ptr; break; + default: BUG(); + } + } +out: + return size; +} + + +/** + * Guesses the size of a WA event + * + * @rceb: pointer to the buffer with the event + * @rceb_size: size of the area pointed to by @rceb in bytes. 
+ * @returns: > 0 Size of the event + * -ENOSPC An area big enough was not provided to look + * ahead into the event's guts and guess the size. + * -EINVAL Unknown event code (wEvent). + * + * This will look at the received RCEB and guess what is the total + * size by checking all the tables registered with + * uwb_est_register(). For variable sized events, it will look further + * ahead into their length field to see how much data should be read. + * + * Note this size is *not* final--the neh (Notification/Event Handle) + * might specificy an extra size to add or replace. + */ +ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, + size_t rceb_size) +{ + /* FIXME: add vendor/product data */ + ssize_t size; + struct device *dev = &rc->uwb_dev.dev; + unsigned long flags; + unsigned itr; + u16 type_event_high, event; + u8 *ptr = (u8 *) rceb; + + read_lock_irqsave(&uwb_est_lock, flags); + d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x," + " buffer size %ld\n", + (unsigned) rceb->bEventType, + (unsigned) le16_to_cpu(rceb->wEvent), + (unsigned) rceb->bEventContext, + (long) rceb_size); + size = -ENOSPC; + if (rceb_size < sizeof(*rceb)) + goto out; + event = le16_to_cpu(rceb->wEvent); + type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; + for (itr = 0; itr < uwb_est_used; itr++) { + d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n", + uwb_est[itr].type_event_high, uwb_est[itr].vendor, + uwb_est[itr].product); + if (uwb_est[itr].type_event_high != type_event_high) + continue; + size = uwb_est_get_size(rc, &uwb_est[itr], + event & 0x00ff, rceb, rceb_size); + /* try more tables that might handle the same type */ + if (size != -ENOENT) + goto out; + } + /* FIXME: downgrade to _dbg() */ + if (printk_ratelimit()) + dev_err(dev, "event 0x%02x/%04x/%02x: no handlers available; " + "RCEB %02x %02x %02x %02x\n", + (unsigned) rceb->bEventType, + (unsigned) le16_to_cpu(rceb->wEvent), + (unsigned) rceb->bEventContext, + ptr[0], ptr[1], ptr[2], ptr[3]); + size = -ENOENT; +out: + read_unlock_irqrestore(&uwb_est_lock, flags); + return size; +} +EXPORT_SYMBOL_GPL(uwb_est_find_size); diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c new file mode 100644 index 0000000..91b6148 --- /dev/null +++ b/drivers/uwb/neh.c @@ -0,0 +1,616 @@ +/* + * WUSB Wire Adapter: Radio Control Interface (WUSB[8]) + * Notification and Event Handling + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI + * card delivers a stream of notifications and events to the + * notification end event endpoint or area. This code takes care of + * getting a buffer with that data, breaking it up in separate + * notifications and events and then deliver those. 
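+ *
+ * (Sketch only, USB case.)  The bus driver's notification endpoint
+ * completion handler ends up feeding the raw buffer to this code with
+ * something like:
+ *
+ *	uwb_rc_neh_grok(rc, urb->transfer_buffer, urb->actual_length);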
+ * + * Events are answers to commands and they carry a context ID that + * associates them to the command. Notifications are that, + * notifications, they come out of the blue and have a context ID of + * zero. Think of the context ID kind of like a handler. The + * uwb_rc_neh_* code deals with managing context IDs. + * + * This is why you require a handle to operate on a UWB host. When you + * open a handle a context ID is assigned to you. + * + * So, as it is done is: + * + * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id) + * 2. Issue command [rc->cmd(rc, ...)] + * 3. Arm the timeout timer [uwb_rc_neh_arm()] + * 4, Release the reference to the neh [uwb_rc_neh_put()] + * 5. Wait for the callback + * 6. Command result (RCEB) is passed to the callback + * + * If (2) fails, you should remove the handle [uwb_rc_neh_rm()] + * instead of arming the timer. + * + * Handles are for using in *serialized* code, single thread. + * + * When the notification/event comes, the IRQ handler/endpoint + * callback passes the data read to uwb_rc_neh_grok() which will break + * it up in a discrete series of events, look up who is listening for + * them and execute the pertinent callbacks. + * + * If the reader detects an error while reading the data stream, call + * uwb_rc_neh_error(). + * + * CONSTRAINTS/ASSUMPTIONS: + * + * - Most notifications/events are small (less thank .5k), copying + * around is ok. + * + * - Notifications/events are ALWAYS smaller than PAGE_SIZE + * + * - Notifications/events always come in a single piece (ie: a buffer + * will always contain entire notifications/events). + * + * - we cannot know in advance how long each event is (because they + * lack a length field in their header--smart move by the standards + * body, btw). So we need a facility to get the event size given the + * header. This is what the EST code does (notif/Event Size + * Tables), check nest.c--as well, you can associate the size to + * the handle [w/ neh->extra_size()]. + * + * - Most notifications/events are fixed size; only a few are variable + * size (NEST takes care of that). + * + * - Listeners of events expect them, so they usually provide a + * buffer, as they know the size. Listeners to notifications don't, + * so we allocate their buffers dynamically. + */ +#include +#include +#include + +#include "uwb-internal.h" +#define D_LOCAL 0 +#include + +/* + * UWB Radio Controller Notification/Event Handle + * + * Represents an entity waiting for an event coming from the UWB Radio + * Controller with a given context id (context) and type (evt_type and + * evt). On reception of the notification/event, the callback (cb) is + * called with the event. + * + * If the timer expires before the event is received, the callback is + * called with -ETIMEDOUT as the event size. 
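+ *
+ * A completion callback would thus look roughly like this (a sketch,
+ * assuming the uwb_rc_cmd_cb_f prototype used by uwb_rc_neh_add(),
+ * which is declared elsewhere in this series):
+ *
+ *	static void my_cmd_cb(struct uwb_rc *rc, void *arg,
+ *			      struct uwb_rceb *reply, ssize_t reply_size)
+ *	{
+ *		if (reply_size < 0) {
+ *			dev_err(&rc->uwb_dev.dev, "command failed: %zd\n",
+ *				reply_size);
+ *			return;
+ *		}
+ *		... process the reply ...
+ *	}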
+ */ +struct uwb_rc_neh { + struct kref kref; + + struct uwb_rc *rc; + u8 evt_type; + __le16 evt; + u8 context; + uwb_rc_cmd_cb_f cb; + void *arg; + + struct timer_list timer; + struct list_head list_node; +}; + +static void uwb_rc_neh_timer(unsigned long arg); + +static void uwb_rc_neh_release(struct kref *kref) +{ + struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref); + + kfree(neh); +} + +static void uwb_rc_neh_get(struct uwb_rc_neh *neh) +{ + kref_get(&neh->kref); +} + +/** + * uwb_rc_neh_put - release reference to a neh + * @neh: the neh + */ +void uwb_rc_neh_put(struct uwb_rc_neh *neh) +{ + kref_put(&neh->kref, uwb_rc_neh_release); +} + + +/** + * Assigns @neh a context id from @rc's pool + * + * @rc: UWB Radio Controller descriptor; @rc->neh_lock taken + * @neh: Notification/Event Handle + * @returns 0 if context id was assigned ok; < 0 errno on error (if + * all the context IDs are taken). + * + * (assumes @wa is locked). + * + * NOTE: WUSB spec reserves context ids 0x00 for notifications and + * 0xff is invalid, so they must not be used. Initialization + * fills up those two in the bitmap so they are not allocated. + * + * We spread the allocation around to reduce the posiblity of two + * consecutive opened @neh's getting the same context ID assigned (to + * avoid surprises with late events that timed out long time ago). So + * first we search from where @rc->ctx_roll is, if not found, we + * search from zero. + */ +static +int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh) +{ + int result; + result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX, + rc->ctx_roll++); + if (result < UWB_RC_CTX_MAX) + goto found; + result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX); + if (result < UWB_RC_CTX_MAX) + goto found; + return -ENFILE; +found: + set_bit(result, rc->ctx_bm); + neh->context = result; + return 0; +} + + +/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */ +static +void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh) +{ + struct device *dev = &rc->uwb_dev.dev; + if (neh->context == 0) + return; + if (test_bit(neh->context, rc->ctx_bm) == 0) { + dev_err(dev, "context %u not set in bitmap\n", + neh->context); + WARN_ON(1); + } + clear_bit(neh->context, rc->ctx_bm); + neh->context = 0; +} + +/** + * uwb_rc_neh_add - add a neh for a radio controller command + * @rc: the radio controller + * @cmd: the radio controller command + * @expected_type: the type of the expected response event + * @expected_event: the expected event ID + * @cb: callback for when the event is received + * @arg: argument for the callback + * + * Creates a neh and adds it to the list of those waiting for an + * event. A context ID will be assigned to the command. 
+ */ +struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd, + u8 expected_type, u16 expected_event, + uwb_rc_cmd_cb_f cb, void *arg) +{ + int result; + unsigned long flags; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_rc_neh *neh; + + neh = kzalloc(sizeof(*neh), GFP_KERNEL); + if (neh == NULL) { + result = -ENOMEM; + goto error_kzalloc; + } + + kref_init(&neh->kref); + INIT_LIST_HEAD(&neh->list_node); + init_timer(&neh->timer); + neh->timer.function = uwb_rc_neh_timer; + neh->timer.data = (unsigned long)neh; + + neh->rc = rc; + neh->evt_type = expected_type; + neh->evt = cpu_to_le16(expected_event); + neh->cb = cb; + neh->arg = arg; + + spin_lock_irqsave(&rc->neh_lock, flags); + result = __uwb_rc_ctx_get(rc, neh); + if (result >= 0) { + cmd->bCommandContext = neh->context; + list_add_tail(&neh->list_node, &rc->neh_list); + uwb_rc_neh_get(neh); + } + spin_unlock_irqrestore(&rc->neh_lock, flags); + if (result < 0) + goto error_ctx_get; + + return neh; + +error_ctx_get: + kfree(neh); +error_kzalloc: + dev_err(dev, "cannot open handle to radio controller: %d\n", result); + return ERR_PTR(result); +} + +static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) +{ + del_timer(&neh->timer); + __uwb_rc_ctx_put(rc, neh); + list_del(&neh->list_node); +} + +/** + * uwb_rc_neh_rm - remove a neh. + * @rc: the radio controller + * @neh: the neh to remove + * + * Remove an active neh immediately instead of waiting for the event + * (or a time out). + */ +void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) +{ + unsigned long flags; + + spin_lock_irqsave(&rc->neh_lock, flags); + __uwb_rc_neh_rm(rc, neh); + spin_unlock_irqrestore(&rc->neh_lock, flags); + + uwb_rc_neh_put(neh); +} + +/** + * uwb_rc_neh_arm - arm an event handler timeout timer + * + * @rc: UWB Radio Controller + * @neh: Notification/event handler for @rc + * + * The timer is only armed if the neh is active. + */ +void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh) +{ + unsigned long flags; + + spin_lock_irqsave(&rc->neh_lock, flags); + if (neh->context) + mod_timer(&neh->timer, + jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS)); + spin_unlock_irqrestore(&rc->neh_lock, flags); +} + +static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size) +{ + (*neh->cb)(neh->rc, neh->arg, rceb, size); + uwb_rc_neh_put(neh); +} + +static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb) +{ + return neh->evt_type == rceb->bEventType + && neh->evt == rceb->wEvent + && neh->context == rceb->bEventContext; +} + +/** + * Find the handle waiting for a RC Radio Control Event + * + * @rc: UWB Radio Controller + * @rceb: Pointer to the RCEB buffer + * @event_size: Pointer to the size of the RCEB buffer. Might be + * adjusted to take into account the @neh->extra_size + * settings. + * + * If the listener has no buffer (NULL buffer), one is allocated for + * the right size (the amount of data received). @neh->ptr will point + * to the event payload, which always starts with a 'struct + * uwb_rceb'. kfree() it when done. 
+ */ +static +struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc, + const struct uwb_rceb *rceb) +{ + struct uwb_rc_neh *neh = NULL, *h; + unsigned long flags; + + spin_lock_irqsave(&rc->neh_lock, flags); + + list_for_each_entry(h, &rc->neh_list, list_node) { + if (uwb_rc_neh_match(h, rceb)) { + neh = h; + break; + } + } + + if (neh) + __uwb_rc_neh_rm(rc, neh); + + spin_unlock_irqrestore(&rc->neh_lock, flags); + + return neh; +} + + +/** + * Process notifications coming from the radio control interface + * + * @rc: UWB Radio Control Interface descriptor + * @neh: Notification/Event Handler @neh->ptr points to + * @uwb_evt->buffer. + * + * This function is called by the event/notif handling subsystem when + * notifications arrive (hwarc_probe() arms a notification/event handle + * that calls back this function for every received notification; this + * function then will rearm itself). + * + * Notification data buffers are dynamically allocated by the NEH + * handling code in neh.c [uwb_rc_neh_lookup()]. What is actually + * allocated is space to contain the notification data. + * + * Buffers are prefixed with a Radio Control Event Block (RCEB) as + * defined by the WUSB Wired-Adapter Radio Control interface. We + * just use it for the notification code. + * + * On each case statement we just transcode endianess of the different + * fields. We declare a pointer to a RCI definition of an event, and + * then to a UWB definition of the same event (which are the same, + * remember). Event if we use different pointers + */ +static +void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size) +{ + struct device *dev = &rc->uwb_dev.dev; + struct uwb_event *uwb_evt; + + if (size == -ESHUTDOWN) + return; + if (size < 0) { + dev_err(dev, "ignoring event with error code %zu\n", + size); + return; + } + + uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC); + if (unlikely(uwb_evt == NULL)) { + dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n", + rceb->bEventType, le16_to_cpu(rceb->wEvent), + rceb->bEventContext); + return; + } + uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */ + uwb_evt->ts_jiffies = jiffies; + uwb_evt->type = UWB_EVT_TYPE_NOTIF; + uwb_evt->notif.size = size; + uwb_evt->notif.rceb = rceb; + + switch (le16_to_cpu(rceb->wEvent)) { + /* Trap some vendor specific events + * + * FIXME: move this to handling in ptc-est, where we + * register a NULL event handler for these two guys + * using the Intel IDs. 
+ */ + case 0x0103: + dev_info(dev, "FIXME: DEVICE ADD\n"); + return; + case 0x0104: + dev_info(dev, "FIXME: DEVICE RM\n"); + return; + default: + break; + } + + uwbd_event_queue(uwb_evt); +} + +static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size) +{ + struct device *dev = &rc->uwb_dev.dev; + struct uwb_rc_neh *neh; + struct uwb_rceb *notif; + + if (rceb->bEventContext == 0) { + notif = kmalloc(size, GFP_ATOMIC); + if (notif) { + memcpy(notif, rceb, size); + uwb_rc_notif(rc, notif, size); + } else + dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n", + rceb->bEventType, le16_to_cpu(rceb->wEvent), + rceb->bEventContext, size); + } else { + neh = uwb_rc_neh_lookup(rc, rceb); + if (neh) + uwb_rc_neh_cb(neh, rceb, size); + else if (printk_ratelimit()) + dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", + rceb->bEventType, le16_to_cpu(rceb->wEvent), + rceb->bEventContext, size); + } +} + +/** + * Given a buffer with one or more UWB RC events/notifications, break + * them up and dispatch them. + * + * @rc: UWB Radio Controller + * @buf: Buffer with the stream of notifications/events + * @buf_size: Amount of data in the buffer + * + * Note each notification/event starts always with a 'struct + * uwb_rceb', so the minimum size if 4 bytes. + * + * The device may pass us events formatted differently than expected. + * These are first filtered, potentially creating a new event in a new + * memory location. If a new event is created by the filter it is also + * freed here. + * + * For each notif/event, tries to guess the size looking at the EST + * tables, then looks for a neh that is waiting for that event and if + * found, copies the payload to the neh's buffer and calls it back. If + * not, the data is ignored. + * + * Note that if we can't find a size description in the EST tables, we + * still might find a size in the 'neh' handle in uwb_rc_neh_lookup(). + * + * Assumptions: + * + * @rc->neh_lock is NOT taken + * + * We keep track of various sizes here: + * size: contains the size of the buffer that is processed for the + * incoming event. this buffer may contain events that are not + * formatted as WHCI. + * real_size: the actual space taken by this event in the buffer. + * We need to keep track of the real size of an event to be able to + * advance the buffer correctly. + * event_size: the size of the event as expected by the core layer + * [OR] the size of the event after filtering. if the filtering + * created a new event in a new memory location then this is + * effectively the size of a new event buffer + */ +void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size) +{ + struct device *dev = &rc->uwb_dev.dev; + void *itr; + struct uwb_rceb *rceb; + size_t size, real_size, event_size; + int needtofree; + + d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size); + d_printf(2, dev, "groking event block: %zu bytes\n", buf_size); + itr = buf; + size = buf_size; + while (size > 0) { + if (size < sizeof(*rceb)) { + dev_err(dev, "not enough data in event buffer to " + "process incoming events (%zu left, minimum is " + "%zu)\n", size, sizeof(*rceb)); + break; + } + + rceb = itr; + if (rc->filter_event) { + needtofree = rc->filter_event(rc, &rceb, size, + &real_size, &event_size); + if (needtofree < 0 && needtofree != -ENOANO) { + dev_err(dev, "BUG: Unable to filter event " + "(0x%02x/%04x/%02x) from " + "device. 
\n", rceb->bEventType, + le16_to_cpu(rceb->wEvent), + rceb->bEventContext); + break; + } + } else + needtofree = -ENOANO; + /* do real processing if there was no filtering or the + * filtering didn't act */ + if (needtofree == -ENOANO) { + ssize_t ret = uwb_est_find_size(rc, rceb, size); + if (ret < 0) + break; + if (ret > size) { + dev_err(dev, "BUG: hw sent incomplete event " + "0x%02x/%04x/%02x (%zd bytes), only got " + "%zu bytes. We don't handle that.\n", + rceb->bEventType, le16_to_cpu(rceb->wEvent), + rceb->bEventContext, ret, size); + break; + } + real_size = event_size = ret; + } + uwb_rc_neh_grok_event(rc, rceb, event_size); + + if (needtofree == 1) + kfree(rceb); + + itr += real_size; + size -= real_size; + d_printf(2, dev, "consumed %zd bytes, %zu left\n", + event_size, size); + } + d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size); +} +EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); + + +/** + * The entity that reads from the device notification/event channel has + * detected an error. + * + * @rc: UWB Radio Controller + * @error: Errno error code + * + */ +void uwb_rc_neh_error(struct uwb_rc *rc, int error) +{ + struct uwb_rc_neh *neh, *next; + unsigned long flags; + + BUG_ON(error >= 0); + spin_lock_irqsave(&rc->neh_lock, flags); + list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { + __uwb_rc_neh_rm(rc, neh); + uwb_rc_neh_cb(neh, NULL, error); + } + spin_unlock_irqrestore(&rc->neh_lock, flags); +} +EXPORT_SYMBOL_GPL(uwb_rc_neh_error); + + +static void uwb_rc_neh_timer(unsigned long arg) +{ + struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg; + struct uwb_rc *rc = neh->rc; + unsigned long flags; + + spin_lock_irqsave(&rc->neh_lock, flags); + __uwb_rc_neh_rm(rc, neh); + spin_unlock_irqrestore(&rc->neh_lock, flags); + + uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); +} + +/** Initializes the @rc's neh subsystem + */ +void uwb_rc_neh_create(struct uwb_rc *rc) +{ + spin_lock_init(&rc->neh_lock); + INIT_LIST_HEAD(&rc->neh_list); + set_bit(0, rc->ctx_bm); /* 0 is reserved (see [WUSB] table 8-65) */ + set_bit(0xff, rc->ctx_bm); /* and 0xff is invalid */ + rc->ctx_roll = 1; +} + + +/** Release's the @rc's neh subsystem */ +void uwb_rc_neh_destroy(struct uwb_rc *rc) +{ + unsigned long flags; + struct uwb_rc_neh *neh, *next; + + spin_lock_irqsave(&rc->neh_lock, flags); + list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { + __uwb_rc_neh_rm(rc, neh); + uwb_rc_neh_put(neh); + } + spin_unlock_irqrestore(&rc->neh_lock, flags); +} diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c new file mode 100644 index 0000000..8de856f --- /dev/null +++ b/drivers/uwb/reset.c @@ -0,0 +1,362 @@ +/* + * Ultra Wide Band + * UWB basic command support and radio reset + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * + * FIXME: + * + * - docs + * + * - Now we are serializing (using the uwb_dev->mutex) the command + * execution; it should be parallelized as much as possible some + * day. + */ +#include +#include + +#include "uwb-internal.h" +#define D_LOCAL 0 +#include + +/** + * Command result codes (WUSB1.0[T8-69]) + */ +static +const char *__strerror[] = { + "success", + "failure", + "hardware failure", + "no more slots", + "beacon is too large", + "invalid parameter", + "unsupported power level", + "time out (wa) or invalid ie data (whci)", + "beacon size exceeded", + "cancelled", + "invalid state", + "invalid size", + "ack not recieved", + "no more asie notification", +}; + + +/** Return a string matching the given error code */ +const char *uwb_rc_strerror(unsigned code) +{ + if (code == 255) + return "time out"; + if (code >= ARRAY_SIZE(__strerror)) + return "unknown error"; + return __strerror[code]; +} + +int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + uwb_rc_cmd_cb_f cb, void *arg) +{ + struct device *dev = &rc->uwb_dev.dev; + struct uwb_rc_neh *neh; + int needtofree = 0; + int result; + + uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */ + if (rc->priv == NULL) { + uwb_dev_unlock(&rc->uwb_dev); + return -ESHUTDOWN; + } + + if (rc->filter_cmd) { + needtofree = rc->filter_cmd(rc, &cmd, &cmd_size); + if (needtofree < 0 && needtofree != -ENOANO) { + dev_err(dev, "%s: filter error: %d\n", + cmd_name, needtofree); + uwb_dev_unlock(&rc->uwb_dev); + return needtofree; + } + } + + neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg); + if (IS_ERR(neh)) { + result = PTR_ERR(neh); + goto out; + } + + result = rc->cmd(rc, cmd, cmd_size); + uwb_dev_unlock(&rc->uwb_dev); + if (result < 0) + uwb_rc_neh_rm(rc, neh); + else + uwb_rc_neh_arm(rc, neh); + uwb_rc_neh_put(neh); +out: + if (needtofree == 1) + kfree(cmd); + return result < 0 ? result : 0; +} +EXPORT_SYMBOL_GPL(uwb_rc_cmd_async); + +struct uwb_rc_cmd_done_params { + struct completion completion; + struct uwb_rceb *reply; + ssize_t reply_size; +}; + +static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg, + struct uwb_rceb *reply, ssize_t reply_size) +{ + struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg; + + if (reply_size > 0) { + if (p->reply) + reply_size = min(p->reply_size, reply_size); + else + p->reply = kmalloc(reply_size, GFP_ATOMIC); + + if (p->reply) + memcpy(p->reply, reply, reply_size); + else + reply_size = -ENOMEM; + } + p->reply_size = reply_size; + complete(&p->completion); +} + + +/** + * Generic function for issuing commands to the Radio Control Interface + * + * @rc: UWB Radio Control descriptor + * @cmd_name: Name of the command being issued (for error messages) + * @cmd: Pointer to rccb structure containing the command; + * normally you embed this structure as the first member of + * the full command structure. + * @cmd_size: Size of the whole command buffer pointed to by @cmd. + * @reply: Pointer to where to store the reply + * @reply_size: @reply's size + * @expected_type: Expected type in the return event + * @expected_event: Expected event code in the return event + * @preply: Here a pointer to where the event data is received will + * be stored. Once done with the data, free with kfree(). + * + * This function is generic; it works for commands that return a fixed + * and known size or for commands that return a variable amount of data. 
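An illustrative sketch, not from the patch itself: a caller of the asynchronous path above, reissuing the same RESET command that uwb_rc_reset() further down sends synchronously. The callback signature follows uwb_rc_cmd_done(); the names my_reset_async()/my_reset_done() are hypothetical. The command lives in kmalloc'ed memory for the no-USB-from-the-stack reason given at uwb_rc_reset(), and since uwb_rc_cmd_async() is done with the buffer by the time it returns (it frees its own filtered copy the same way), the caller can free it immediately.

static void my_reset_done(struct uwb_rc *rc, void *arg,
			  struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_confirm *conf;

	if (reply == NULL || reply_size < 0) {
		/* no reply: the neh timed out or the notif channel died */
		dev_err(&rc->uwb_dev.dev, "RESET: no confirmation: %zd\n",
			reply_size);
		return;
	}
	conf = container_of(reply, struct uwb_rc_evt_confirm, rceb);
	if (conf->bResultCode != UWB_RC_RES_SUCCESS)
		dev_err(&rc->uwb_dev.dev, "RESET: failed: %s (%d)\n",
			uwb_rc_strerror(conf->bResultCode),
			conf->bResultCode);
}

static int my_reset_async(struct uwb_rc *rc)
{
	struct uwb_rccb *cmd;
	int result;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); /* no USB from the stack */
	if (cmd == NULL)
		return -ENOMEM;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
	result = uwb_rc_cmd_async(rc, "RESET", cmd, sizeof(*cmd),
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_RESET,
				  my_reset_done, NULL);
	kfree(cmd); /* uwb_rc_cmd_async() no longer needs it on return */
	return result;
}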
+ * + * If a buffer is provided, that is used, although it could be chopped + * to the maximum size of the buffer. If the buffer is NULL, then one + * be allocated in *preply with the whole contents of the reply. + * + * @rc needs to be referenced + */ +static +ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + struct uwb_rceb *reply, size_t reply_size, + u8 expected_type, u16 expected_event, + struct uwb_rceb **preply) +{ + ssize_t result = 0; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_rc_cmd_done_params params; + + init_completion(¶ms.completion); + params.reply = reply; + params.reply_size = reply_size; + + result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size, + expected_type, expected_event, + uwb_rc_cmd_done, ¶ms); + if (result) + return result; + + wait_for_completion(¶ms.completion); + + if (preply) + *preply = params.reply; + + if (params.reply_size < 0) + dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x " + "reception failed: %d\n", cmd_name, + expected_type, expected_event, cmd->bCommandContext, + (int)params.reply_size); + return params.reply_size; +} + + +/** + * Generic function for issuing commands to the Radio Control Interface + * + * @rc: UWB Radio Control descriptor + * @cmd_name: Name of the command being issued (for error messages) + * @cmd: Pointer to rccb structure containing the command; + * normally you embed this structure as the first member of + * the full command structure. + * @cmd_size: Size of the whole command buffer pointed to by @cmd. + * @reply: Pointer to the beginning of the confirmation event + * buffer. Normally bigger than an 'struct hwarc_rceb'. + * You need to fill out reply->bEventType and reply->wEvent (in + * cpu order) as the function will use them to verify the + * confirmation event. + * @reply_size: Size of the reply buffer + * + * The function checks that the length returned in the reply is at + * least as big as @reply_size; if not, it will be deemed an error and + * -EIO returned. + * + * @rc needs to be referenced + */ +ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + struct uwb_rceb *reply, size_t reply_size) +{ + struct device *dev = &rc->uwb_dev.dev; + ssize_t result; + + result = __uwb_rc_cmd(rc, cmd_name, + cmd, cmd_size, reply, reply_size, + reply->bEventType, reply->wEvent, NULL); + + if (result > 0 && result < reply_size) { + dev_err(dev, "%s: not enough data returned for decoding reply " + "(%zu bytes received vs at least %zu needed)\n", + cmd_name, result, reply_size); + result = -EIO; + } + return result; +} +EXPORT_SYMBOL_GPL(uwb_rc_cmd); + + +/** + * Generic function for issuing commands to the Radio Control + * Interface that return an unknown amount of data + * + * @rc: UWB Radio Control descriptor + * @cmd_name: Name of the command being issued (for error messages) + * @cmd: Pointer to rccb structure containing the command; + * normally you embed this structure as the first member of + * the full command structure. + * @cmd_size: Size of the whole command buffer pointed to by @cmd. + * @expected_type: Expected type in the return event + * @expected_event: Expected event code in the return event + * @preply: Here a pointer to where the event data is received will + * be stored. Once done with the data, free with kfree(). + * + * The function checks that the length returned in the reply is at + * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an + * error and -EIO returned. 
+ * + * @rc needs to be referenced + */ +ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, + struct uwb_rccb *cmd, size_t cmd_size, + u8 expected_type, u16 expected_event, + struct uwb_rceb **preply) +{ + return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0, + expected_type, expected_event, preply); +} +EXPORT_SYMBOL_GPL(uwb_rc_vcmd); + + +/** + * Reset a UWB Host Controller (and all radio settings) + * + * @rc: Host Controller descriptor + * @returns: 0 if ok, < 0 errno code on error + * + * We put the command on kmalloc'ed memory as some arches cannot do + * USB from the stack. The reply event is copied from an stage buffer, + * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. + */ +int uwb_rc_reset(struct uwb_rc *rc) +{ + int result = -ENOMEM; + struct uwb_rc_evt_confirm reply; + struct uwb_rccb *cmd; + size_t cmd_size = sizeof(*cmd); + + mutex_lock(&rc->uwb_dev.mutex); + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + goto error_kzalloc; + cmd->bCommandType = UWB_RC_CET_GENERAL; + cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET); + reply.rceb.bEventType = UWB_RC_CET_GENERAL; + reply.rceb.wEvent = UWB_RC_CMD_RESET; + result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size, + &reply.rceb, sizeof(reply)); + if (result < 0) + goto error_cmd; + if (reply.bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(&rc->uwb_dev.dev, + "RESET: command execution failed: %s (%d)\n", + uwb_rc_strerror(reply.bResultCode), reply.bResultCode); + result = -EIO; + } +error_cmd: + kfree(cmd); +error_kzalloc: + mutex_unlock(&rc->uwb_dev.mutex); + return result; +} + +int uwbd_msg_handle_reset(struct uwb_event *evt) +{ + struct uwb_rc *rc = evt->rc; + int ret; + + /* Need to prevent the RC hardware module going away while in + the rc->reset() call. */ + if (!try_module_get(rc->owner)) + return 0; + + dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); + ret = rc->reset(rc); + if (ret) + dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); + + module_put(rc->owner); + return ret; +} + +/** + * uwb_rc_reset_all - request a reset of the radio controller and PALs + * @rc: the radio controller of the hardware device to be reset. + * + * The full hardware reset of the radio controller and all the PALs + * will be scheduled. + */ +void uwb_rc_reset_all(struct uwb_rc *rc) +{ + struct uwb_event *evt; + + evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC); + if (unlikely(evt == NULL)) + return; + + evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */ + evt->ts_jiffies = jiffies; + evt->type = UWB_EVT_TYPE_MSG; + evt->message = UWB_EVT_MSG_RESET; + + uwbd_event_queue(evt); +} +EXPORT_SYMBOL_GPL(uwb_rc_reset_all); -- cgit v0.10.2 From 22d203ecef9b0cc1fa8d8f64c935b451ca7d1022 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:08 +0100 Subject: uwb: add the UWB stack (MLME) Most of the MAC Layer Management Entity (MLME) support: address, beacon, IE and scan management. Signed-off-by: David Vrabel diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c new file mode 100644 index 0000000..1664ae5 --- /dev/null +++ b/drivers/uwb/address.c @@ -0,0 +1,374 @@ +/* + * Ultra Wide Band + * Address management + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ + +#include +#include +#include +#include +#include +#include +#include "uwb-internal.h" + + +/** Device Address Management command */ +struct uwb_rc_cmd_dev_addr_mgmt { + struct uwb_rccb rccb; + u8 bmOperationType; + u8 baAddr[6]; +} __attribute__((packed)); + + +/** + * Low level command for setting/getting UWB radio's addresses + * + * @hwarc: HWA Radio Control interface instance + * @bmOperationType: + * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2]) + * @baAddr: address buffer--assumed to have enough data to hold + * the address type requested. + * @reply: Pointer to reply buffer (can be stack allocated) + * @returns: 0 if ok, < 0 errno code on error. + * + * @cmd has to be allocated because USB cannot grok USB or vmalloc + * buffers depending on your combination of host architecture. + */ +static +int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc, + u8 bmOperationType, const u8 *baAddr, + struct uwb_rc_evt_dev_addr_mgmt *reply) +{ + int result; + struct uwb_rc_cmd_dev_addr_mgmt *cmd; + + result = -ENOMEM; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + goto error_kzalloc; + cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; + cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT); + cmd->bmOperationType = bmOperationType; + if (baAddr) { + size_t size = 0; + switch (bmOperationType >> 1) { + case 0: size = 2; break; + case 1: size = 6; break; + default: BUG(); + } + memcpy(cmd->baAddr, baAddr, size); + } + reply->rceb.bEventType = UWB_RC_CET_GENERAL; + reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT; + result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT", + &cmd->rccb, sizeof(*cmd), + &reply->rceb, sizeof(*reply)); + if (result < 0) + goto error_cmd; + if (result < sizeof(*reply)) { + dev_err(&rc->uwb_dev.dev, + "DEV-ADDR-MGMT: not enough data replied: " + "%d vs %zu bytes needed\n", result, sizeof(*reply)); + result = -ENOMSG; + } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(&rc->uwb_dev.dev, + "DEV-ADDR-MGMT: command execution failed: %s (%d)\n", + uwb_rc_strerror(reply->bResultCode), + reply->bResultCode); + result = -EIO; + } else + result = 0; +error_cmd: + kfree(cmd); +error_kzalloc: + return result; +} + + +/** + * Set the UWB RC MAC or device address. + * + * @rc: UWB Radio Controller + * @_addr: Pointer to address to write [assumed to be either a + * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. + * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC). + * @returns: 0 if ok, < 0 errno code on error. + * + * Some anal retentivity here: even if both 'struct + * uwb_{dev,mac}_addr' have the actual byte array in the same offset + * and I could just pass _addr to hwarc_cmd_dev_addr_mgmt(), I prefer + * to use some syntatic sugar in case someday we decide to change the + * format of the structs. The compiler will optimize it out anyway. 
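As a reading aid, the bmOperationType encoding implied by uwb_rc_dev_addr_mgmt() above and the set/get wrappers below can be summarized as follows; the symbolic names are hypothetical, and WUSB1.0[8.6.2.2] remains the authoritative reference:

enum {
	DEMO_DEV_ADDR_GET_DEV = 0x0, /* bit0 clear: get, bit1 clear: 16-bit DevAddr (2 bytes) */
	DEMO_DEV_ADDR_SET_DEV = 0x1, /* bit0 set:   set, bit1 clear: 16-bit DevAddr (2 bytes) */
	DEMO_DEV_ADDR_GET_MAC = 0x2, /* bit0 clear: get, bit1 set:   EUI-48 MAC (6 bytes)     */
	DEMO_DEV_ADDR_SET_MAC = 0x3, /* bit0 set:   set, bit1 set:   EUI-48 MAC (6 bytes)     */
};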
+ */ +static int uwb_rc_addr_set(struct uwb_rc *rc, + const void *_addr, enum uwb_addr_type type) +{ + int result; + u8 bmOperationType = 0x1; /* Set address */ + const struct uwb_dev_addr *dev_addr = _addr; + const struct uwb_mac_addr *mac_addr = _addr; + struct uwb_rc_evt_dev_addr_mgmt reply; + const u8 *baAddr; + + result = -EINVAL; + switch (type) { + case UWB_ADDR_DEV: + baAddr = dev_addr->data; + break; + case UWB_ADDR_MAC: + baAddr = mac_addr->data; + bmOperationType |= 0x2; + break; + default: + return result; + } + return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply); +} + + +/** + * Get the UWB radio's MAC or device address. + * + * @rc: UWB Radio Controller + * @_addr: Where to write the address data [assumed to be either a + * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. + * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC). + * @returns: 0 if ok (and *_addr set), < 0 errno code on error. + * + * See comment in uwb_rc_addr_set() about anal retentivity in the + * type handling of the address variables. + */ +static int uwb_rc_addr_get(struct uwb_rc *rc, + void *_addr, enum uwb_addr_type type) +{ + int result; + u8 bmOperationType = 0x0; /* Get address */ + struct uwb_rc_evt_dev_addr_mgmt evt; + struct uwb_dev_addr *dev_addr = _addr; + struct uwb_mac_addr *mac_addr = _addr; + u8 *baAddr; + + result = -EINVAL; + switch (type) { + case UWB_ADDR_DEV: + baAddr = dev_addr->data; + break; + case UWB_ADDR_MAC: + bmOperationType |= 0x2; + baAddr = mac_addr->data; + break; + default: + return result; + } + result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt); + if (result == 0) + switch (type) { + case UWB_ADDR_DEV: + memcpy(&dev_addr->data, evt.baAddr, + sizeof(dev_addr->data)); + break; + case UWB_ADDR_MAC: + memcpy(&mac_addr->data, evt.baAddr, + sizeof(mac_addr->data)); + break; + default: /* shut gcc up */ + BUG(); + } + return result; +} + + +/** Get @rc's MAC address to @addr */ +int uwb_rc_mac_addr_get(struct uwb_rc *rc, + struct uwb_mac_addr *addr) { + return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC); +} +EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get); + + +/** Get @rc's device address to @addr */ +int uwb_rc_dev_addr_get(struct uwb_rc *rc, + struct uwb_dev_addr *addr) { + return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV); +} +EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get); + + +/** Set @rc's address to @addr */ +int uwb_rc_mac_addr_set(struct uwb_rc *rc, + const struct uwb_mac_addr *addr) +{ + int result = -EINVAL; + mutex_lock(&rc->uwb_dev.mutex); + result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC); + mutex_unlock(&rc->uwb_dev.mutex); + return result; +} + + +/** Set @rc's address to @addr */ +int uwb_rc_dev_addr_set(struct uwb_rc *rc, + const struct uwb_dev_addr *addr) +{ + int result = -EINVAL; + mutex_lock(&rc->uwb_dev.mutex); + result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV); + rc->uwb_dev.dev_addr = *addr; + mutex_unlock(&rc->uwb_dev.mutex); + return result; +} + +/* Returns !0 if given address is already assigned to device. */ +int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_mac_addr *addr = _addr; + + if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr)) + return !0; + return 0; +} + +/* Returns !0 if given address is already assigned to device. 
*/ +int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_dev_addr *addr = _addr; + if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr)) + return !0; + return 0; +} + +/** + * uwb_dev_addr_assign - assigned a generated DevAddr to a radio controller + * @rc: the (local) radio controller device requiring a new DevAddr + * + * A new DevAddr is required when: + * - first setting up a radio controller + * - if the hardware reports a DevAddr conflict + * + * The DevAddr is randomly generated in the generated DevAddr range + * [0x100, 0xfeff]. The number of devices in a beacon group is limited + * by mMaxBPLength (96) so this address space will never be exhausted. + * + * [ECMA-368] 17.1.1, 17.16. + */ +int uwb_rc_dev_addr_assign(struct uwb_rc *rc) +{ + struct uwb_dev_addr new_addr; + + do { + get_random_bytes(new_addr.data, sizeof(new_addr.data)); + } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff + || __uwb_dev_addr_assigned(rc, &new_addr)); + + return uwb_rc_dev_addr_set(rc, &new_addr); +} + +/** + * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event + * @evt: the DEV_ADDR_CONFLICT notification from the radio controller + * + * A new (non-conflicting) DevAddr is assigned to the radio controller. + * + * [ECMA-368] 17.1.1.1. + */ +int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt) +{ + struct uwb_rc *rc = evt->rc; + + return uwb_rc_dev_addr_assign(rc); +} + +/* + * Print the 48-bit EUI MAC address of the radio controller when + * reading /sys/class/uwb_rc/XX/mac_address + */ +static ssize_t uwb_rc_mac_addr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_rc *rc = uwb_dev->rc; + struct uwb_mac_addr addr; + ssize_t result; + + mutex_lock(&rc->uwb_dev.mutex); + result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC); + mutex_unlock(&rc->uwb_dev.mutex); + if (result >= 0) { + result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr); + buf[result++] = '\n'; + } + return result; +} + +/* + * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address + * and if correct, set it. + */ +static ssize_t uwb_rc_mac_addr_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_rc *rc = uwb_dev->rc; + struct uwb_mac_addr addr; + ssize_t result; + + result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n", + &addr.data[0], &addr.data[1], &addr.data[2], + &addr.data[3], &addr.data[4], &addr.data[5]); + if (result != 6) { + result = -EINVAL; + goto out; + } + if (is_multicast_ether_addr(addr.data)) { + dev_err(&rc->uwb_dev.dev, "refusing to set multicast " + "MAC address %s\n", buf); + result = -EINVAL; + goto out; + } + result = uwb_rc_mac_addr_set(rc, &addr); + if (result == 0) + rc->uwb_dev.mac_addr = addr; +out: + return result < 0 ? 
result : size; +} +DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store); + +/** Print @addr to @buf, @return bytes written */ +size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr, + int type) +{ + size_t result; + if (type) + result = scnprintf(buf, buf_size, + "%02x:%02x:%02x:%02x:%02x:%02x", + addr[0], addr[1], addr[2], + addr[3], addr[4], addr[5]); + else + result = scnprintf(buf, buf_size, "%02x:%02x", + addr[1], addr[0]); + return result; +} +EXPORT_SYMBOL_GPL(__uwb_addr_print); diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c new file mode 100644 index 0000000..f65a52c --- /dev/null +++ b/drivers/uwb/beacon.c @@ -0,0 +1,644 @@ +/* + * Ultra Wide Band + * Beacon management + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ + +#include +#include +#include +#include +#include +#include +#include "uwb-internal.h" + +#define D_LOCAL 0 +#include + +/** Start Beaconing command structure */ +struct uwb_rc_cmd_start_beacon { + struct uwb_rccb rccb; + __le16 wBPSTOffset; + u8 bChannelNumber; +} __attribute__((packed)); + + +static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel) +{ + int result; + struct uwb_rc_cmd_start_beacon *cmd; + struct uwb_rc_evt_confirm reply; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; + cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON); + cmd->wBPSTOffset = cpu_to_le16(bpst_offset); + cmd->bChannelNumber = channel; + reply.rceb.bEventType = UWB_RC_CET_GENERAL; + reply.rceb.wEvent = UWB_RC_CMD_START_BEACON; + result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd), + &reply.rceb, sizeof(reply)); + if (result < 0) + goto error_cmd; + if (reply.bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(&rc->uwb_dev.dev, + "START-BEACON: command execution failed: %s (%d)\n", + uwb_rc_strerror(reply.bResultCode), reply.bResultCode); + result = -EIO; + } +error_cmd: + kfree(cmd); + return result; +} + +static int uwb_rc_stop_beacon(struct uwb_rc *rc) +{ + int result; + struct uwb_rccb *cmd; + struct uwb_rc_evt_confirm reply; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + cmd->bCommandType = UWB_RC_CET_GENERAL; + cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON); + reply.rceb.bEventType = UWB_RC_CET_GENERAL; + reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON; + result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd), + &reply.rceb, sizeof(reply)); + if (result < 0) + goto error_cmd; + if (reply.bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(&rc->uwb_dev.dev, + "STOP-BEACON: command execution failed: %s (%d)\n", + uwb_rc_strerror(reply.bResultCode), reply.bResultCode); + result = -EIO; + } +error_cmd: + kfree(cmd); + return result; +} + +/* + * 
Start/stop beacons + * + * @rc: UWB Radio Controller to operate on + * @channel: UWB channel on which to beacon (WUSB[table + * 5-12]). If -1, stop beaconing. + * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero + * + * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB + * of a SET IE command after the device sent the first beacon that includes + * the IEs specified in the SET IE command. So, after we start beaconing we + * check if there is anything in the IE cache and call the SET IE command + * if needed. + */ +int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) +{ + int result; + struct device *dev = &rc->uwb_dev.dev; + + mutex_lock(&rc->uwb_dev.mutex); + if (channel < 0) + channel = -1; + if (channel == -1) + result = uwb_rc_stop_beacon(rc); + else { + /* channel >= 0...dah */ + result = uwb_rc_start_beacon(rc, bpst_offset, channel); + if (result < 0) + goto out_up; + if (le16_to_cpu(rc->ies->wIELength) > 0) { + result = uwb_rc_set_ie(rc, rc->ies); + if (result < 0) { + dev_err(dev, "Cannot set new IE on device: " + "%d\n", result); + result = uwb_rc_stop_beacon(rc); + channel = -1; + bpst_offset = 0; + } else + result = 0; + } + } + + if (result < 0) + goto out_up; + rc->beaconing = channel; + + uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE); + +out_up: + mutex_unlock(&rc->uwb_dev.mutex); + return result; +} + +/* + * Beacon cache + * + * The purpose of this is to speed up the lookup of becon information + * when a new beacon arrives. The UWB Daemon uses it also to keep a + * tab of which devices are in radio distance and which not. When a + * device's beacon stays present for more than a certain amount of + * time, it is considered a new, usable device. When a beacon ceases + * to be received for a certain amount of time, it is considered that + * the device is gone. + * + * FIXME: use an allocator for the entries + * FIXME: use something faster for search than a list + */ + +struct uwb_beca uwb_beca = { + .list = LIST_HEAD_INIT(uwb_beca.list), + .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex) +}; + + +void uwb_bce_kfree(struct kref *_bce) +{ + struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); + + kfree(bce->be); + kfree(bce); +} + + +/* Find a beacon by dev addr in the cache */ +static +struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr) +{ + struct uwb_beca_e *bce, *next; + list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { + d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n", + dev_addr->data[0], dev_addr->data[1], + bce->dev_addr.data[0], bce->dev_addr.data[1]); + if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) + goto out; + } + bce = NULL; +out: + return bce; +} + +/* Find a beacon by dev addr in the cache */ +static +struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr) +{ + struct uwb_beca_e *bce, *next; + list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { + if (!memcmp(bce->mac_addr, mac_addr->data, + sizeof(bce->mac_addr))) + goto out; + } + bce = NULL; +out: + return bce; +} + +/** + * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr + * @rc: the radio controller that saw the device + * @devaddr: DevAddr of the UWB device to find + * + * There may be more than one matching device (in the case of a + * DevAddr conflict), but only the first one is returned. 
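Referring back to uwb_rc_beacon() above, a minimal caller sketch; the function name is hypothetical and the channel number is only an example, which must be valid for the radio's band group (WUSB[table 5-12]):

static int demo_beacon_cycle(struct uwb_rc *rc)
{
	int result;

	/* Start beaconing on channel 9 with a zero BPST offset. */
	result = uwb_rc_beacon(rc, 9, 0);
	if (result < 0)
		return result;

	/* ... member of a beacon group until we decide to stop ... */

	/* A channel of -1 stops beaconing. */
	return uwb_rc_beacon(rc, -1, 0);
}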
+ */ +struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, + const struct uwb_dev_addr *devaddr) +{ + struct uwb_dev *found = NULL; + struct uwb_beca_e *bce; + + mutex_lock(&uwb_beca.mutex); + bce = __uwb_beca_find_bydev(devaddr); + if (bce) + found = uwb_dev_try_get(rc, bce->uwb_dev); + mutex_unlock(&uwb_beca.mutex); + + return found; +} + +/** + * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48 + * @rc: the radio controller that saw the device + * @devaddr: EUI-48 of the UWB device to find + */ +struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, + const struct uwb_mac_addr *macaddr) +{ + struct uwb_dev *found = NULL; + struct uwb_beca_e *bce; + + mutex_lock(&uwb_beca.mutex); + bce = __uwb_beca_find_bymac(macaddr); + if (bce) + found = uwb_dev_try_get(rc, bce->uwb_dev); + mutex_unlock(&uwb_beca.mutex); + + return found; +} + +/* Initialize a beacon cache entry */ +static void uwb_beca_e_init(struct uwb_beca_e *bce) +{ + mutex_init(&bce->mutex); + kref_init(&bce->refcnt); + stats_init(&bce->lqe_stats); + stats_init(&bce->rssi_stats); +} + +/* + * Add a beacon to the cache + * + * @be: Beacon event information + * @bf: Beacon frame (part of b, really) + * @ts_jiffies: Timestamp (in jiffies) when the beacon was received + */ +struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, + struct uwb_beacon_frame *bf, + unsigned long ts_jiffies) +{ + struct uwb_beca_e *bce; + + bce = kzalloc(sizeof(*bce), GFP_KERNEL); + if (bce == NULL) + return NULL; + uwb_beca_e_init(bce); + bce->ts_jiffies = ts_jiffies; + bce->uwb_dev = NULL; + list_add(&bce->node, &uwb_beca.list); + return bce; +} + +/* + * Wipe out beacon entries that became stale + * + * Remove associated devicest too. + */ +void uwb_beca_purge(void) +{ + struct uwb_beca_e *bce, *next; + unsigned long now = jiffies; + mutex_lock(&uwb_beca.mutex); + list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { + if (now - bce->ts_jiffies + > msecs_to_jiffies(beacon_timeout_ms)) { + uwbd_dev_offair(bce); + list_del(&bce->node); + uwb_bce_put(bce); + } + } + mutex_unlock(&uwb_beca.mutex); +} + +/* Clean up the whole beacon cache. 
Called on shutdown */ +void uwb_beca_release(void) +{ + struct uwb_beca_e *bce, *next; + mutex_lock(&uwb_beca.mutex); + list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { + list_del(&bce->node); + uwb_bce_put(bce); + } + mutex_unlock(&uwb_beca.mutex); +} + +static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, + struct uwb_beacon_frame *bf) +{ + char macbuf[UWB_ADDR_STRSIZE]; + char devbuf[UWB_ADDR_STRSIZE]; + char dstbuf[UWB_ADDR_STRSIZE]; + + uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier); + uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr); + uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr); + dev_info(&rc->uwb_dev.dev, + "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n", + devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset, + bf->Beacon_Slot_Number, macbuf); +} + +/* + * @bce: beacon cache entry, referenced + */ +ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce, + char *buf, size_t size) +{ + ssize_t result = 0; + struct uwb_rc_evt_beacon *be; + struct uwb_beacon_frame *bf; + struct uwb_buf_ctx ctx = { + .buf = buf, + .bytes = 0, + .size = size + }; + + mutex_lock(&bce->mutex); + be = bce->be; + if (be == NULL) + goto out; + bf = (void *) be->BeaconInfo; + uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx, + bf->IEData, be->wBeaconInfoLength - sizeof(*bf)); + result = ctx.bytes; +out: + mutex_unlock(&bce->mutex); + return result; +} + +/* + * Verify that the beacon event, frame and IEs are ok + */ +static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt, + struct uwb_rc_evt_beacon *be) +{ + int result = -EINVAL; + struct uwb_beacon_frame *bf; + struct device *dev = &rc->uwb_dev.dev; + + /* Is there enough data to decode a beacon frame? */ + if (evt->notif.size < sizeof(*be) + sizeof(*bf)) { + dev_err(dev, "BEACON event: Not enough data to decode " + "(%zu vs %zu bytes needed)\n", evt->notif.size, + sizeof(*be) + sizeof(*bf)); + goto error; + } + /* FIXME: make sure beacon frame IEs are fine and that the whole thing + * is consistent */ + result = 0; +error: + return result; +} + +/* + * Handle UWB_RC_EVT_BEACON events + * + * We check the beacon cache to see how the received beacon fares. If + * is there already we refresh the timestamp. If not we create a new + * entry. + * + * According to the WHCI and WUSB specs, only one beacon frame is + * allowed per notification block, so we don't bother about scanning + * for more. + */ +int uwbd_evt_handle_rc_beacon(struct uwb_event *evt) +{ + int result = -EINVAL; + struct uwb_rc *rc; + struct uwb_rc_evt_beacon *be; + struct uwb_beacon_frame *bf; + struct uwb_beca_e *bce; + struct device *dev = &evt->rc->uwb_dev.dev; + unsigned long last_ts; + + rc = evt->rc; + be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb); + result = uwb_verify_beacon(rc, evt, be); + if (result < 0) + return result; + + /* Ignore beacon if it is from an alien. */ + if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN || + be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) { + if (printk_ratelimit()) + dev_err(dev, "BEACON received from ALIEN. Action? \n"); + result = -ENOSYS; + return 0; + } + bf = (struct uwb_beacon_frame *) be->BeaconInfo; + + /* + * Drop beacons from devices with a NULL EUI-48 -- they cannot + * be uniquely identified. + * + * It's expected that these will all be WUSB devices and they + * have a WUSB specific connection method so ignoring them + * here shouldn't be a problem. 
+ */ + if (uwb_mac_addr_bcast(&bf->Device_Identifier)) + return 0; + + mutex_lock(&uwb_beca.mutex); + bce = __uwb_beca_find_bymac(&bf->Device_Identifier); + if (bce == NULL) { + /* Not in there, a new device is pinging */ + uwb_beacon_print(evt->rc, be, bf); + bce = __uwb_beca_add(be, bf, evt->ts_jiffies); + if (bce == NULL) { + mutex_unlock(&uwb_beca.mutex); + return -ENOMEM; + } + } + mutex_unlock(&uwb_beca.mutex); + + mutex_lock(&bce->mutex); + /* purge old beacon data */ + kfree(bce->be); + + last_ts = bce->ts_jiffies; + + /* Update commonly used fields */ + bce->ts_jiffies = evt->ts_jiffies; + bce->be = be; + bce->dev_addr = bf->hdr.SrcAddr; + bce->mac_addr = &bf->Device_Identifier; + be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset); + be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength); + stats_add_sample(&bce->lqe_stats, be->bLQI - 7); + stats_add_sample(&bce->rssi_stats, be->bRSSI + 18); + + /* + * This might be a beacon from a new device. + */ + if (bce->uwb_dev == NULL) + uwbd_dev_onair(evt->rc, bce); + + mutex_unlock(&bce->mutex); + + return 1; /* we keep the event data */ +} + +/* + * Handle UWB_RC_EVT_BEACON_SIZE events + * + * XXXXX + */ +int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt) +{ + int result = -EINVAL; + struct device *dev = &evt->rc->uwb_dev.dev; + struct uwb_rc_evt_beacon_size *bs; + + /* Is there enough data to decode the event? */ + if (evt->notif.size < sizeof(*bs)) { + dev_err(dev, "BEACON SIZE notification: Not enough data to " + "decode (%zu vs %zu bytes needed)\n", + evt->notif.size, sizeof(*bs)); + goto error; + } + bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb); + if (0) + dev_info(dev, "Beacon size changed to %u bytes " + "(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize)); + else { + /* temporary hack until we do something with this message... */ + static unsigned count; + if (++count % 1000 == 0) + dev_info(dev, "Beacon size changed %u times " + "(FIXME: action?)\n", count); + } + result = 0; +error: + return result; +} + +/** + * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event + * @evt: the BP_SLOT_CHANGE notification from the radio controller + * + * If the event indicates that no beacon period slots were available + * then radio controller has transitioned to a non-beaconing state. + * Otherwise, simply save the current beacon slot. 
+ */ +int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt) +{ + struct uwb_rc *rc = evt->rc; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_rc_evt_bp_slot_change *bpsc; + + if (evt->notif.size < sizeof(*bpsc)) { + dev_err(dev, "BP SLOT CHANGE event: Not enough data\n"); + return -EINVAL; + } + bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb); + + mutex_lock(&rc->uwb_dev.mutex); + if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) { + dev_info(dev, "stopped beaconing: No free slots in BP\n"); + rc->beaconing = -1; + } else + rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc); + mutex_unlock(&rc->uwb_dev.mutex); + + return 0; +} + +/** + * Handle UWB_RC_EVT_BPOIE_CHANGE events + * + * XXXXX + */ +struct uwb_ie_bpo { + struct uwb_ie_hdr hdr; + u8 bp_length; + u8 data[]; +} __attribute__((packed)); + +int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt) +{ + int result = -EINVAL; + struct device *dev = &evt->rc->uwb_dev.dev; + struct uwb_rc_evt_bpoie_change *bpoiec; + struct uwb_ie_bpo *bpoie; + static unsigned count; /* FIXME: this is a temp hack */ + size_t iesize; + + /* Is there enough data to decode it? */ + if (evt->notif.size < sizeof(*bpoiec)) { + dev_err(dev, "BPOIEC notification: Not enough data to " + "decode (%zu vs %zu bytes needed)\n", + evt->notif.size, sizeof(*bpoiec)); + goto error; + } + bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb); + iesize = le16_to_cpu(bpoiec->wBPOIELength); + if (iesize < sizeof(*bpoie)) { + dev_err(dev, "BPOIEC notification: Not enough IE data to " + "decode (%zu vs %zu bytes needed)\n", + iesize, sizeof(*bpoie)); + goto error; + } + if (++count % 1000 == 0) /* Lame placeholder */ + dev_info(dev, "BPOIE: %u changes received\n", count); + /* + * FIXME: At this point we should go over all the IEs in the + * bpoiec->BPOIE array and act on each. + */ + result = 0; +error: + return result; +} + +/** + * uwb_bg_joined - is the RC in a beacon group? + * @rc: the radio controller + * + * Returns true if the radio controller is in a beacon group (even if + * it's the sole member). + */ +int uwb_bg_joined(struct uwb_rc *rc) +{ + return rc->beaconing != -1; +} +EXPORT_SYMBOL_GPL(uwb_bg_joined); + +/* + * Print beaconing state. + */ +static ssize_t uwb_rc_beacon_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_rc *rc = uwb_dev->rc; + ssize_t result; + + mutex_lock(&rc->uwb_dev.mutex); + result = sprintf(buf, "%d\n", rc->beaconing); + mutex_unlock(&rc->uwb_dev.mutex); + return result; +} + +/* + * Start beaconing on the specified channel, or stop beaconing. + * + * The BPST offset of when to start searching for a beacon group to + * join may be specified. + */ +static ssize_t uwb_rc_beacon_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_rc *rc = uwb_dev->rc; + int channel; + unsigned bpst_offset = 0; + ssize_t result = -EINVAL; + + result = sscanf(buf, "%d %u\n", &channel, &bpst_offset); + if (result >= 1) + result = uwb_rc_beacon(rc, channel, bpst_offset); + + return result < 0 ? 
result : size; +} +DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store); diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c new file mode 100644 index 0000000..d54fe09 --- /dev/null +++ b/drivers/uwb/ie.c @@ -0,0 +1,570 @@ +/* + * Ultra Wide Band + * Information Element Handling + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ + +#include "uwb-internal.h" +#define D_LOCAL 0 +#include + +/** + * uwb_ie_next - get the next IE in a buffer + * @ptr: start of the buffer containing the IE data + * @len: length of the buffer + * + * Both @ptr and @len are updated so subsequent calls to uwb_ie_next() + * will get the next IE. + * + * NULL is returned (and @ptr and @len will not be updated) if there + * are no more IEs in the buffer or the buffer is too short. + */ +struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len) +{ + struct uwb_ie_hdr *hdr; + size_t ie_len; + + if (*len < sizeof(struct uwb_ie_hdr)) + return NULL; + + hdr = *ptr; + ie_len = sizeof(struct uwb_ie_hdr) + hdr->length; + + if (*len < ie_len) + return NULL; + + *ptr += ie_len; + *len -= ie_len; + + return hdr; +} +EXPORT_SYMBOL_GPL(uwb_ie_next); + +/** + * Get the IEs that a radio controller is sending in its beacon + * + * @uwb_rc: UWB Radio Controller + * @returns: Size read from the system + * + * We don't need to lock the uwb_rc's mutex because we don't modify + * anything. Once done with the iedata buffer, call + * uwb_rc_ie_release(iedata). Don't call kfree on it. 
+ */ +ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) +{ + ssize_t result; + struct device *dev = &uwb_rc->uwb_dev.dev; + struct uwb_rccb *cmd = NULL; + struct uwb_rceb *reply = NULL; + struct uwb_rc_evt_get_ie *get_ie; + + d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie); + result = -ENOMEM; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + goto error_kzalloc; + cmd->bCommandType = UWB_RC_CET_GENERAL; + cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); + result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), + UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, + &reply); + if (result < 0) + goto error_cmd; + get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); + if (result < sizeof(*get_ie)) { + dev_err(dev, "not enough data returned for decoding GET IE " + "(%zu bytes received vs %zu needed)\n", + result, sizeof(*get_ie)); + result = -EINVAL; + } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { + dev_err(dev, "not enough data returned for decoding GET IE " + "payload (%zu bytes received vs %zu needed)\n", result, + sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); + result = -EINVAL; + } else + *pget_ie = get_ie; +error_cmd: + kfree(cmd); +error_kzalloc: + d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result); + return result; +} +EXPORT_SYMBOL_GPL(uwb_rc_get_ie); + + +/* + * Given a pointer to an IE, print it in ASCII/hex followed by a new line + * + * @ie_hdr: pointer to the IE header. Length is in there, and it is + * guaranteed that the ie_hdr->length bytes following it are + * safely accesible. + * + * @_data: context data passed from uwb_ie_for_each(), an struct output_ctx + */ +int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, + size_t offset, void *_ctx) +{ + struct uwb_buf_ctx *ctx = _ctx; + const u8 *pl = (void *)(ie_hdr + 1); + u8 pl_itr; + + ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes, + "%02x %02x ", (unsigned) ie_hdr->element_id, + (unsigned) ie_hdr->length); + pl_itr = 0; + while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size) + ctx->bytes += scnprintf(ctx->buf + ctx->bytes, + ctx->size - ctx->bytes, + "%02x ", (unsigned) pl[pl_itr++]); + if (ctx->bytes < ctx->size) + ctx->buf[ctx->bytes++] = '\n'; + return 0; +} +EXPORT_SYMBOL_GPL(uwb_ie_dump_hex); + + +/** + * Verify that a pointer in a buffer points to valid IE + * + * @start: pointer to start of buffer in which IE appears + * @itr: pointer to IE inside buffer that will be verified + * @top: pointer to end of buffer + * + * @returns: 0 if IE is valid, <0 otherwise + * + * Verification involves checking that the buffer can contain a + * header and the amount of data reported in the IE header can be found in + * the buffer. 
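A worked example of what uwb_rc_ie_verify() below enforces, assuming the two-byte IE header declared in uwb.h (element ID byte followed by a payload-length byte); the IDs and payload bytes here are arbitrary:

	/*
	 * buf  = { 0x10, 0x02, 0xaa, 0xbb,  0x12, 0x03, 0xcc }, size = 7
	 *
	 * IE #1: id 0x10, length 2, payload { 0xaa, 0xbb }  -> 4 bytes, OK
	 * IE #2: id 0x12, length 3, but only 1 byte remains -> rejected by
	 *        the "not enough data for payload" branch, so callers such
	 *        as uwb_ie_for_each() stop walking instead of reading past
	 *        the end of the buffer.
	 */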
+ */ +static +int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start, + const void *itr, const void *top) +{ + struct device *dev = &uwb_dev->dev; + const struct uwb_ie_hdr *ie_hdr; + + if (top - itr < sizeof(*ie_hdr)) { + dev_err(dev, "Bad IE: no data to decode header " + "(%zu bytes left vs %zu needed) at offset %zu\n", + top - itr, sizeof(*ie_hdr), itr - start); + return -EINVAL; + } + ie_hdr = itr; + itr += sizeof(*ie_hdr); + if (top - itr < ie_hdr->length) { + dev_err(dev, "Bad IE: not enough data for payload " + "(%zu bytes left vs %zu needed) at offset %zu\n", + top - itr, (size_t)ie_hdr->length, + (void *)ie_hdr - start); + return -EINVAL; + } + return 0; +} + + +/** + * Walk a buffer filled with consecutive IE's a buffer + * + * @uwb_dev: UWB device this IEs belong to (for err messages mainly) + * + * @fn: function to call with each IE; if it returns 0, we keep + * traversing the buffer. If it returns !0, we'll stop and return + * that value. + * + * @data: pointer passed to @fn + * + * @buf: buffer where the consecutive IEs are located + * + * @size: size of @buf + * + * Each IE is checked for basic correctness (there is space left for + * the header and the payload). If that test is failed, we stop + * processing. For every good IE, @fn is called. + */ +ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, + const void *buf, size_t size) +{ + ssize_t result = 0; + const struct uwb_ie_hdr *ie_hdr; + const void *itr = buf, *top = itr + size; + + while (itr < top) { + if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0) + break; + ie_hdr = itr; + itr += sizeof(*ie_hdr) + ie_hdr->length; + result = fn(uwb_dev, ie_hdr, itr - buf, data); + if (result != 0) + break; + } + return result; +} +EXPORT_SYMBOL_GPL(uwb_ie_for_each); + + +/** + * Replace all IEs currently being transmitted by a device + * + * @cmd: pointer to the SET-IE command with the IEs to set + * @size: size of @buf + */ +int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd) +{ + int result; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_rc_evt_set_ie reply; + + reply.rceb.bEventType = UWB_RC_CET_GENERAL; + reply.rceb.wEvent = UWB_RC_CMD_SET_IE; + result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb, + sizeof(*cmd) + le16_to_cpu(cmd->wIELength), + &reply.rceb, sizeof(reply)); + if (result < 0) + goto error_cmd; + else if (result != sizeof(reply)) { + dev_err(dev, "SET-IE: not enough data to decode reply " + "(%d bytes received vs %zu needed)\n", + result, sizeof(reply)); + result = -EIO; + } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(dev, "SET-IE: command execution failed: %s (%d)\n", + uwb_rc_strerror(reply.bResultCode), reply.bResultCode); + result = -EIO; + } else + result = 0; +error_cmd: + return result; +} + +/** + * Determine by IE id if IE is host settable + * WUSB 1.0 [8.6.2.8 Table 8.85] + * + * EXCEPTION: + * All but UWB_IE_WLP appears in Table 8.85 from WUSB 1.0. Setting this IE + * is required for the WLP substack to perform association with its WSS so + * we hope that the WUSB spec will be changed to reflect this. 
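To illustrate the callback contract described above (return 0 to keep walking; any other value stops the walk and is returned by uwb_ie_for_each()), a hypothetical client in the style of uwb_ie_dump_hex() and __acc_size(); as in those two, the offset argument (which points just past the current IE) is simply ignored:

struct demo_count_ctx {
	u8 element_id;		/* which IE to count */
	unsigned count;		/* how many were seen */
};

static int demo_count_ie(struct uwb_dev *uwb_dev,
			 const struct uwb_ie_hdr *ie_hdr,
			 size_t offset, void *_ctx)
{
	struct demo_count_ctx *ctx = _ctx;

	if (ie_hdr->element_id == ctx->element_id)
		ctx->count++;
	return 0;		/* keep traversing the whole buffer */
}

A caller would fill in ctx.element_id (e.g. UWB_APP_SPEC_IE), run uwb_ie_for_each(&rc->uwb_dev, demo_count_ie, &ctx, ies, size) and read ctx.count afterwards.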
+ */ +static +int uwb_rc_ie_is_host_settable(enum uwb_ie element_id) +{ + if (element_id == UWB_PCA_AVAILABILITY || + element_id == UWB_BP_SWITCH_IE || + element_id == UWB_MAC_CAPABILITIES_IE || + element_id == UWB_PHY_CAPABILITIES_IE || + element_id == UWB_APP_SPEC_PROBE_IE || + element_id == UWB_IDENTIFICATION_IE || + element_id == UWB_MASTER_KEY_ID_IE || + element_id == UWB_IE_WLP || + element_id == UWB_APP_SPEC_IE) + return 1; + return 0; +} + + +/** + * Extract Host Settable IEs from IE + * + * @ie_data: pointer to buffer containing all IEs + * @size: size of buffer + * + * @returns: length of buffer that only includes host settable IEs + * + * Given a buffer of IEs we move all Host Settable IEs to front of buffer + * by overwriting the IEs that are not Host Settable. + * Buffer length is adjusted accordingly. + */ +static +ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev, + void *ie_data, size_t size) +{ + size_t new_len = size; + struct uwb_ie_hdr *ie_hdr; + size_t ie_length; + void *itr = ie_data, *top = itr + size; + + while (itr < top) { + if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0) + break; + ie_hdr = itr; + ie_length = sizeof(*ie_hdr) + ie_hdr->length; + if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) { + itr += ie_length; + } else { + memmove(itr, itr + ie_length, top - (itr + ie_length)); + new_len -= ie_length; + top -= ie_length; + } + } + return new_len; +} + + +/* Cleanup the whole IE management subsystem */ +void uwb_rc_ie_init(struct uwb_rc *uwb_rc) +{ + mutex_init(&uwb_rc->ies_mutex); +} + + +/** + * Set up cache for host settable IEs currently being transmitted + * + * First we just call GET-IE to get the current IEs being transmitted + * (or we workaround and pretend we did) and (because the format is + * the same) reuse that as the IE cache (with the command prefix, as + * explained in 'struct uwb_rc'). + * + * @returns: size of cache created + */ +ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc) +{ + struct device *dev = &uwb_rc->uwb_dev.dev; + ssize_t result; + size_t capacity; + struct uwb_rc_evt_get_ie *ie_info; + + d_fnstart(3, dev, "(%p)\n", uwb_rc); + mutex_lock(&uwb_rc->ies_mutex); + result = uwb_rc_get_ie(uwb_rc, &ie_info); + if (result < 0) + goto error_get_ie; + capacity = result; + d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result, + (size_t)le16_to_cpu(ie_info->wIELength), ie_info); + + /* Remove IEs that host should not set. 
*/ + result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev, + ie_info->IEData, le16_to_cpu(ie_info->wIELength)); + if (result < 0) + goto error_parse; + d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result); + uwb_rc->ies = (void *) ie_info; + uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; + uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); + uwb_rc->ies_capacity = capacity; + d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n", + ie_info, result, capacity); + result = 0; +error_parse: +error_get_ie: + mutex_unlock(&uwb_rc->ies_mutex); + d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result); + return result; +} + + +/* Cleanup the whole IE management subsystem */ +void uwb_rc_ie_release(struct uwb_rc *uwb_rc) +{ + kfree(uwb_rc->ies); + uwb_rc->ies = NULL; + uwb_rc->ies_capacity = 0; +} + + +static +int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, + size_t offset, void *_ctx) +{ + size_t *acc_size = _ctx; + *acc_size += sizeof(*ie_hdr) + ie_hdr->length; + d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size); + return 0; +} + + +/** + * Add a new IE to IEs currently being transmitted by device + * + * @ies: the buffer containing the new IE or IEs to be added to + * the device's beacon. The buffer will be verified for + * consistence (meaning the headers should be right) and + * consistent with the buffer size. + * @size: size of @ies (in bytes, total buffer size) + * @returns: 0 if ok, <0 errno code on error + * + * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB + * after the device sent the first beacon that includes the IEs specified + * in the SET IE command. We thus cannot send this command if the device is + * not beaconing. Instead, a SET IE command will be sent later right after + * we start beaconing. + * + * Setting an IE on the device will overwrite all current IEs in device. So + * we take the current IEs being transmitted by the device, append the + * new one, and call SET IE with all the IEs needed. + * + * The local IE cache will only be updated with the new IE if SET IE + * completed successfully. 
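+ *
+ * For instance, a caller could append a host-settable application
+ * specific IE along these lines (sketch only; the payload bytes and
+ * their length are made up for the example):
+ *
+ * struct {
+ *         struct uwb_ie_hdr hdr;
+ *         u8 data[4];
+ * } __attribute__((packed)) app_ie = {
+ *         .hdr.element_id = UWB_APP_SPEC_IE,
+ *         .hdr.length = sizeof(app_ie.data),
+ *         .data = { 0x01, 0x02, 0x03, 0x04 },
+ * };
+ *
+ * result = uwb_rc_ie_add(uwb_rc, &app_ie.hdr, sizeof(app_ie));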
+ */ +int uwb_rc_ie_add(struct uwb_rc *uwb_rc, + const struct uwb_ie_hdr *ies, size_t size) +{ + int result = 0; + struct device *dev = &uwb_rc->uwb_dev.dev; + struct uwb_rc_cmd_set_ie *new_ies; + size_t ies_size, total_size, acc_size = 0; + + if (uwb_rc->ies == NULL) + return -ESHUTDOWN; + uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size); + if (acc_size != size) { + dev_err(dev, "BUG: bad IEs, misconstructed headers " + "[%zu bytes reported vs %zu calculated]\n", + size, acc_size); + WARN_ON(1); + return -EINVAL; + } + mutex_lock(&uwb_rc->ies_mutex); + ies_size = le16_to_cpu(uwb_rc->ies->wIELength); + total_size = sizeof(*uwb_rc->ies) + ies_size; + if (total_size + size > uwb_rc->ies_capacity) { + d_printf(4, dev, "Reallocating IE cache from %p capacity %zu " + "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity, + total_size + size); + new_ies = kzalloc(total_size + size, GFP_KERNEL); + if (new_ies == NULL) { + dev_err(dev, "No memory for adding new IE\n"); + result = -ENOMEM; + goto error_alloc; + } + memcpy(new_ies, uwb_rc->ies, total_size); + uwb_rc->ies_capacity = total_size + size; + kfree(uwb_rc->ies); + uwb_rc->ies = new_ies; + d_printf(4, dev, "New IE cache at %p capacity %zu\n", + uwb_rc->ies, uwb_rc->ies_capacity); + } + memcpy((void *)uwb_rc->ies + total_size, ies, size); + uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size); + if (uwb_rc->beaconing != -1) { + result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); + if (result < 0) { + dev_err(dev, "Cannot set new IE on device: %d\n", + result); + uwb_rc->ies->wIELength = cpu_to_le16(ies_size); + } else + result = 0; + } + d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n", + le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity, + uwb_rc->ies); +error_alloc: + mutex_unlock(&uwb_rc->ies_mutex); + return result; +} +EXPORT_SYMBOL_GPL(uwb_rc_ie_add); + + +/* + * Remove an IE from internal cache + * + * We are dealing with our internal IE cache so no need to verify that the + * IEs are valid (it has been done already). + * + * Should be called with ies_mutex held + * + * We do not break out once an IE is found in the cache. It is currently + * possible to have more than one IE with the same ID included in the + * beacon. We don't reallocate, we just mark the size smaller. 
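+ *
+ * For example, removing UWB_APP_SPEC_IE from a cache laid out as
+ * [IDENTIFICATION IE | APP SPEC IE | APP SPEC IE] shifts the remaining
+ * data down over each match, leaving [IDENTIFICATION IE]; wIELength is
+ * reduced by the total size of the removed IEs while the allocation
+ * keeps its old capacity.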
+ */ +static +int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) +{ + struct uwb_ie_hdr *ie_hdr; + size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength); + void *itr = uwb_rc->ies->IEData; + void *top = itr + new_len; + + while (itr < top) { + ie_hdr = itr; + if (ie_hdr->element_id != to_remove) { + itr += sizeof(*ie_hdr) + ie_hdr->length; + } else { + int ie_length; + ie_length = sizeof(*ie_hdr) + ie_hdr->length; + if (top - itr != ie_length) + memmove(itr, itr + ie_length, top - itr + ie_length); + top -= ie_length; + new_len -= ie_length; + } + } + uwb_rc->ies->wIELength = cpu_to_le16(new_len); + return 0; +} + + +/** + * Remove an IE currently being transmitted by device + * + * @element_id: id of IE to be removed from device's beacon + */ +int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) +{ + struct device *dev = &uwb_rc->uwb_dev.dev; + int result; + + if (uwb_rc->ies == NULL) + return -ESHUTDOWN; + mutex_lock(&uwb_rc->ies_mutex); + result = uwb_rc_ie_cache_rm(uwb_rc, element_id); + if (result < 0) + dev_err(dev, "Cannot remove IE from cache.\n"); + if (uwb_rc->beaconing != -1) { + result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); + if (result < 0) + dev_err(dev, "Cannot set new IE on device.\n"); + } + mutex_unlock(&uwb_rc->ies_mutex); + return result; +} +EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); + + +/** + * Create and set new Identification IE + * + * Currently only sets the Vendor ID. The Vendor ID is set from the OUI, + * which is obtained from the first three bytes from the MAC address. + */ +int uwb_rc_set_identification_ie(struct uwb_rc *uwb_rc) +{ + struct { + struct uwb_identification_ie id_ie; + struct uwb_dev_info dev_info; + struct uwb_vendor_id vendor_id; + } ie_data; + + ie_data.id_ie.hdr.element_id = UWB_IDENTIFICATION_IE; + ie_data.id_ie.hdr.length = sizeof(struct uwb_dev_info) + + sizeof(struct uwb_vendor_id); + + ie_data.dev_info.type = UWB_DEV_INFO_VENDOR_ID; + ie_data.dev_info.length = sizeof(struct uwb_vendor_id); + + ie_data.vendor_id.data[0] = uwb_rc->uwb_dev.mac_addr.data[0]; + ie_data.vendor_id.data[1] = uwb_rc->uwb_dev.mac_addr.data[1]; + ie_data.vendor_id.data[2] = uwb_rc->uwb_dev.mac_addr.data[2]; + + return uwb_rc_ie_add(uwb_rc, &ie_data.id_ie.hdr, sizeof(ie_data)); +} diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c new file mode 100644 index 0000000..2d27074 --- /dev/null +++ b/drivers/uwb/scan.c @@ -0,0 +1,133 @@ +/* + * Ultra Wide Band + * Scanning management + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * + * FIXME: docs + * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal + * with each other. Currently seems that START_BEACON while + * SCAN_ONLY will cancel the scan, so we need to update the + * state here. Clarification request sent by email on + * 10/05/2005. 
+ * 10/28/2005 No clear answer heard--maybe we'll hack the API + * so that when we start beaconing, if the HC is + * scanning in a mode not compatible with beaconing + * we just fail. + */ + +#include +#include +#include "uwb-internal.h" + + +/** + * Start/stop scanning in a radio controller + * + * @rc: UWB Radio Controlller + * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12] + * @type: Type of scanning to do. + * @bpst_offset: value at which to start scanning (if type == + * UWB_SCAN_ONLY_STARTTIME) + * @returns: 0 if ok, < 0 errno code on error + * + * We put the command on kmalloc'ed memory as some arches cannot do + * USB from the stack. The reply event is copied from an stage buffer, + * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. + */ +int uwb_rc_scan(struct uwb_rc *rc, + unsigned channel, enum uwb_scan_type type, + unsigned bpst_offset) +{ + int result; + struct uwb_rc_cmd_scan *cmd; + struct uwb_rc_evt_confirm reply; + + result = -ENOMEM; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + goto error_kzalloc; + mutex_lock(&rc->uwb_dev.mutex); + cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; + cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN); + cmd->bChannelNumber = channel; + cmd->bScanState = type; + cmd->wStartTime = cpu_to_le16(bpst_offset); + reply.rceb.bEventType = UWB_RC_CET_GENERAL; + reply.rceb.wEvent = UWB_RC_CMD_SCAN; + result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd), + &reply.rceb, sizeof(reply)); + if (result < 0) + goto error_cmd; + if (reply.bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(&rc->uwb_dev.dev, + "SCAN: command execution failed: %s (%d)\n", + uwb_rc_strerror(reply.bResultCode), reply.bResultCode); + result = -EIO; + goto error_cmd; + } + rc->scanning = channel; + rc->scan_type = type; +error_cmd: + mutex_unlock(&rc->uwb_dev.mutex); + kfree(cmd); +error_kzalloc: + return result; +} + +/* + * Print scanning state + */ +static ssize_t uwb_rc_scan_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_rc *rc = uwb_dev->rc; + ssize_t result; + + mutex_lock(&rc->uwb_dev.mutex); + result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type); + mutex_unlock(&rc->uwb_dev.mutex); + return result; +} + +/* + * + */ +static ssize_t uwb_rc_scan_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + struct uwb_rc *rc = uwb_dev->rc; + unsigned channel; + unsigned type; + unsigned bpst_offset = 0; + ssize_t result = -EINVAL; + + result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset); + if (result >= 2 && type < UWB_SCAN_TOP) + result = uwb_rc_scan(rc, channel, type, bpst_offset); + + return result < 0 ? result : size; +} + +/** Radio Control sysfs interface (declaration) */ +DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store); -- cgit v0.10.2 From 8cc13a09474bb30d15dbf449767bb6d0198a8bf8 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:09 +0100 Subject: uwb: add the UWB stack (reservation manager) DRP and reservation management. Signed-off-by: David Vrabel diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c new file mode 100644 index 0000000..3febd855 --- /dev/null +++ b/drivers/uwb/drp-avail.c @@ -0,0 +1,288 @@ +/* + * Ultra Wide Band + * DRP availability management + * + * Copyright (C) 2005-2006 Intel Corporation + * Reinette Chatre + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + * Manage DRP Availability (the MAS available for DRP + * reservations). Thus: + * + * - Handle DRP Availability Change notifications + * + * - Allow the reservation manager to indicate MAS reserved/released + * by local (owned by/targeted at the radio controller) + * reservations. + * + * - Based on the two sources above, generate a DRP Availability IE to + * be included in the beacon. + * + * See also the documentation for struct uwb_drp_avail. + */ + +#include +#include +#include +#include +#include "uwb-internal.h" + +/** + * uwb_drp_avail_init - initialize an RC's MAS availability + * + * All MAS are available initially. The RC will inform use which + * slots are used for the BP (it may change in size). + */ +void uwb_drp_avail_init(struct uwb_rc *rc) +{ + bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS); + bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS); + bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS); +} + +/* + * Determine MAS available for new local reservations. + * + * avail = global & local & pending + */ +static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) +{ + bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); + bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); +} + +/** + * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation + * @rc: the radio controller + * @mas: the MAS to reserve + * + * Returns 0 on success, or -EBUSY if the MAS requested aren't available. 
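+ *
+ * A typical caller (e.g. the reservation manager allocating MAS for a
+ * new reservation) retries with a different candidate set on -EBUSY,
+ * roughly:
+ *
+ * if (uwb_drp_avail_reserve_pending(rc, &mas) == 0)
+ *         ... propose this MAS set to the peer ...
+ * else
+ *         ... try a smaller or different allocation ...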
+ */ +int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas) +{ + struct uwb_mas_bm avail; + + uwb_drp_available(rc, &avail); + if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS)) + return -EBUSY; + + bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); + return 0; +} + +/** + * uwb_drp_avail_reserve - reserve MAS for an established reservation + * @rc: the radio controller + * @mas: the MAS to reserve + */ +void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas) +{ + bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); + bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); + rc->drp_avail.ie_valid = false; +} + +/** + * uwb_drp_avail_release - release MAS from a pending or established reservation + * @rc: the radio controller + * @mas: the MAS to release + */ +void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas) +{ + bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); + bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); + rc->drp_avail.ie_valid = false; +} + +/** + * uwb_drp_avail_ie_update - update the DRP Availability IE + * @rc: the radio controller + * + * avail = global & local + */ +void uwb_drp_avail_ie_update(struct uwb_rc *rc) +{ + struct uwb_mas_bm avail; + + bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); + + rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY; + rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8; + uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail); + rc->drp_avail.ie_valid = true; +} + +/** + * Create an unsigned long from a buffer containing a byte stream. + * + * @array: pointer to buffer + * @itr: index of buffer from where we start + * @len: the buffer's remaining size may not be exact multiple of + * sizeof(unsigned long), @len is the length of buffer that needs + * to be converted. This will be sizeof(unsigned long) or smaller + * (BUG if not). If it is smaller then we will pad the remaining + * space of the result with zeroes. + */ +static +unsigned long get_val(u8 *array, size_t itr, size_t len) +{ + unsigned long val = 0; + size_t top = itr + len; + + BUG_ON(len > sizeof(val)); + + while (itr < top) { + val <<= 8; + val |= array[top - 1]; + top--; + } + val <<= 8 * (sizeof(val) - len); /* padding */ + return val; +} + +/** + * Initialize bitmap from data buffer. + * + * The bitmap to be converted could come from a IE, for example a + * DRP Availability IE. + * From ECMA-368 1.0 [16.8.7]: " + * octets: 1 1 N * (0 to 32) + * Element ID Length (=N) DRP Availability Bitmap + * + * The DRP Availability Bitmap field is up to 256 bits long, one + * bit for each MAS in the superframe, where the least-significant + * bit of the field corresponds to the first MAS in the superframe + * and successive bits correspond to successive MASs." + * + * The DRP Availability bitmap is in octets from 0 to 32, so octet + * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32 + * octets, the bits in octets not included at the end of the bitmap are + * treated as zero. In this case (when the bitmap is smaller than 32 + * octets) the MAS represented range from MAS 1 to MAS (size of bitmap) + * with the last octet still containing bits for MAS 1-8, etc. 
+ * + * For example: + * F00F0102 03040506 0708090A 0B0C0D0E 0F010203 + * ^^^^ + * |||| + * |||| + * |||\LSB of byte is MAS 9 + * ||\MSB of byte is MAS 16 + * |\LSB of first byte is MAS 1 + * \ MSB of byte is MAS 8 + * + * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11] + * + * The resulting bitmap will have the following mapping: + * bit position 0 == MAS 1 + * bit position 1 == MAS 2 + * ... + * bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS + * + * @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP) + * @buffer: pointer to buffer containing bitmap data in big endian + * format (MSB first) + * @buffer_size:number of bytes with which bitmap should be initialized + */ +static +void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer, + size_t buffer_size) +{ + u8 *buffer = _buffer; + size_t itr, len; + unsigned long val; + + itr = 0; + while (itr < buffer_size) { + len = buffer_size - itr >= sizeof(val) ? + sizeof(val) : buffer_size - itr; + val = get_val(buffer, itr, len); + bmp_itr[itr / sizeof(val)] = val; + itr += sizeof(val); + } +} + + +/** + * Extract DRP Availability bitmap from the notification. + * + * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes + * We convert that to our internal representation. + */ +static +int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp) +{ + struct device *dev = &evt->rc->uwb_dev.dev; + struct uwb_rc_evt_drp_avail *drp_evt; + int result = -EINVAL; + + /* Is there enough data to decode the event? */ + if (evt->notif.size < sizeof(*drp_evt)) { + dev_err(dev, "DRP Availability Change: Not enough " + "data to decode event [%zu bytes, %zu " + "needed]\n", evt->notif.size, sizeof(*drp_evt)); + goto error; + } + drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb); + buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8); + result = 0; +error: + return result; +} + + +/** + * Process an incoming DRP Availability notification. + * + * @evt: Event information (packs the actual event data, which + * radio controller it came to, etc). + * + * @returns: 0 on success (so uwbd() frees the event buffer), < 0 + * on error. + * + * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that + * the MAS slot is available, bits set to ZERO indicate that the slot + * is busy. + * + * So we clear available slots, we set used slots :) + * + * The notification only marks non-availability based on the BP and + * received DRP IEs that are not for this radio controller. A copy of + * this bitmap is needed to generate the real availability (which + * includes local and pending reservations). + * + * The DRP Availability IE that this radio controller emits will need + * to be updated. + */ +int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt) +{ + int result; + struct uwb_rc *rc = evt->rc; + DECLARE_BITMAP(bmp, UWB_NUM_MAS); + + result = uwbd_evt_get_drp_avail(evt, bmp); + if (result < 0) + return result; + + mutex_lock(&rc->rsvs_mutex); + bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); + rc->drp_avail.ie_valid = false; + mutex_unlock(&rc->rsvs_mutex); + + uwb_rsv_sched_update(rc); + + return 0; +} diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c new file mode 100644 index 0000000..882724c --- /dev/null +++ b/drivers/uwb/drp-ie.c @@ -0,0 +1,232 @@ +/* + * UWB DRP IE management. + * + * Copyright (C) 2005-2006 Intel Corporation + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include + +#include "uwb-internal.h" + +/* + * Allocate a DRP IE. + * + * To save having to free/allocate a DRP IE when its MAS changes, + * enough memory is allocated for the maxiumum number of DRP + * allocation fields. This gives an overhead per reservation of up to + * (UWB_NUM_ZONES - 1) * 4 = 60 octets. + */ +static struct uwb_ie_drp *uwb_drp_ie_alloc(void) +{ + struct uwb_ie_drp *drp_ie; + unsigned tiebreaker; + + drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + + UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), + GFP_KERNEL); + if (drp_ie) { + drp_ie->hdr.element_id = UWB_IE_DRP; + + get_random_bytes(&tiebreaker, sizeof(unsigned)); + uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1); + } + return drp_ie; +} + + +/* + * Fill a DRP IE's allocation fields from a MAS bitmap. + */ +static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie, + struct uwb_mas_bm *mas) +{ + int z, i, num_fields = 0, next = 0; + struct uwb_drp_alloc *zones; + __le16 current_bmp; + DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS); + DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE); + + zones = drp_ie->allocs; + + bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS); + + /* Determine unique MAS bitmaps in zones from bitmap. */ + for (z = 0; z < UWB_NUM_ZONES; z++) { + bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE); + if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) { + bool found = false; + current_bmp = (__le16) *tmp_mas_bm; + for (i = 0; i < next; i++) { + if (current_bmp == zones[i].mas_bm) { + zones[i].zone_bm |= 1 << z; + found = true; + break; + } + } + if (!found) { + num_fields++; + zones[next].zone_bm = 1 << z; + zones[next].mas_bm = current_bmp; + next++; + } + } + bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS); + } + + /* Store in format ready for transmission (le16). 
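+ * E.g. an allocation of MAS 1-4 in every zone except zone 0 collapses
+ * into a single field with zone_bm == 0xfffe and mas_bm == 0x001e.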
*/ + for (i = 0; i < num_fields; i++) { + drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm); + drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm); + } + + drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr) + + num_fields * sizeof(struct uwb_drp_alloc); +} + +/** + * uwb_drp_ie_update - update a reservation's DRP IE + * @rsv: the reservation + */ +int uwb_drp_ie_update(struct uwb_rsv *rsv) +{ + struct device *dev = &rsv->rc->uwb_dev.dev; + struct uwb_ie_drp *drp_ie; + int reason_code, status; + + switch (rsv->state) { + case UWB_RSV_STATE_NONE: + kfree(rsv->drp_ie); + rsv->drp_ie = NULL; + return 0; + case UWB_RSV_STATE_O_INITIATED: + reason_code = UWB_DRP_REASON_ACCEPTED; + status = 0; + break; + case UWB_RSV_STATE_O_PENDING: + reason_code = UWB_DRP_REASON_ACCEPTED; + status = 0; + break; + case UWB_RSV_STATE_O_MODIFIED: + reason_code = UWB_DRP_REASON_MODIFIED; + status = 1; + break; + case UWB_RSV_STATE_O_ESTABLISHED: + reason_code = UWB_DRP_REASON_ACCEPTED; + status = 1; + break; + case UWB_RSV_STATE_T_ACCEPTED: + reason_code = UWB_DRP_REASON_ACCEPTED; + status = 1; + break; + case UWB_RSV_STATE_T_DENIED: + reason_code = UWB_DRP_REASON_DENIED; + status = 0; + break; + default: + dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state); + return -EINVAL; + } + + if (rsv->drp_ie == NULL) { + rsv->drp_ie = uwb_drp_ie_alloc(); + if (rsv->drp_ie == NULL) + return -ENOMEM; + } + drp_ie = rsv->drp_ie; + + uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); + uwb_ie_drp_set_status(drp_ie, status); + uwb_ie_drp_set_reason_code(drp_ie, reason_code); + uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); + uwb_ie_drp_set_type(drp_ie, rsv->type); + + if (uwb_rsv_is_owner(rsv)) { + switch (rsv->target.type) { + case UWB_RSV_TARGET_DEV: + drp_ie->dev_addr = rsv->target.dev->dev_addr; + break; + case UWB_RSV_TARGET_DEVADDR: + drp_ie->dev_addr = rsv->target.devaddr; + break; + } + } else + drp_ie->dev_addr = rsv->owner->dev_addr; + + uwb_drp_ie_from_bm(drp_ie, &rsv->mas); + + rsv->ie_valid = true; + return 0; +} + +/* + * Set MAS bits from given MAS bitmap in a single zone of large bitmap. + * + * We are given a zone id and the MAS bitmap of bits that need to be set in + * this zone. Note that this zone may already have bits set and this only + * adds settings - we cannot simply assign the MAS bitmap contents to the + * zone contents. We iterate over the the bits (MAS) in the zone and set the + * bits that are set in the given MAS bitmap. + */ +static +void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm) +{ + int mas; + u16 mas_mask; + + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { + mas_mask = 1 << mas; + if (mas_bm & mas_mask) + set_bit(zone * UWB_NUM_ZONES + mas, bm->bm); + } +} + +/** + * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap + * @mas: MAS bitmap that will be populated to correspond to the + * allocation fields in the DRP IE + * @drp_ie: the DRP IE that contains the allocation fields. + * + * The input format is an array of MAS allocation fields (16 bit Zone + * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section + * 16.8.6. The output is a full 256 bit MAS bitmap. + * + * We go over all the allocation fields, for each allocation field we + * know which zones are impacted. We iterate over all the zones + * impacted and call a function that will set the correct MAS bits in + * each zone. 
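+ *
+ * For example, a single allocation field with zone_bm == 0xfffe and
+ * mas_bm == 0x001e expands to MAS 1-4 being set in each of zones 1
+ * to 15 of the 256 bit bitmap.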
+ */ +void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) +{ + int numallocs = (drp_ie->hdr.length - 4) / 4; + const struct uwb_drp_alloc *alloc; + int cnt; + u16 zone_bm, mas_bm; + u8 zone; + u16 zone_mask; + + for (cnt = 0; cnt < numallocs; cnt++) { + alloc = &drp_ie->allocs[cnt]; + zone_bm = le16_to_cpu(alloc->zone_bm); + mas_bm = le16_to_cpu(alloc->mas_bm); + for (zone = 0; zone < UWB_NUM_ZONES; zone++) { + zone_mask = 1 << zone; + if (zone_bm & zone_mask) + uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm); + } + } +} diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c new file mode 100644 index 0000000..c0b1e5e --- /dev/null +++ b/drivers/uwb/drp.c @@ -0,0 +1,461 @@ +/* + * Ultra Wide Band + * Dynamic Reservation Protocol handling + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include "uwb-internal.h" + +/** + * Construct and send the SET DRP IE + * + * @rc: UWB Host controller + * @returns: >= 0 number of bytes still available in the beacon + * < 0 errno code on error. + * + * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the + * device to include in its beacon at the same time. We thus have to + * traverse all reservations and include the DRP IEs of all PENDING + * and NEGOTIATED reservations in a SET DRP command for transmission. + * + * A DRP Availability IE is appended. + * + * rc->uwb_dev.mutex is held + * + * FIXME We currently ignore the returned value indicating the remaining space + * in beacon. This could be used to deny reservation requests earlier if + * determined that they would cause the beacon space to be exceeded. + */ +static +int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) +{ + int result; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_rc_cmd_set_drp_ie *cmd; + struct uwb_rc_evt_set_drp_ie reply; + struct uwb_rsv *rsv; + int num_bytes = 0; + u8 *IEDataptr; + + result = -ENOMEM; + /* First traverse all reservations to determine memory needed. */ + list_for_each_entry(rsv, &rc->reservations, rc_node) { + if (rsv->drp_ie != NULL) + num_bytes += rsv->drp_ie->hdr.length + 2; + } + num_bytes += sizeof(rc->drp_avail.ie); + cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); + if (cmd == NULL) + goto error; + cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; + cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE); + cmd->wIELength = num_bytes; + IEDataptr = (u8 *)&cmd->IEData[0]; + + /* Next traverse all reservations to place IEs in allocated memory. 
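+ * The resulting payload is simply the concatenation
+ * [DRP IE][DRP IE]...[DRP Availability IE], num_bytes long in total.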
*/ + list_for_each_entry(rsv, &rc->reservations, rc_node) { + if (rsv->drp_ie != NULL) { + memcpy(IEDataptr, rsv->drp_ie, + rsv->drp_ie->hdr.length + 2); + IEDataptr += rsv->drp_ie->hdr.length + 2; + } + } + memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); + + reply.rceb.bEventType = UWB_RC_CET_GENERAL; + reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; + result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, + sizeof(*cmd) + num_bytes, &reply.rceb, + sizeof(reply)); + if (result < 0) + goto error_cmd; + result = le16_to_cpu(reply.wRemainingSpace); + if (reply.bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " + "failed: %s (%d). RemainingSpace in beacon " + "= %d\n", uwb_rc_strerror(reply.bResultCode), + reply.bResultCode, result); + result = -EIO; + } else { + dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " + "= %d.\n", result); + result = 0; + } +error_cmd: + kfree(cmd); +error: + return result; + +} +/** + * Send all DRP IEs associated with this host + * + * @returns: >= 0 number of bytes still available in the beacon + * < 0 errno code on error. + * + * As per the protocol we obtain the host controller device lock to access + * bandwidth structures. + */ +int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) +{ + int result; + + mutex_lock(&rc->uwb_dev.mutex); + result = uwb_rc_gen_send_drp_ie(rc); + mutex_unlock(&rc->uwb_dev.mutex); + return result; +} + +void uwb_drp_handle_timeout(struct uwb_rsv *rsv) +{ + struct device *dev = &rsv->rc->uwb_dev.dev; + + dev_dbg(dev, "reservation timeout in state %s (%d)\n", + uwb_rsv_state_str(rsv->state), rsv->state); + + switch (rsv->state) { + case UWB_RSV_STATE_O_INITIATED: + if (rsv->is_multicast) { + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); + return; + } + break; + case UWB_RSV_STATE_O_ESTABLISHED: + if (rsv->is_multicast) + return; + break; + default: + break; + } + uwb_rsv_remove(rsv); +} + +/* + * Based on the DRP IE, transition a target reservation to a new + * state. + */ +static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, + struct uwb_ie_drp *drp_ie) +{ + struct device *dev = &rc->uwb_dev.dev; + int status; + enum uwb_drp_reason reason_code; + + status = uwb_ie_drp_status(drp_ie); + reason_code = uwb_ie_drp_reason_code(drp_ie); + + if (status) { + switch (reason_code) { + case UWB_DRP_REASON_ACCEPTED: + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); + break; + case UWB_DRP_REASON_MODIFIED: + dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", + reason_code, status); + break; + default: + dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", + reason_code, status); + } + } else { + switch (reason_code) { + case UWB_DRP_REASON_ACCEPTED: + /* New reservations are handled in uwb_rsv_find(). */ + break; + case UWB_DRP_REASON_DENIED: + uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); + break; + case UWB_DRP_REASON_CONFLICT: + case UWB_DRP_REASON_MODIFIED: + dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", + reason_code, status); + break; + default: + dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", + reason_code, status); + } + } +} + +/* + * Based on the DRP IE, transition an owner reservation to a new + * state. 
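+ *
+ * For example, (status = 0, reason PENDING) moves an initiated
+ * reservation to O_PENDING, (status = 1, reason ACCEPTED) moves it to
+ * O_ESTABLISHED, and (status = 0, reason DENIED) drops it back to NONE.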
+ */ +static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, + struct uwb_ie_drp *drp_ie) +{ + struct device *dev = &rc->uwb_dev.dev; + int status; + enum uwb_drp_reason reason_code; + + status = uwb_ie_drp_status(drp_ie); + reason_code = uwb_ie_drp_reason_code(drp_ie); + + if (status) { + switch (reason_code) { + case UWB_DRP_REASON_ACCEPTED: + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); + break; + case UWB_DRP_REASON_MODIFIED: + dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", + reason_code, status); + break; + default: + dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", + reason_code, status); + } + } else { + switch (reason_code) { + case UWB_DRP_REASON_PENDING: + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING); + break; + case UWB_DRP_REASON_DENIED: + uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); + break; + case UWB_DRP_REASON_CONFLICT: + case UWB_DRP_REASON_MODIFIED: + dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", + reason_code, status); + break; + default: + dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", + reason_code, status); + } + } +} + +/* + * Process a received DRP IE, it's either for a reservation owned by + * the RC or targeted at it (or it's for a WUSB cluster reservation). + */ +static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, + struct uwb_ie_drp *drp_ie) +{ + struct uwb_rsv *rsv; + + rsv = uwb_rsv_find(rc, src, drp_ie); + if (!rsv) { + /* + * No reservation? It's either for a recently + * terminated reservation; or the DRP IE couldn't be + * processed (e.g., an invalid IE or out of memory). + */ + return; + } + + /* + * Do nothing with DRP IEs for reservations that have been + * terminated. + */ + if (rsv->state == UWB_RSV_STATE_NONE) { + uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); + return; + } + + if (uwb_ie_drp_owner(drp_ie)) + uwb_drp_process_target(rc, rsv, drp_ie); + else + uwb_drp_process_owner(rc, rsv, drp_ie); +} + + +/* + * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) + * from a device. + */ +static +void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, + size_t ielen, struct uwb_dev *src_dev) +{ + struct device *dev = &rc->uwb_dev.dev; + struct uwb_ie_hdr *ie_hdr; + void *ptr; + + ptr = drp_evt->ie_data; + for (;;) { + ie_hdr = uwb_ie_next(&ptr, &ielen); + if (!ie_hdr) + break; + + switch (ie_hdr->element_id) { + case UWB_IE_DRP_AVAILABILITY: + /* FIXME: does something need to be done with this? */ + break; + case UWB_IE_DRP: + uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); + break; + default: + dev_warn(dev, "unexpected IE in DRP notification\n"); + break; + } + } + + if (ielen > 0) + dev_warn(dev, "%d octets remaining in DRP notification\n", + (int)ielen); +} + + +/* + * Go through all the DRP IEs and find the ones that conflict with our + * reservations. + * + * FIXME: must resolve the conflict according the the rules in + * [ECMA-368]. + */ +static +void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, + size_t ielen, struct uwb_dev *src_dev) +{ + struct device *dev = &rc->uwb_dev.dev; + struct uwb_ie_hdr *ie_hdr; + struct uwb_ie_drp *drp_ie; + void *ptr; + + ptr = drp_evt->ie_data; + for (;;) { + ie_hdr = uwb_ie_next(&ptr, &ielen); + if (!ie_hdr) + break; + + drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); + + /* FIXME: check if this DRP IE conflicts. 
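+ * One possible check (sketch only, not implemented here) would be to
+ * expand the IE with uwb_drp_ie_to_bm() and test it against each
+ * local reservation, e.g.:
+ *
+ * uwb_drp_ie_to_bm(&mas, drp_ie);
+ * list_for_each_entry(rsv, &rc->reservations, rc_node)
+ *         if (bitmap_intersects(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
+ *                 ... resolve the conflict per [ECMA-368] ...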
*/ + } + + if (ielen > 0) + dev_warn(dev, "%d octets remaining in DRP notification\n", + (int)ielen); +} + + +/* + * Terminate all reservations owned by, or targeted at, 'uwb_dev'. + */ +static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) +{ + struct uwb_rsv *rsv; + + list_for_each_entry(rsv, &rc->reservations, rc_node) { + if (rsv->owner == uwb_dev + || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) + uwb_rsv_remove(rsv); + } +} + + +/** + * uwbd_evt_handle_rc_drp - handle a DRP_IE event + * @evt: the DRP_IE event from the radio controller + * + * This processes DRP notifications from the radio controller, either + * initiating a new reservation or transitioning an existing + * reservation into a different state. + * + * DRP notifications can occur for three different reasons: + * + * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as + * the target or source have been recieved. + * + * These DRP IEs could be new or for an existing reservation. + * + * If the DRP IE for an existing reservation ceases to be to + * recieved for at least mMaxLostBeacons, the reservation should be + * considered to be terminated. Note that the TERMINATE reason (see + * below) may not always be signalled (e.g., the remote device has + * two or more reservations established with the RC). + * + * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon + * group conflict with the RC's reservations. + * + * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received + * from a device (i.e., it's terminated all reservations). + * + * Only the software state of the reservations is changed; the setting + * of the radio controller's DRP IEs is done after all the events in + * an event buffer are processed. This saves waiting multiple times + * for the SET_DRP_IE command to complete. + */ +int uwbd_evt_handle_rc_drp(struct uwb_event *evt) +{ + struct device *dev = &evt->rc->uwb_dev.dev; + struct uwb_rc *rc = evt->rc; + struct uwb_rc_evt_drp *drp_evt; + size_t ielength, bytes_left; + struct uwb_dev_addr src_addr; + struct uwb_dev *src_dev; + int reason; + + /* Is there enough data to decode the event (and any IEs in + its payload)? */ + if (evt->notif.size < sizeof(*drp_evt)) { + dev_err(dev, "DRP event: Not enough data to decode event " + "[%zu bytes left, %zu needed]\n", + evt->notif.size, sizeof(*drp_evt)); + return 0; + } + bytes_left = evt->notif.size - sizeof(*drp_evt); + drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb); + ielength = le16_to_cpu(drp_evt->ie_length); + if (bytes_left != ielength) { + dev_err(dev, "DRP event: Not enough data in payload [%zu" + "bytes left, %zu declared in the event]\n", + bytes_left, ielength); + return 0; + } + + memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr)); + src_dev = uwb_dev_get_by_devaddr(rc, &src_addr); + if (!src_dev) { + /* + * A DRP notification from an unrecognized device. + * + * This is probably from a WUSB device that doesn't + * have an EUI-48 and therefore doesn't show up in the + * UWB device database. It's safe to simply ignore + * these. 
+ */ + return 0; + } + + mutex_lock(&rc->rsvs_mutex); + + reason = uwb_rc_evt_drp_reason(drp_evt); + + switch (reason) { + case UWB_DRP_NOTIF_DRP_IE_RCVD: + uwb_drp_process_all(rc, drp_evt, ielength, src_dev); + break; + case UWB_DRP_NOTIF_CONFLICT: + uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); + break; + case UWB_DRP_NOTIF_TERMINATE: + uwb_drp_terminate_all(rc, src_dev); + break; + default: + dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); + break; + } + + mutex_unlock(&rc->rsvs_mutex); + + uwb_dev_put(src_dev); + return 0; +} diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c new file mode 100644 index 0000000..bae16204 --- /dev/null +++ b/drivers/uwb/rsv.c @@ -0,0 +1,680 @@ +/* + * UWB reservation management. + * + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include + +#include "uwb-internal.h" + +static void uwb_rsv_timer(unsigned long arg); + +static const char *rsv_states[] = { + [UWB_RSV_STATE_NONE] = "none", + [UWB_RSV_STATE_O_INITIATED] = "initiated", + [UWB_RSV_STATE_O_PENDING] = "pending", + [UWB_RSV_STATE_O_MODIFIED] = "modified", + [UWB_RSV_STATE_O_ESTABLISHED] = "established", + [UWB_RSV_STATE_T_ACCEPTED] = "accepted", + [UWB_RSV_STATE_T_DENIED] = "denied", + [UWB_RSV_STATE_T_PENDING] = "pending", +}; + +static const char *rsv_types[] = { + [UWB_DRP_TYPE_ALIEN_BP] = "alien-bp", + [UWB_DRP_TYPE_HARD] = "hard", + [UWB_DRP_TYPE_SOFT] = "soft", + [UWB_DRP_TYPE_PRIVATE] = "private", + [UWB_DRP_TYPE_PCA] = "pca", +}; + +/** + * uwb_rsv_state_str - return a string for a reservation state + * @state: the reservation state. + */ +const char *uwb_rsv_state_str(enum uwb_rsv_state state) +{ + if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST) + return "unknown"; + return rsv_states[state]; +} +EXPORT_SYMBOL_GPL(uwb_rsv_state_str); + +/** + * uwb_rsv_type_str - return a string for a reservation type + * @type: the reservation type + */ +const char *uwb_rsv_type_str(enum uwb_drp_type type) +{ + if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA) + return "invalid"; + return rsv_types[type]; +} +EXPORT_SYMBOL_GPL(uwb_rsv_type_str); + +static void uwb_rsv_dump(struct uwb_rsv *rsv) +{ + struct device *dev = &rsv->rc->uwb_dev.dev; + struct uwb_dev_addr devaddr; + char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; + + uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); + if (rsv->target.type == UWB_RSV_TARGET_DEV) + devaddr = rsv->target.dev->dev_addr; + else + devaddr = rsv->target.devaddr; + uwb_dev_addr_print(target, sizeof(target), &devaddr); + + dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); +} + +/* + * Get a free stream index for a reservation. + * + * If the target is a DevAddr (e.g., a WUSB cluster reservation) then + * the stream is allocated from a pool of per-RC stream indexes, + * otherwise a unique stream index for the target is selected. 
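+ *
+ * As a consequence, two reservations targeted at different devices may
+ * both get stream index 0 (the index only needs to be unique per
+ * target), while all DevAddr-targeted reservations draw from the radio
+ * controller's single pool.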
+ */ +static int uwb_rsv_get_stream(struct uwb_rsv *rsv) +{ + struct uwb_rc *rc = rsv->rc; + unsigned long *streams_bm; + int stream; + + switch (rsv->target.type) { + case UWB_RSV_TARGET_DEV: + streams_bm = rsv->target.dev->streams; + break; + case UWB_RSV_TARGET_DEVADDR: + streams_bm = rc->uwb_dev.streams; + break; + default: + return -EINVAL; + } + + stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS); + if (stream >= UWB_NUM_STREAMS) + return -EBUSY; + + rsv->stream = stream; + set_bit(stream, streams_bm); + + return 0; +} + +static void uwb_rsv_put_stream(struct uwb_rsv *rsv) +{ + struct uwb_rc *rc = rsv->rc; + unsigned long *streams_bm; + + switch (rsv->target.type) { + case UWB_RSV_TARGET_DEV: + streams_bm = rsv->target.dev->streams; + break; + case UWB_RSV_TARGET_DEVADDR: + streams_bm = rc->uwb_dev.streams; + break; + default: + return; + } + + clear_bit(rsv->stream, streams_bm); +} + +/* + * Generate a MAS allocation with a single row component. + */ +static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, + int first_mas, int mas_per_zone, + int zs, int ze) +{ + struct uwb_mas_bm col; + int z; + + bitmap_zero(mas->bm, UWB_NUM_MAS); + bitmap_zero(col.bm, UWB_NUM_MAS); + bitmap_fill(col.bm, mas_per_zone); + bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); + + for (z = zs; z <= ze; z++) { + bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); + bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); + } +} + +/* + * Allocate some MAS for this reservation based on current local + * availability, the reservation parameters (max_mas, min_mas, + * sparsity), and the WiMedia rules for MAS allocations. + * + * Returns -EBUSY is insufficient free MAS are available. + * + * FIXME: to simplify this, only safe reservations with a single row + * component in zones 1 to 15 are tried (zone 0 is skipped to avoid + * problems with the MAS reserved for the BP). + * + * [ECMA-368] section B.2. + */ +static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) +{ + static const int safe_mas_in_row[UWB_NUM_ZONES] = { + 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, + }; + int n, r; + struct uwb_mas_bm mas; + bool found = false; + + /* + * Search all valid safe allocations until either: too few MAS + * are available; or the smallest allocation with sufficient + * MAS is found. + * + * The top of the zones are preferred, so space for larger + * allocations is available in the bottom of the zone (e.g., a + * 15 MAS allocation should start in row 14 leaving space for + * a 120 MAS allocation at row 0). + */ + for (n = safe_mas_in_row[0]; n >= 1; n--) { + int num_mas; + + num_mas = n * (UWB_NUM_ZONES - 1); + if (num_mas < rsv->min_mas) + break; + if (found && num_mas < rsv->max_mas) + break; + + for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { + if (safe_mas_in_row[r] < n) + continue; + uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); + if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { + found = true; + break; + } + } + } + + if (!found) + return -EBUSY; + + bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); + return 0; +} + +static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) +{ + int sframes = UWB_MAX_LOST_BEACONS; + + /* + * Multicast reservations can become established within 1 + * super frame and should not be terminated if no response is + * received. 
+ */ + if (rsv->is_multicast) { + if (rsv->state == UWB_RSV_STATE_O_INITIATED) + sframes = 1; + if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) + sframes = 0; + } + + rsv->expired = false; + if (sframes > 0) { + /* + * Add an additional 2 superframes to account for the + * time to send the SET DRP IE command. + */ + unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US; + mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us)); + } else + del_timer(&rsv->timer); +} + +/* + * Update a reservations state, and schedule an update of the + * transmitted DRP IEs. + */ +static void uwb_rsv_state_update(struct uwb_rsv *rsv, + enum uwb_rsv_state new_state) +{ + rsv->state = new_state; + rsv->ie_valid = false; + + uwb_rsv_dump(rsv); + + uwb_rsv_stroke_timer(rsv); + uwb_rsv_sched_update(rsv->rc); +} + +static void uwb_rsv_callback(struct uwb_rsv *rsv) +{ + if (rsv->callback) + rsv->callback(rsv); +} + +void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) +{ + if (rsv->state == new_state) { + switch (rsv->state) { + case UWB_RSV_STATE_O_ESTABLISHED: + case UWB_RSV_STATE_T_ACCEPTED: + case UWB_RSV_STATE_NONE: + uwb_rsv_stroke_timer(rsv); + break; + default: + /* Expecting a state transition so leave timer + as-is. */ + break; + } + return; + } + + switch (new_state) { + case UWB_RSV_STATE_NONE: + uwb_drp_avail_release(rsv->rc, &rsv->mas); + uwb_rsv_put_stream(rsv); + uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); + uwb_rsv_callback(rsv); + break; + case UWB_RSV_STATE_O_INITIATED: + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED); + break; + case UWB_RSV_STATE_O_PENDING: + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); + break; + case UWB_RSV_STATE_O_ESTABLISHED: + uwb_drp_avail_reserve(rsv->rc, &rsv->mas); + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); + uwb_rsv_callback(rsv); + break; + case UWB_RSV_STATE_T_ACCEPTED: + uwb_drp_avail_reserve(rsv->rc, &rsv->mas); + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); + uwb_rsv_callback(rsv); + break; + case UWB_RSV_STATE_T_DENIED: + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); + break; + default: + dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", + uwb_rsv_state_str(new_state), new_state); + } +} + +static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) +{ + struct uwb_rsv *rsv; + + rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL); + if (!rsv) + return NULL; + + INIT_LIST_HEAD(&rsv->rc_node); + INIT_LIST_HEAD(&rsv->pal_node); + init_timer(&rsv->timer); + rsv->timer.function = uwb_rsv_timer; + rsv->timer.data = (unsigned long)rsv; + + rsv->rc = rc; + + return rsv; +} + +static void uwb_rsv_free(struct uwb_rsv *rsv) +{ + uwb_dev_put(rsv->owner); + if (rsv->target.type == UWB_RSV_TARGET_DEV) + uwb_dev_put(rsv->target.dev); + kfree(rsv); +} + +/** + * uwb_rsv_create - allocate and initialize a UWB reservation structure + * @rc: the radio controller + * @cb: callback to use when the reservation completes or terminates + * @pal_priv: data private to the PAL to be passed in the callback + * + * The callback is called when the state of the reservation changes from: + * + * - pending to accepted + * - pending to denined + * - accepted to terminated + * - pending to terminated + */ +struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv) +{ + struct uwb_rsv *rsv; + + rsv = uwb_rsv_alloc(rc); + if (!rsv) + return NULL; + + rsv->callback = cb; + rsv->pal_priv = pal_priv; + + return rsv; +} +EXPORT_SYMBOL_GPL(uwb_rsv_create); + +void uwb_rsv_remove(struct uwb_rsv 
*rsv) +{ + if (rsv->state != UWB_RSV_STATE_NONE) + uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); + del_timer_sync(&rsv->timer); + list_del(&rsv->rc_node); + uwb_rsv_free(rsv); +} + +/** + * uwb_rsv_destroy - free a UWB reservation structure + * @rsv: the reservation to free + * + * The reservation will be terminated if it is pending or established. + */ +void uwb_rsv_destroy(struct uwb_rsv *rsv) +{ + struct uwb_rc *rc = rsv->rc; + + mutex_lock(&rc->rsvs_mutex); + uwb_rsv_remove(rsv); + mutex_unlock(&rc->rsvs_mutex); +} +EXPORT_SYMBOL_GPL(uwb_rsv_destroy); + +/** + * usb_rsv_establish - start a reservation establishment + * @rsv: the reservation + * + * The PAL should fill in @rsv's owner, target, type, max_mas, + * min_mas, sparsity and is_multicast fields. If the target is a + * uwb_dev it must be referenced. + * + * The reservation's callback will be called when the reservation is + * accepted, denied or times out. + */ +int uwb_rsv_establish(struct uwb_rsv *rsv) +{ + struct uwb_rc *rc = rsv->rc; + int ret; + + mutex_lock(&rc->rsvs_mutex); + + ret = uwb_rsv_get_stream(rsv); + if (ret) + goto out; + + ret = uwb_rsv_alloc_mas(rsv); + if (ret) { + uwb_rsv_put_stream(rsv); + goto out; + } + + list_add_tail(&rsv->rc_node, &rc->reservations); + rsv->owner = &rc->uwb_dev; + uwb_dev_get(rsv->owner); + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED); +out: + mutex_unlock(&rc->rsvs_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(uwb_rsv_establish); + +/** + * uwb_rsv_modify - modify an already established reservation + * @rsv: the reservation to modify + * @max_mas: new maximum MAS to reserve + * @min_mas: new minimum MAS to reserve + * @sparsity: new sparsity to use + * + * FIXME: implement this once there are PALs that use it. + */ +int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) +{ + return -ENOSYS; +} +EXPORT_SYMBOL_GPL(uwb_rsv_modify); + +/** + * uwb_rsv_terminate - terminate an established reservation + * @rsv: the reservation to terminate + * + * A reservation is terminated by removing the DRP IE from the beacon, + * the other end will consider the reservation to be terminated when + * it does not see the DRP IE for at least mMaxLostBeacons. + * + * If applicable, the reference to the target uwb_dev will be released. + */ +void uwb_rsv_terminate(struct uwb_rsv *rsv) +{ + struct uwb_rc *rc = rsv->rc; + + mutex_lock(&rc->rsvs_mutex); + + uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); + + mutex_unlock(&rc->rsvs_mutex); +} +EXPORT_SYMBOL_GPL(uwb_rsv_terminate); + +/** + * uwb_rsv_accept - accept a new reservation from a peer + * @rsv: the reservation + * @cb: call back for reservation changes + * @pal_priv: data to be passed in the above call back + * + * Reservation requests from peers are denied unless a PAL accepts it + * by calling this function. + */ +void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) +{ + rsv->callback = cb; + rsv->pal_priv = pal_priv; + rsv->state = UWB_RSV_STATE_T_ACCEPTED; +} +EXPORT_SYMBOL_GPL(uwb_rsv_accept); + +/* + * Is a received DRP IE for this reservation? 
+ */ +static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src, + struct uwb_ie_drp *drp_ie) +{ + struct uwb_dev_addr *rsv_src; + int stream; + + stream = uwb_ie_drp_stream_index(drp_ie); + + if (rsv->stream != stream) + return false; + + switch (rsv->target.type) { + case UWB_RSV_TARGET_DEVADDR: + return rsv->stream == stream; + case UWB_RSV_TARGET_DEV: + if (uwb_ie_drp_owner(drp_ie)) + rsv_src = &rsv->owner->dev_addr; + else + rsv_src = &rsv->target.dev->dev_addr; + return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0; + } + return false; +} + +static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, + struct uwb_dev *src, + struct uwb_ie_drp *drp_ie) +{ + struct uwb_rsv *rsv; + struct uwb_pal *pal; + enum uwb_rsv_state state; + + rsv = uwb_rsv_alloc(rc); + if (!rsv) + return NULL; + + rsv->rc = rc; + rsv->owner = src; + uwb_dev_get(rsv->owner); + rsv->target.type = UWB_RSV_TARGET_DEV; + rsv->target.dev = &rc->uwb_dev; + rsv->type = uwb_ie_drp_type(drp_ie); + rsv->stream = uwb_ie_drp_stream_index(drp_ie); + set_bit(rsv->stream, rsv->owner->streams); + uwb_drp_ie_to_bm(&rsv->mas, drp_ie); + + /* + * See if any PALs are interested in this reservation. If not, + * deny the request. + */ + rsv->state = UWB_RSV_STATE_T_DENIED; + spin_lock(&rc->pal_lock); + list_for_each_entry(pal, &rc->pals, node) { + if (pal->new_rsv) + pal->new_rsv(rsv); + if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) + break; + } + spin_unlock(&rc->pal_lock); + + list_add_tail(&rsv->rc_node, &rc->reservations); + state = rsv->state; + rsv->state = UWB_RSV_STATE_NONE; + uwb_rsv_set_state(rsv, state); + + return rsv; +} + +/** + * uwb_rsv_find - find a reservation for a received DRP IE. + * @rc: the radio controller + * @src: source of the DRP IE + * @drp_ie: the DRP IE + * + * If the reservation cannot be found and the DRP IE is from a peer + * attempting to establish a new reservation, create a new reservation + * and add it to the list. + */ +struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, + struct uwb_ie_drp *drp_ie) +{ + struct uwb_rsv *rsv; + + list_for_each_entry(rsv, &rc->reservations, rc_node) { + if (uwb_rsv_match(rsv, src, drp_ie)) + return rsv; + } + + if (uwb_ie_drp_owner(drp_ie)) + return uwb_rsv_new_target(rc, src, drp_ie); + + return NULL; +} + +/* + * Go through all the reservations and check for timeouts and (if + * necessary) update their DRP IEs. + * + * FIXME: look at building the SET_DRP_IE command here rather than + * having to rescan the list in uwb_rc_send_all_drp_ie(). + */ +static bool uwb_rsv_update_all(struct uwb_rc *rc) +{ + struct uwb_rsv *rsv, *t; + bool ie_updated = false; + + list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { + if (rsv->expired) + uwb_drp_handle_timeout(rsv); + if (!rsv->ie_valid) { + uwb_drp_ie_update(rsv); + ie_updated = true; + } + } + + return ie_updated; +} + +void uwb_rsv_sched_update(struct uwb_rc *rc) +{ + queue_work(rc->rsv_workq, &rc->rsv_update_work); +} + +/* + * Update DRP IEs and, if necessary, the DRP Availability IE and send + * the updated IEs to the radio controller. 
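+ *
+ * This runs on rc->rsv_workq: anything that invalidates an IE calls
+ * uwb_rsv_sched_update() and this work item then batches all the
+ * changes into a single SET-DRP-IE command via
+ * uwb_rc_send_all_drp_ie().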
+ */ +static void uwb_rsv_update_work(struct work_struct *work) +{ + struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); + bool ie_updated; + + mutex_lock(&rc->rsvs_mutex); + + ie_updated = uwb_rsv_update_all(rc); + + if (!rc->drp_avail.ie_valid) { + uwb_drp_avail_ie_update(rc); + ie_updated = true; + } + + if (ie_updated) + uwb_rc_send_all_drp_ie(rc); + + mutex_unlock(&rc->rsvs_mutex); +} + +static void uwb_rsv_timer(unsigned long arg) +{ + struct uwb_rsv *rsv = (struct uwb_rsv *)arg; + + rsv->expired = true; + uwb_rsv_sched_update(rsv->rc); +} + +void uwb_rsv_init(struct uwb_rc *rc) +{ + INIT_LIST_HEAD(&rc->reservations); + mutex_init(&rc->rsvs_mutex); + INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); + + bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); +} + +int uwb_rsv_setup(struct uwb_rc *rc) +{ + char name[16]; + + snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev)); + rc->rsv_workq = create_singlethread_workqueue(name); + if (rc->rsv_workq == NULL) + return -ENOMEM; + + return 0; +} + +void uwb_rsv_cleanup(struct uwb_rc *rc) +{ + struct uwb_rsv *rsv, *t; + + mutex_lock(&rc->rsvs_mutex); + list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { + uwb_rsv_remove(rsv); + } + mutex_unlock(&rc->rsvs_mutex); + + cancel_work_sync(&rc->rsv_update_work); + destroy_workqueue(rc->rsv_workq); +} -- cgit v0.10.2 From 599e8d80a2cdf4f65fc49b31b27a49235c78acfe Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:10 +0100 Subject: uwb: add the UWB stack (debug support) Add various debugfs files, principaly for the reservation manager. Signed-off-by: David Vrabel diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c new file mode 100644 index 0000000..6d232c3 --- /dev/null +++ b/drivers/uwb/uwb-debug.c @@ -0,0 +1,367 @@ +/* + * Ultra Wide Band + * Debug support + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: doc + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#define D_LOCAL 0 +#include + +#include "uwb-internal.h" + +void dump_bytes(struct device *dev, const void *_buf, size_t rsize) +{ + const char *buf = _buf; + char line[32]; + size_t offset = 0; + int cnt, cnt2; + for (cnt = 0; cnt < rsize; cnt += 8) { + size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8; + for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) { + offset += scnprintf(line + offset, sizeof(line) - offset, + "%02x ", buf[cnt + cnt2] & 0xff); + } + if (dev) + dev_info(dev, "%s\n", line); + else + printk(KERN_INFO "%s\n", line); + } +} +EXPORT_SYMBOL_GPL(dump_bytes); + +/* + * Debug interface + * + * Per radio controller debugfs files (in uwb/uwbN/): + * + * command: Flexible command interface (see ). + * + * reservations: information on reservations. 
+ * + * accept: Set to true (Y or 1) to accept reservation requests from + * peers. + * + * drp_avail: DRP availability information. + */ + +struct uwb_dbg { + struct uwb_pal pal; + + u32 accept; + struct list_head rsvs; + + struct dentry *root_d; + struct dentry *command_f; + struct dentry *reservations_f; + struct dentry *accept_f; + struct dentry *drp_avail_f; +}; + +static struct dentry *root_dir; + +static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) +{ + struct uwb_rc *rc = rsv->rc; + struct device *dev = &rc->uwb_dev.dev; + struct uwb_dev_addr devaddr; + char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; + + uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); + if (rsv->target.type == UWB_RSV_TARGET_DEV) + devaddr = rsv->target.dev->dev_addr; + else + devaddr = rsv->target.devaddr; + uwb_dev_addr_print(target, sizeof(target), &devaddr); + + dev_dbg(dev, "debug: rsv %s -> %s: %s\n", + owner, target, uwb_rsv_state_str(rsv->state)); +} + +static int cmd_rsv_establish(struct uwb_rc *rc, + struct uwb_dbg_cmd_rsv_establish *cmd) +{ + struct uwb_mac_addr macaddr; + struct uwb_rsv *rsv; + struct uwb_dev *target; + int ret; + + memcpy(&macaddr, cmd->target, sizeof(macaddr)); + target = uwb_dev_get_by_macaddr(rc, &macaddr); + if (target == NULL) + return -ENODEV; + + rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL); + if (rsv == NULL) { + uwb_dev_put(target); + return -ENOMEM; + } + + rsv->owner = &rc->uwb_dev; + rsv->target.type = UWB_RSV_TARGET_DEV; + rsv->target.dev = target; + rsv->type = cmd->type; + rsv->max_mas = cmd->max_mas; + rsv->min_mas = cmd->min_mas; + rsv->sparsity = cmd->sparsity; + + ret = uwb_rsv_establish(rsv); + if (ret) + uwb_rsv_destroy(rsv); + else + list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); + + return ret; +} + +static int cmd_rsv_terminate(struct uwb_rc *rc, + struct uwb_dbg_cmd_rsv_terminate *cmd) +{ + struct uwb_rsv *rsv, *found = NULL; + int i = 0; + + list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { + if (i == cmd->index) { + found = rsv; + break; + } + } + if (!found) + return -EINVAL; + + list_del(&found->pal_node); + uwb_rsv_terminate(found); + + return 0; +} + +static int command_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +static ssize_t command_write(struct file *file, const char __user *buf, + size_t len, loff_t *off) +{ + struct uwb_rc *rc = file->private_data; + struct uwb_dbg_cmd cmd; + int ret; + + if (len != sizeof(struct uwb_dbg_cmd)) + return -EINVAL; + + if (copy_from_user(&cmd, buf, len) != 0) + return -EFAULT; + + switch (cmd.type) { + case UWB_DBG_CMD_RSV_ESTABLISH: + ret = cmd_rsv_establish(rc, &cmd.rsv_establish); + break; + case UWB_DBG_CMD_RSV_TERMINATE: + ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); + break; + default: + return -EINVAL; + } + + return ret < 0 ? 
ret : len; +} + +static struct file_operations command_fops = { + .open = command_open, + .write = command_write, + .read = NULL, + .llseek = no_llseek, + .owner = THIS_MODULE, +}; + +static int reservations_print(struct seq_file *s, void *p) +{ + struct uwb_rc *rc = s->private; + struct uwb_rsv *rsv; + + mutex_lock(&rc->rsvs_mutex); + + list_for_each_entry(rsv, &rc->reservations, rc_node) { + struct uwb_dev_addr devaddr; + char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; + bool is_owner; + char buf[72]; + + uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); + if (rsv->target.type == UWB_RSV_TARGET_DEV) { + devaddr = rsv->target.dev->dev_addr; + is_owner = &rc->uwb_dev == rsv->owner; + } else { + devaddr = rsv->target.devaddr; + is_owner = true; + } + uwb_dev_addr_print(target, sizeof(target), &devaddr); + + seq_printf(s, "%c %s -> %s: %s\n", + is_owner ? 'O' : 'T', + owner, target, uwb_rsv_state_str(rsv->state)); + seq_printf(s, " stream: %d type: %s\n", + rsv->stream, uwb_rsv_type_str(rsv->type)); + bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); + seq_printf(s, " %s\n", buf); + } + + mutex_unlock(&rc->rsvs_mutex); + + return 0; +} + +static int reservations_open(struct inode *inode, struct file *file) +{ + return single_open(file, reservations_print, inode->i_private); +} + +static struct file_operations reservations_fops = { + .open = reservations_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int drp_avail_print(struct seq_file *s, void *p) +{ + struct uwb_rc *rc = s->private; + char buf[72]; + + bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS); + seq_printf(s, "global: %s\n", buf); + bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS); + seq_printf(s, "local: %s\n", buf); + bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS); + seq_printf(s, "pending: %s\n", buf); + + return 0; +} + +static int drp_avail_open(struct inode *inode, struct file *file) +{ + return single_open(file, drp_avail_print, inode->i_private); +} + +static struct file_operations drp_avail_fops = { + .open = drp_avail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static void uwb_dbg_new_rsv(struct uwb_rsv *rsv) +{ + struct uwb_rc *rc = rsv->rc; + + if (rc->dbg->accept) + uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL); +} + +/** + * uwb_dbg_add_rc - add a debug interface for a radio controller + * @rc: the radio controller + */ +void uwb_dbg_add_rc(struct uwb_rc *rc) +{ + rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL); + if (rc->dbg == NULL) + return; + + INIT_LIST_HEAD(&rc->dbg->rsvs); + + uwb_pal_init(&rc->dbg->pal); + rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; + uwb_pal_register(rc, &rc->dbg->pal); + if (root_dir) { + rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), + root_dir); + rc->dbg->command_f = debugfs_create_file("command", 0200, + rc->dbg->root_d, rc, + &command_fops); + rc->dbg->reservations_f = debugfs_create_file("reservations", 0444, + rc->dbg->root_d, rc, + &reservations_fops); + rc->dbg->accept_f = debugfs_create_bool("accept", 0644, + rc->dbg->root_d, + &rc->dbg->accept); + rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444, + rc->dbg->root_d, rc, + &drp_avail_fops); + } +} + +/** + * uwb_dbg_add_rc - remove a radio controller's debug interface + * @rc: the radio controller + */ +void uwb_dbg_del_rc(struct uwb_rc *rc) +{ + struct uwb_rsv *rsv, *t; + + 
if (rc->dbg == NULL) + return; + + list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { + uwb_rsv_destroy(rsv); + } + + uwb_pal_unregister(rc, &rc->dbg->pal); + + if (root_dir) { + debugfs_remove(rc->dbg->drp_avail_f); + debugfs_remove(rc->dbg->accept_f); + debugfs_remove(rc->dbg->reservations_f); + debugfs_remove(rc->dbg->command_f); + debugfs_remove(rc->dbg->root_d); + } +} + +/** + * uwb_dbg_exit - initialize the debug interface sub-module + */ +void uwb_dbg_init(void) +{ + root_dir = debugfs_create_dir("uwb", NULL); +} + +/** + * uwb_dbg_exit - clean-up the debug interface sub-module + */ +void uwb_dbg_exit(void) +{ + debugfs_remove(root_dir); +} -- cgit v0.10.2 From 2f86c3e67d6423d6d23ee2f737ad4f0730435742 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 17 Sep 2008 16:34:11 +0100 Subject: uwb: add the UWB stack (build system) The Kbuild and Kconfig files. Signed-off-by: Greg Kroah-Hartman Signed-off-by: David Vrabel diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 70dba16..8eedbfa 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1220,6 +1220,8 @@ source "drivers/hid/Kconfig" source "drivers/usb/Kconfig" +source "drivers/uwb/Kconfig" + source "drivers/mmc/Kconfig" source "drivers/leds/Kconfig" diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 9389d38..cb66c4d 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -677,6 +677,8 @@ source "fs/Kconfig" source "drivers/usb/Kconfig" +source "drivers/uwb/Kconfig" + source "arch/cris/Kconfig.debug" source "security/Kconfig" diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 396ab05..2b41332 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -213,6 +213,8 @@ source "drivers/hwmon/Kconfig" source "drivers/usb/Kconfig" +source "drivers/uwb/Kconfig" + endmenu source "fs/Kconfig" diff --git a/drivers/Kconfig b/drivers/Kconfig index 59f33fa..9b399a1 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -78,6 +78,8 @@ source "drivers/hid/Kconfig" source "drivers/usb/Kconfig" +source "drivers/uwb/Kconfig" + source "drivers/mmc/Kconfig" source "drivers/memstick/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 2735bde..1a6930f 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -99,3 +99,4 @@ obj-$(CONFIG_OF) += of/ obj-$(CONFIG_SSB) += ssb/ obj-$(CONFIG_VIRTIO) += virtio/ obj-$(CONFIG_REGULATOR) += regulator/ +obj-$(CONFIG_UWB) += uwb/ diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig new file mode 100644 index 0000000..3f01a69 --- /dev/null +++ b/drivers/uwb/Kconfig @@ -0,0 +1,28 @@ +# +# UWB device configuration +# + +menuconfig UWB + tristate "Ultra Wide Band devices" + depends on PCI + default n + help + UWB is a high-bandwidth, low-power, point-to-point radio + technology using a wide spectrum (3.1-10.6GHz). It is + optimized for in-room use (480Mbps at 2 meters, 110Mbps at + 10m). It serves as the transport layer for other protocols, + such as Wireless USB (WUSB), IP (WLP) and upcoming + Bluetooth and 1394 + + The topology is peer to peer; however, higher level + protocols (such as WUSB) might impose a master/slave + relationship. + + Say Y here if your computer has UWB radio controllers (USB or PCI) + based. You will need to enable the radio controllers + below. It is ok to select all of them, no harm done. + + For more help check the UWB and WUSB related files in + . + + To compile the UWB stack as a module, choose M here. 
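For reference, the debugfs command interface added in the debug-support patch above takes exactly one binary struct uwb_dbg_cmd per write() and dispatches on cmd.type (see command_write()). A minimal user-space sketch follows; the field names are the ones consumed by cmd_rsv_establish(), while the header path and the numeric MAS/type values are assumptions for illustration only.

/*
 * Illustrative user-space sketch only: ask the radio controller to
 * establish a DRP reservation through the debugfs "command" file
 * (with debugfs mounted at /sys/kernel/debug this is
 * /sys/kernel/debug/uwb/uwbN/command).  struct uwb_dbg_cmd and the
 * UWB_DBG_CMD_* constants are assumed to come from the UWB debug
 * header exported by this stack.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/uwb/debug-cmd.h>	/* assumed header for struct uwb_dbg_cmd */

static int uwb_dbg_rsv_establish(const char *cmdfile,
				 const uint8_t target_mac[6], unsigned drp_type)
{
	struct uwb_dbg_cmd cmd;
	int fd;
	ssize_t n;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = UWB_DBG_CMD_RSV_ESTABLISH;
	memcpy(cmd.rsv_establish.target, target_mac, 6); /* UWB MAC addresses are 6 octets */
	cmd.rsv_establish.type     = drp_type;	/* a value from the DRP reservation type enum */
	cmd.rsv_establish.max_mas  = 32;	/* illustrative MAS budget */
	cmd.rsv_establish.min_mas  = 16;
	cmd.rsv_establish.sparsity = 4;

	fd = open(cmdfile, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, &cmd, sizeof(cmd));	/* must be exactly sizeof(cmd) */
	close(fd);
	return n == (ssize_t)sizeof(cmd) ? 0 : -1;
}

Terminating goes through the same file with cmd.type = UWB_DBG_CMD_RSV_TERMINATE and cmd.rsv_terminate.index selecting the reservation, while the reservations and drp_avail files give read-only views of the resulting state.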
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile new file mode 100644 index 0000000..9a67be5 --- /dev/null +++ b/drivers/uwb/Makefile @@ -0,0 +1,20 @@ +obj-$(CONFIG_UWB) += uwb.o + +uwb-objs := \ + address.o \ + beacon.o \ + driver.o \ + drp.o \ + drp-avail.o \ + drp-ie.o \ + est.o \ + ie.o \ + lc-dev.o \ + lc-rc.o \ + neh.o \ + pal.o \ + reset.o \ + rsv.o \ + scan.o \ + uwb-debug.o \ + uwbd.o -- cgit v0.10.2 From da389eac31be24556a71dd59ea6539ae4cba5c15 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:12 +0100 Subject: uwb: add the umc bus The UMC bus is used for the capabilities exposed by a UWB Multi-interface Controller as described in the WHCI specification. Signed-off-by: David Vrabel diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index 9a67be5..41c9fca 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_UWB) += uwb.o +obj-$(CONFIG_UWB_WHCI) += umc.o uwb-objs := \ address.o \ @@ -18,3 +19,8 @@ uwb-objs := \ scan.o \ uwb-debug.o \ uwbd.o + +umc-objs := \ + umc-bus.o \ + umc-dev.o \ + umc-drv.o diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c new file mode 100644 index 0000000..2d8d62d --- /dev/null +++ b/drivers/uwb/umc-bus.c @@ -0,0 +1,218 @@ +/* + * Bus for UWB Multi-interface Controller capabilities. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This file is released under the GNU GPL v2. + */ +#include +#include +#include +#include +#include + +static int umc_bus_unbind_helper(struct device *dev, void *data) +{ + struct device *parent = data; + + if (dev->parent == parent && dev->driver) + device_release_driver(dev); + return 0; +} + +/** + * umc_controller_reset - reset the whole UMC controller + * @umc: the UMC device for the radio controller. + * + * Drivers will be unbound from all UMC devices belonging to the + * controller and then the radio controller will be rebound. The + * radio controller is expected to do a full hardware reset when it is + * probed. + * + * If this is called while a probe() or remove() is in progress it + * will return -EAGAIN and not perform the reset. + */ +int umc_controller_reset(struct umc_dev *umc) +{ + struct device *parent = umc->dev.parent; + int ret; + + if (down_trylock(&parent->sem)) + return -EAGAIN; + bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper); + ret = device_attach(&umc->dev); + if (ret == 1) + ret = 0; + up(&parent->sem); + + return ret; +} +EXPORT_SYMBOL_GPL(umc_controller_reset); + +/** + * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device. + * @umc_drv: umc driver with match_data pointing to a zero-terminated + * table of pci_device_id's. + * @umc: umc device whose parent is to be matched. + */ +int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc) +{ + const struct pci_device_id *id_table = umc_drv->match_data; + struct pci_dev *pci; + + if (umc->dev.parent->bus != &pci_bus_type) + return 0; + + pci = to_pci_dev(umc->dev.parent); + return pci_match_id(id_table, pci) != NULL; +} +EXPORT_SYMBOL_GPL(umc_match_pci_id); + +static int umc_bus_rescan_helper(struct device *dev, void *data) +{ + int ret = 0; + + if (!dev->driver) + ret = device_attach(dev); + + return ret < 0 ? ret : 0; +} + +static void umc_bus_rescan(void) +{ + int err; + + /* + * We can't use bus_rescan_devices() here as it deadlocks when + * it tries to retake the dev->parent semaphore. 
+ */ + err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper); + if (err < 0) + printk(KERN_WARNING "%s: rescan of bus failed: %d\n", + KBUILD_MODNAME, err); +} + +static int umc_bus_match(struct device *dev, struct device_driver *drv) +{ + struct umc_dev *umc = to_umc_dev(dev); + struct umc_driver *umc_driver = to_umc_driver(drv); + + if (umc->cap_id == umc_driver->cap_id) { + if (umc_driver->match) + return umc_driver->match(umc_driver, umc); + else + return 1; + } + return 0; +} + +static int umc_device_probe(struct device *dev) +{ + struct umc_dev *umc; + struct umc_driver *umc_driver; + int err; + + umc_driver = to_umc_driver(dev->driver); + umc = to_umc_dev(dev); + + get_device(dev); + err = umc_driver->probe(umc); + if (err) + put_device(dev); + else + umc_bus_rescan(); + + return err; +} + +static int umc_device_remove(struct device *dev) +{ + struct umc_dev *umc; + struct umc_driver *umc_driver; + + umc_driver = to_umc_driver(dev->driver); + umc = to_umc_dev(dev); + + umc_driver->remove(umc); + put_device(dev); + return 0; +} + +static int umc_device_suspend(struct device *dev, pm_message_t state) +{ + struct umc_dev *umc; + struct umc_driver *umc_driver; + int err = 0; + + umc = to_umc_dev(dev); + + if (dev->driver) { + umc_driver = to_umc_driver(dev->driver); + if (umc_driver->suspend) + err = umc_driver->suspend(umc, state); + } + return err; +} + +static int umc_device_resume(struct device *dev) +{ + struct umc_dev *umc; + struct umc_driver *umc_driver; + int err = 0; + + umc = to_umc_dev(dev); + + if (dev->driver) { + umc_driver = to_umc_driver(dev->driver); + if (umc_driver->resume) + err = umc_driver->resume(umc); + } + return err; +} + +static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct umc_dev *umc = to_umc_dev(dev); + + return sprintf(buf, "0x%02x\n", umc->cap_id); +} + +static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct umc_dev *umc = to_umc_dev(dev); + + return sprintf(buf, "0x%04x\n", umc->version); +} + +static struct device_attribute umc_dev_attrs[] = { + __ATTR_RO(capability_id), + __ATTR_RO(version), + __ATTR_NULL, +}; + +struct bus_type umc_bus_type = { + .name = "umc", + .match = umc_bus_match, + .probe = umc_device_probe, + .remove = umc_device_remove, + .suspend = umc_device_suspend, + .resume = umc_device_resume, + .dev_attrs = umc_dev_attrs, +}; +EXPORT_SYMBOL_GPL(umc_bus_type); + +static int __init umc_bus_init(void) +{ + return bus_register(&umc_bus_type); +} +module_init(umc_bus_init); + +static void __exit umc_bus_exit(void) +{ + bus_unregister(&umc_bus_type); +} +module_exit(umc_bus_exit); + +MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus"); +MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); +MODULE_LICENSE("GPL"); diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c new file mode 100644 index 0000000..aa44e1c --- /dev/null +++ b/drivers/uwb/umc-dev.c @@ -0,0 +1,104 @@ +/* + * UWB Multi-interface Controller device management. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This file is released under the GNU GPL v2. + */ +#include +#include +#define D_LOCAL 0 +#include + +static void umc_device_release(struct device *dev) +{ + struct umc_dev *umc = to_umc_dev(dev); + + kfree(umc); +} + +/** + * umc_device_create - allocate a child UMC device + * @parent: parent of the new UMC device. + * @n: index of the new device. 
+ * + * The new UMC device will have a bus ID of the parent with '-n' + * appended. + */ +struct umc_dev *umc_device_create(struct device *parent, int n) +{ + struct umc_dev *umc; + + umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); + if (umc) { + snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d", + parent->bus_id, n); + umc->dev.parent = parent; + umc->dev.bus = &umc_bus_type; + umc->dev.release = umc_device_release; + + umc->dev.dma_mask = parent->dma_mask; + } + return umc; +} +EXPORT_SYMBOL_GPL(umc_device_create); + +/** + * umc_device_register - register a UMC device + * @umc: pointer to the UMC device + * + * The memory resource for the UMC device is acquired and the device + * registered with the system. + */ +int umc_device_register(struct umc_dev *umc) +{ + int err; + + d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc); + + err = request_resource(umc->resource.parent, &umc->resource); + if (err < 0) { + dev_err(&umc->dev, "can't allocate resource range " + "%016Lx to %016Lx: %d\n", + (unsigned long long)umc->resource.start, + (unsigned long long)umc->resource.end, + err); + goto error_request_resource; + } + + err = device_register(&umc->dev); + if (err < 0) + goto error_device_register; + d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc); + return 0; + +error_device_register: + release_resource(&umc->resource); +error_request_resource: + d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err); + return err; +} +EXPORT_SYMBOL_GPL(umc_device_register); + +/** + * umc_device_unregister - unregister a UMC device + * @umc: pointer to the UMC device + * + * First we unregister the device, make sure the driver can do it's + * resource release thing and then we try to release any left over + * resources. We take a ref to the device, to make sure it doesn't + * dissapear under our feet. + */ +void umc_device_unregister(struct umc_dev *umc) +{ + struct device *dev; + if (!umc) + return; + dev = get_device(&umc->dev); + d_fnstart(3, dev, "(umc_dev %p)\n", umc); + device_unregister(&umc->dev); + release_resource(&umc->resource); + d_fnend(3, dev, "(umc_dev %p) = void\n", umc); + put_device(dev); +} +EXPORT_SYMBOL_GPL(umc_device_unregister); diff --git a/drivers/uwb/umc-drv.c b/drivers/uwb/umc-drv.c new file mode 100644 index 0000000..367b5eb8 --- /dev/null +++ b/drivers/uwb/umc-drv.c @@ -0,0 +1,31 @@ +/* + * UWB Multi-interface Controller driver management. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This file is released under the GNU GPL v2. + */ +#include +#include + +int __umc_driver_register(struct umc_driver *umc_drv, struct module *module, + const char *mod_name) +{ + umc_drv->driver.name = umc_drv->name; + umc_drv->driver.owner = module; + umc_drv->driver.mod_name = mod_name; + umc_drv->driver.bus = &umc_bus_type; + + return driver_register(&umc_drv->driver); +} +EXPORT_SYMBOL_GPL(__umc_driver_register); + +/** + * umc_driver_register - unregister a UMC capabiltity driver. + * @umc_drv: pointer to the driver. + */ +void umc_driver_unregister(struct umc_driver *umc_drv) +{ + driver_unregister(&umc_drv->driver); +} +EXPORT_SYMBOL_GPL(umc_driver_unregister); diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h new file mode 100644 index 0000000..36a39e3 --- /dev/null +++ b/include/linux/uwb/umc.h @@ -0,0 +1,194 @@ +/* + * UWB Multi-interface Controller support. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 
+ * + * This file is released under the GPLv2 + * + * UMC (UWB Multi-interface Controller) capabilities (e.g., radio + * controller, host controller) are presented as devices on the "umc" + * bus. + * + * The radio controller is not strictly a UMC capability but it's + * useful to present it as such. + * + * References: + * + * [WHCI] Wireless Host Controller Interface Specification for + * Certified Wireless Universal Serial Bus, revision 0.95. + * + * How this works is kind of convoluted but simple. The whci.ko driver + * loads when WHCI devices are detected. These WHCI devices expose + * many devices in the same PCI function (they couldn't have reused + * functions, no), so for each PCI function that exposes these many + * devices, whci ceates a umc_dev [whci_probe() -> whci_add_cap()] + * with umc_device_create() and adds it to the bus with + * umc_device_register(). + * + * umc_device_register() calls device_register() which will push the + * bus management code to load your UMC driver's somehting_probe() + * that you have registered for that capability code. + * + * Now when the WHCI device is removed, whci_remove() will go over + * each umc_dev assigned to each of the PCI function's capabilities + * and through whci_del_cap() call umc_device_unregister() each + * created umc_dev. Of course, if you are bound to the device, your + * driver's something_remove() will be called. + */ + +#ifndef _LINUX_UWB_UMC_H_ +#define _LINUX_UWB_UMC_H_ + +#include +#include + +/* + * UMC capability IDs. + * + * 0x00 is reserved so use it for the radio controller device. + * + * [WHCI] table 2-8 + */ +#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */ +#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */ + +/** + * struct umc_dev - UMC capability device + * + * @version: version of the specification this capability conforms to. + * @cap_id: capability ID. + * @bar: PCI Bar (64 bit) where the resource lies + * @resource: register space resource. + * @irq: interrupt line. + */ +struct umc_dev { + u16 version; + u8 cap_id; + u8 bar; + struct resource resource; + unsigned irq; + struct device dev; +}; + +#define to_umc_dev(d) container_of(d, struct umc_dev, dev) + +/** + * struct umc_driver - UMC capability driver + * @cap_id: supported capability ID. + * @match: driver specific capability matching function. + * @match_data: driver specific data for match() (e.g., a + * table of pci_device_id's if umc_match_pci_id() is used). + */ +struct umc_driver { + char *name; + u8 cap_id; + int (*match)(struct umc_driver *, struct umc_dev *); + const void *match_data; + + int (*probe)(struct umc_dev *); + void (*remove)(struct umc_dev *); + int (*suspend)(struct umc_dev *, pm_message_t state); + int (*resume)(struct umc_dev *); + + struct device_driver driver; +}; + +#define to_umc_driver(d) container_of(d, struct umc_driver, driver) + +extern struct bus_type umc_bus_type; + +struct umc_dev *umc_device_create(struct device *parent, int n); +int __must_check umc_device_register(struct umc_dev *umc); +void umc_device_unregister(struct umc_dev *umc); + +int __must_check __umc_driver_register(struct umc_driver *umc_drv, + struct module *mod, + const char *mod_name); + +/** + * umc_driver_register - register a UMC capabiltity driver. + * @umc_drv: pointer to the driver. 
+ */ +static inline int __must_check umc_driver_register(struct umc_driver *umc_drv) +{ + return __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME); +} +void umc_driver_unregister(struct umc_driver *umc_drv); + +/* + * Utility function you can use to match (umc_driver->match) against a + * null-terminated array of 'struct pci_device_id' in + * umc_driver->match_data. + */ +int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc); + +/** + * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none + * @umc_dev: UMC device whose parent PCI device we are looking for + * + * DIRTY!!! DON'T RELY ON THIS + * + * FIXME: This is as dirty as it gets, but we need some way to check + * the correct type of umc_dev->parent (so that for example, we can + * cast to pci_dev). Casting to pci_dev is necesary because at some + * point we need to request resources from the device. Mapping is + * easily over come (ioremap and stuff are bus agnostic), but hooking + * up to some error handlers (such as pci error handlers) might need + * this. + * + * THIS might (probably will) be removed in the future, so don't count + * on it. + */ +static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev) +{ + struct pci_dev *pci_dev = NULL; + if (umc_dev->dev.parent->bus == &pci_bus_type) + pci_dev = to_pci_dev(umc_dev->dev.parent); + return pci_dev; +} + +/** + * umc_dev_get() - reference a UMC device. + * @umc_dev: Pointer to UMC device. + * + * NOTE: we are assuming in this whole scheme that the parent device + * is referenced at _probe() time and unreferenced at _remove() + * time by the parent's subsystem. + */ +static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev) +{ + get_device(&umc_dev->dev); + return umc_dev; +} + +/** + * umc_dev_put() - unreference a UMC device. + * @umc_dev: Pointer to UMC device. + */ +static inline void umc_dev_put(struct umc_dev *umc_dev) +{ + put_device(&umc_dev->dev); +} + +/** + * umc_set_drvdata - set UMC device's driver data. + * @umc_dev: Pointer to UMC device. + * @data: Data to set. + */ +static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data) +{ + dev_set_drvdata(&umc_dev->dev, data); +} + +/** + * umc_get_drvdata - recover UMC device's driver data. + * @umc_dev: Pointer to UMC device. + */ +static inline void *umc_get_drvdata(struct umc_dev *umc_dev) +{ + return dev_get_drvdata(&umc_dev->dev); +} + +int umc_controller_reset(struct umc_dev *umc); + +#endif /* #ifndef _LINUX_UWB_UMC_H_ */ -- cgit v0.10.2 From 8f1b678ab900c2bda1620dfb6e1f1f02604fc3a2 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:13 +0100 Subject: uwb: add the driver to enumerate WHCI capabilities This enumerates the capabilties of a WHCI device, adding a umc device for each one. Signed-off-by: David Vrabel diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index 41c9fca..b054471 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_UWB) += uwb.o -obj-$(CONFIG_UWB_WHCI) += umc.o +obj-$(CONFIG_UWB_WHCI) += umc.o whci.o uwb-objs := \ address.o \ diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c new file mode 100644 index 0000000..3df2388 --- /dev/null +++ b/drivers/uwb/whci.c @@ -0,0 +1,269 @@ +/* + * WHCI UWB Multi-interface Controller enumerator. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This file is released under the GNU GPL v2. 
+ */ +#include +#include +#include +#include +#include +#include + +struct whci_card { + struct pci_dev *pci; + void __iomem *uwbbase; + u8 n_caps; + struct umc_dev *devs[0]; +}; + + +/* Fix faulty HW :( */ +static +u64 whci_capdata_quirks(struct whci_card *card, u64 capdata) +{ + u64 capdata_orig = capdata; + struct pci_dev *pci_dev = card->pci; + if (pci_dev->vendor == PCI_VENDOR_ID_INTEL + && (pci_dev->device == 0x0c3b || pci_dev->device == 0004) + && pci_dev->class == 0x0d1010) { + switch (UWBCAPDATA_TO_CAP_ID(capdata)) { + /* WLP capability has 0x100 bytes of aperture */ + case 0x80: + capdata |= 0x40 << 8; break; + /* WUSB capability has 0x80 bytes of aperture + * and ID is 1 */ + case 0x02: + capdata &= ~0xffff; + capdata |= 0x2001; + break; + } + } + if (capdata_orig != capdata) + dev_warn(&pci_dev->dev, + "PCI v%04x d%04x c%06x#%02x: " + "corrected capdata from %016Lx to %016Lx\n", + pci_dev->vendor, pci_dev->device, pci_dev->class, + (unsigned)UWBCAPDATA_TO_CAP_ID(capdata), + (unsigned long long)capdata_orig, + (unsigned long long)capdata); + return capdata; +} + + +/** + * whci_wait_for - wait for a WHCI register to be set + * + * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'. + */ +int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result, + unsigned long max_ms, const char *tag) +{ + unsigned t = 0; + u32 val; + for (;;) { + val = le_readl(reg); + if ((val & mask) == result) + break; + msleep(10); + if (t >= max_ms) { + dev_err(dev, "timed out waiting for %s ", tag); + return -ETIMEDOUT; + } + t += 10; + } + return 0; +} +EXPORT_SYMBOL_GPL(whci_wait_for); + + +/* + * NOTE: the capinfo and capdata registers are slightly different + * (size and cap-id fields). So for cap #0, we need to fill + * in. Size comes from the size of the register block + * (statically calculated); cap_id comes from nowhere, we use + * zero, that is reserved, for the radio controller, because + * none was defined at the spec level. + */ +static int whci_add_cap(struct whci_card *card, int n) +{ + struct umc_dev *umc; + u64 capdata; + int bar, err; + + umc = umc_device_create(&card->pci->dev, n); + if (umc == NULL) + return -ENOMEM; + + capdata = le_readq(card->uwbbase + UWBCAPDATA(n)); + + bar = UWBCAPDATA_TO_BAR(capdata) << 1; + + capdata = whci_capdata_quirks(card, capdata); + /* Capability 0 is the radio controller. It's size is 32 + * bytes (WHCI0.95[2.3, T2-9]). */ + umc->version = UWBCAPDATA_TO_VERSION(capdata); + umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata); + umc->bar = bar; + umc->resource.start = pci_resource_start(card->pci, bar) + + UWBCAPDATA_TO_OFFSET(capdata); + umc->resource.end = umc->resource.start + + (n == 0 ? 
0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; + umc->resource.name = umc->dev.bus_id; + umc->resource.flags = card->pci->resource[bar].flags; + umc->resource.parent = &card->pci->resource[bar]; + umc->irq = card->pci->irq; + + err = umc_device_register(umc); + if (err < 0) + goto error; + card->devs[n] = umc; + return 0; + +error: + kfree(umc); + return err; +} + +static void whci_del_cap(struct whci_card *card, int n) +{ + struct umc_dev *umc = card->devs[n]; + + if (umc != NULL) + umc_device_unregister(umc); +} + +static int whci_n_caps(struct pci_dev *pci) +{ + void __iomem *uwbbase; + u64 capinfo; + + uwbbase = pci_iomap(pci, 0, 8); + if (!uwbbase) + return -ENOMEM; + capinfo = le_readq(uwbbase + UWBCAPINFO); + pci_iounmap(pci, uwbbase); + + return UWBCAPINFO_TO_N_CAPS(capinfo); +} + +static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + struct whci_card *card; + int err, n_caps, n; + + err = pci_enable_device(pci); + if (err < 0) + goto error; + pci_enable_msi(pci); + pci_set_master(pci); + err = -ENXIO; + if (!pci_set_dma_mask(pci, DMA_64BIT_MASK)) + pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK); + else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK)) + pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK); + else + goto error_dma; + + err = n_caps = whci_n_caps(pci); + if (n_caps < 0) + goto error_ncaps; + + err = -ENOMEM; + card = kzalloc(sizeof(struct whci_card) + + sizeof(struct whci_dev *) * (n_caps + 1), + GFP_KERNEL); + if (card == NULL) + goto error_kzalloc; + card->pci = pci; + card->n_caps = n_caps; + + err = -EBUSY; + if (!request_mem_region(pci_resource_start(pci, 0), + UWBCAPDATA_SIZE(card->n_caps), + "whci (capability data)")) + goto error_request_memregion; + err = -ENOMEM; + card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps)); + if (!card->uwbbase) + goto error_iomap; + + /* Add each capability. */ + for (n = 0; n <= card->n_caps; n++) { + err = whci_add_cap(card, n); + if (err < 0 && n == 0) { + dev_err(&pci->dev, "cannot bind UWB radio controller:" + " %d\n", err); + goto error_bind; + } + if (err < 0) + dev_warn(&pci->dev, "warning: cannot bind capability " + "#%u: %d\n", n, err); + } + pci_set_drvdata(pci, card); + return 0; + +error_bind: + pci_iounmap(pci, card->uwbbase); +error_iomap: + release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); +error_request_memregion: + kfree(card); +error_kzalloc: +error_ncaps: +error_dma: + pci_disable_msi(pci); + pci_disable_device(pci); +error: + return err; +} + +static void whci_remove(struct pci_dev *pci) +{ + struct whci_card *card = pci_get_drvdata(pci); + int n; + + pci_set_drvdata(pci, NULL); + /* Unregister each capability in reverse (so the master device + * is unregistered last). 
*/ + for (n = card->n_caps; n >= 0 ; n--) + whci_del_cap(card, n); + pci_iounmap(pci, card->uwbbase); + release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps)); + kfree(card); + pci_disable_msi(pci); + pci_disable_device(pci); +} + +static struct pci_device_id whci_id_table[] = { + { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, whci_id_table); + + +static struct pci_driver whci_driver = { + .name = "whci", + .id_table = whci_id_table, + .probe = whci_probe, + .remove = whci_remove, +}; + +static int __init whci_init(void) +{ + return pci_register_driver(&whci_driver); +} + +static void __exit whci_exit(void) +{ + pci_unregister_driver(&whci_driver); +} + +module_init(whci_init); +module_exit(whci_exit); + +MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator"); +MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); +MODULE_LICENSE("GPL"); diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h new file mode 100644 index 0000000..915ec23 --- /dev/null +++ b/include/linux/uwb/whci.h @@ -0,0 +1,117 @@ +/* + * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * + * References: + * [WHCI] Wireless Host Controller Interface Specification for + * Certified Wireless Universal Serial Bus, revision 0.95. + */ +#ifndef _LINUX_UWB_WHCI_H_ +#define _LINUX_UWB_WHCI_H_ + +#include + +/* + * UWB interface capability registers (offsets from UWBBASE) + * + * [WHCI] section 2.2 + */ +#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */ +# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) +#define UWBCAPDATA(n) (8*(n)) +# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) +# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) +# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) +# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) +# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) + +/* Size of the WHCI capability data (including the RC capability) for + a device with n capabilities. 
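+   Each UWBCAPDATA register is 8 bytes and capability 0 (the radio
+   controller, whose register doubles as UWBCAPINFO) is counted too,
+   so e.g. a card reporting 3 capabilities has 8 + 8*3 = 32 bytes of
+   capability data.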
*/ +#define UWBCAPDATA_SIZE(n) (8 + 8*(n)) + + +/* + * URC registers (offsets from URCBASE) + * + * [WHCI] section 2.3 + */ +#define URCCMD 0x00 +# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */ +# define URCCMD_RS (1 << 30) /* Run/Stop */ +# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */ +# define URCCMD_ACTIVE (1 << 15) /* Command is active */ +# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */ +# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */ +#define URCSTS 0x04 +# define URCSTS_EPS (1 << 17) /* Event Processing Status */ +# define URCSTS_HALTED (1 << 16) /* RC halted */ +# define URCSTS_HSE (1 << 10) /* Host System Error...fried */ +# define URCSTS_ER (1 << 9) /* Event Ready */ +# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */ +# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */ +# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */ +#define URCINTR 0x08 +# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */ +#define URCCMDADDR 0x10 +#define URCEVTADDR 0x18 +# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */ + + +/** Write 32 bit @value to little endian register at @addr */ +static inline +void le_writel(u32 value, void __iomem *addr) +{ + iowrite32(value, addr); +} + + +/** Read from 32 bit little endian register at @addr */ +static inline +u32 le_readl(void __iomem *addr) +{ + return ioread32(addr); +} + + +/** Write 64 bit @value to little endian register at @addr */ +static inline +void le_writeq(u64 value, void __iomem *addr) +{ + iowrite32(value, addr); + iowrite32(value >> 32, addr + 4); +} + + +/** Read from 64 bit little endian register at @addr */ +static inline +u64 le_readq(void __iomem *addr) +{ + u64 value; + value = ioread32(addr); + value |= (u64)ioread32(addr + 4) << 32; + return value; +} + +extern int whci_wait_for(struct device *dev, u32 __iomem *reg, + u32 mask, u32 result, + unsigned long max_ms, const char *tag); + +#endif /* #ifndef _LINUX_UWB_WHCI_H_ */ -- cgit v0.10.2 From b6e069830c5927fd4d5fce67cb6440fddd10d429 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:14 +0100 Subject: uwb: add whc-rc radio control driver Add the driver for WHCI radio controllers. Signed-off-by: David Vrabel diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig index 3f01a69..c0eb973 100644 --- a/drivers/uwb/Kconfig +++ b/drivers/uwb/Kconfig @@ -26,3 +26,23 @@ menuconfig UWB . To compile the UWB stack as a module, choose M here. + +if UWB + +config UWB_WHCI + tristate "UWB Radio Control driver for WHCI-compliant cards" + depends on PCI + help + This driver enables the radio controller for WHCI cards. + + WHCI is an specification developed by Intel + (http://www.intel.com/technology/comms/wusb/whci.htm) much + in the spirit of USB's EHCI, but for UWB and Wireless USB + radio/host controllers connected via memmory mapping (eg: + PCI). Most of these cards come also with a Wireless USB host + controller. + + To compile this driver select Y (built in) or M (module). It + is safe to select any even if you do not have the hardware. 
+ +endif # UWB diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index b054471..bdcb494 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_UWB) += uwb.o -obj-$(CONFIG_UWB_WHCI) += umc.o whci.o +obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o uwb-objs := \ address.o \ diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c new file mode 100644 index 0000000..5a93abe --- /dev/null +++ b/drivers/uwb/whc-rc.c @@ -0,0 +1,528 @@ +/* + * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3]) + * Radio Control command/event transport to the UWB stack + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Initialize and hook up the Radio Control interface. + * + * For each device probed, creates an 'struct whcrc' which contains + * just the representation of the UWB Radio Controller, and the logic + * for reading notifications and passing them to the UWB Core. + * + * So we initialize all of those, register the UWB Radio Controller + * and setup the notification/event handle to pipe the notifications + * to the UWB management Daemon. + * + * Once uwb_rc_add() is called, the UWB stack takes control, resets + * the radio and readies the device to take commands the UWB + * API/user-space. + * + * Note this driver is just a transport driver; the commands are + * formed at the UWB stack and given to this driver who will deliver + * them to the hw and transfer the replies/notifications back to the + * UWB stack through the UWB daemon (UWBD). + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "uwb-internal.h" + +#define D_LOCAL 0 +#include + +/** + * Descriptor for an instance of the UWB Radio Control Driver that + * attaches to the URC interface of the WHCI PCI card. + * + * Unless there is a lock specific to the 'data members', all access + * is protected by uwb_rc->mutex. + */ +struct whcrc { + struct umc_dev *umc_dev; + struct uwb_rc *uwb_rc; /* UWB host controller */ + + unsigned long area; + void __iomem *rc_base; + size_t rc_len; + spinlock_t irq_lock; + + void *evt_buf, *cmd_buf; + dma_addr_t evt_dma_buf, cmd_dma_buf; + wait_queue_head_t cmd_wq; + struct work_struct event_work; +}; + +/** + * Execute an UWB RC command on WHCI/RC + * + * @rc: Instance of a Radio Controller that is a whcrc + * @cmd: Buffer containing the RCCB and payload to execute + * @cmd_size: Size of the command buffer. + * + * We copy the command into whcrc->cmd_buf (as it is pretty and + * aligned`and physically contiguous) and then press the right keys in + * the controller's URCCMD register to get it to read it. We might + * have to wait for the cmd_sem to be open to us. 
+ * + * NOTE: rc's mutex has to be locked + */ +static int whcrc_cmd(struct uwb_rc *uwb_rc, + const struct uwb_rccb *cmd, size_t cmd_size) +{ + int result = 0; + struct whcrc *whcrc = uwb_rc->priv; + struct device *dev = &whcrc->umc_dev->dev; + u32 urccmd; + + d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size); + might_sleep(); + + if (cmd_size >= 4096) { + result = -E2BIG; + goto error; + } + + /* + * If the URC is halted, then the hardware has reset itself. + * Attempt to recover by restarting the device and then return + * an error as it's likely that the current command isn't + * valid for a newly started RC. + */ + if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { + dev_err(dev, "requesting reset of halted radio controller\n"); + uwb_rc_reset_all(uwb_rc); + result = -EIO; + goto error; + } + + result = wait_event_timeout(whcrc->cmd_wq, + !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); + if (result == 0) { + dev_err(dev, "device is not ready to execute commands\n"); + result = -ETIMEDOUT; + goto error; + } + + memmove(whcrc->cmd_buf, cmd, cmd_size); + le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR); + + spin_lock(&whcrc->irq_lock); + urccmd = le_readl(whcrc->rc_base + URCCMD); + urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK); + le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size, + whcrc->rc_base + URCCMD); + spin_unlock(&whcrc->irq_lock); + +error: + d_fnend(3, dev, "(%p, %p, %zu) = %d\n", + uwb_rc, cmd, cmd_size, result); + return result; +} + +static int whcrc_reset(struct uwb_rc *rc) +{ + struct whcrc *whcrc = rc->priv; + + return umc_controller_reset(whcrc->umc_dev); +} + +/** + * Reset event reception mechanism and tell hw we are ready to get more + * + * We have read all the events in the event buffer, so we are ready to + * reset it to the beginning. + * + * This is only called during initialization or after an event buffer + * has been retired. This means we can be sure that event processing + * is disabled and it's safe to update the URCEVTADDR register. + * + * There's no need to wait for the event processing to start as the + * URC will not clear URCCMD_ACTIVE until (internal) event buffer + * space is available. + */ +static +void whcrc_enable_events(struct whcrc *whcrc) +{ + struct device *dev = &whcrc->umc_dev->dev; + u32 urccmd; + + d_fnstart(4, dev, "(whcrc %p)\n", whcrc); + + le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); + + spin_lock(&whcrc->irq_lock); + urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; + le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); + spin_unlock(&whcrc->irq_lock); + + d_fnend(4, dev, "(whcrc %p) = void\n", whcrc); +} + +static void whcrc_event_work(struct work_struct *work) +{ + struct whcrc *whcrc = container_of(work, struct whcrc, event_work); + struct device *dev = &whcrc->umc_dev->dev; + size_t size; + u64 urcevtaddr; + + urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); + size = urcevtaddr & URCEVTADDR_OFFSET_MASK; + + d_printf(3, dev, "received %zu octet event\n", size); + d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size); + + uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); + whcrc_enable_events(whcrc); +} + +/** + * Catch interrupts? 
+ * + * We ack inmediately (and expect the hw to do the right thing and + * raise another IRQ if things have changed :) + */ +static +irqreturn_t whcrc_irq_cb(int irq, void *_whcrc) +{ + struct whcrc *whcrc = _whcrc; + struct device *dev = &whcrc->umc_dev->dev; + u32 urcsts; + + d_fnstart(4, dev, "irq %d _whcrc %p)\n", irq, _whcrc); + urcsts = le_readl(whcrc->rc_base + URCSTS); + if (!(urcsts & URCSTS_INT_MASK)) + return IRQ_NONE; + le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); + + d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n", + le_readl(whcrc->rc_base + URCSTS), urcsts); + + if (whcrc->uwb_rc == NULL) { + if (printk_ratelimit()) + dev_dbg(dev, "Received interrupt when not yet " + "ready!\n"); + goto out; + } + + if (urcsts & URCSTS_HSE) { + dev_err(dev, "host system error -- hardware halted\n"); + /* FIXME: do something sensible here */ + goto out; + } + if (urcsts & URCSTS_ER) { + d_printf(3, dev, "ER: event ready\n"); + schedule_work(&whcrc->event_work); + } + if (urcsts & URCSTS_RCI) { + d_printf(3, dev, "RCI: ready to execute another command\n"); + wake_up_all(&whcrc->cmd_wq); + } +out: + return IRQ_HANDLED; +} + + +/** + * Initialize a UMC RC interface: map regions, get (shared) IRQ + */ +static +int whcrc_setup_rc_umc(struct whcrc *whcrc) +{ + int result = 0; + struct device *dev = &whcrc->umc_dev->dev; + struct umc_dev *umc_dev = whcrc->umc_dev; + + whcrc->area = umc_dev->resource.start; + whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; + result = -EBUSY; + if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) + == NULL) { + dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", + whcrc->rc_len, whcrc->area, result); + goto error_request_region; + } + + whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len); + if (whcrc->rc_base == NULL) { + dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n", + whcrc->rc_len, whcrc->area, result); + goto error_ioremap_nocache; + } + + result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED, + KBUILD_MODNAME, whcrc); + if (result < 0) { + dev_err(dev, "can't allocate IRQ %d: %d\n", + umc_dev->irq, result); + goto error_request_irq; + } + + result = -ENOMEM; + whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, + &whcrc->cmd_dma_buf, GFP_KERNEL); + if (whcrc->cmd_buf == NULL) { + dev_err(dev, "Can't allocate cmd transfer buffer\n"); + goto error_cmd_buffer; + } + + whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, + &whcrc->evt_dma_buf, GFP_KERNEL); + if (whcrc->evt_buf == NULL) { + dev_err(dev, "Can't allocate evt transfer buffer\n"); + goto error_evt_buffer; + } + d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n", + whcrc->rc_len, whcrc->rc_base, umc_dev->irq); + return 0; + +error_evt_buffer: + dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, + whcrc->cmd_dma_buf); +error_cmd_buffer: + free_irq(umc_dev->irq, whcrc); +error_request_irq: + iounmap(whcrc->rc_base); +error_ioremap_nocache: + release_mem_region(whcrc->area, whcrc->rc_len); +error_request_region: + return result; +} + + +/** + * Release RC's UMC resources + */ +static +void whcrc_release_rc_umc(struct whcrc *whcrc) +{ + struct umc_dev *umc_dev = whcrc->umc_dev; + + dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf, + whcrc->evt_dma_buf); + dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, + whcrc->cmd_dma_buf); + free_irq(umc_dev->irq, whcrc); + iounmap(whcrc->rc_base); + release_mem_region(whcrc->area, whcrc->rc_len); +} + + +/** 
+ * whcrc_start_rc - start a WHCI radio controller + * @whcrc: the radio controller to start + * + * Reset the UMC device, start the radio controller, enable events and + * finally enable interrupts. + */ +static int whcrc_start_rc(struct uwb_rc *rc) +{ + struct whcrc *whcrc = rc->priv; + int result = 0; + struct device *dev = &whcrc->umc_dev->dev; + unsigned long start, duration; + + /* Reset the thing */ + le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); + if (d_test(3)) + start = jiffies; + if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, + 5000, "device to reset at init") < 0) { + result = -EBUSY; + goto error; + } else if (d_test(3)) { + duration = jiffies - start; + if (duration > msecs_to_jiffies(40)) + dev_err(dev, "Device took %ums to " + "reset. MAX expected: 40ms\n", + jiffies_to_msecs(duration)); + } + + /* Set the event buffer, start the controller (enable IRQs later) */ + le_writel(0, whcrc->rc_base + URCINTR); + le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); + result = -ETIMEDOUT; + if (d_test(3)) + start = jiffies; + if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, + 5000, "device to start") < 0) + goto error; + if (d_test(3)) { + duration = jiffies - start; + if (duration > msecs_to_jiffies(40)) + dev_err(dev, "Device took %ums to start. " + "MAX expected: 40ms\n", + jiffies_to_msecs(duration)); + } + whcrc_enable_events(whcrc); + result = 0; + le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); +error: + return result; +} + + +/** + * whcrc_stop_rc - stop a WHCI radio controller + * @whcrc: the radio controller to stop + * + * Disable interrupts and cancel any pending event processing work + * before clearing the Run/Stop bit. + */ +static +void whcrc_stop_rc(struct uwb_rc *rc) +{ + struct whcrc *whcrc = rc->priv; + struct umc_dev *umc_dev = whcrc->umc_dev; + + le_writel(0, whcrc->rc_base + URCINTR); + cancel_work_sync(&whcrc->event_work); + + le_writel(0, whcrc->rc_base + URCCMD); + whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, + URCSTS_HALTED, 0, 40, "URCSTS.HALTED"); +} + +static void whcrc_init(struct whcrc *whcrc) +{ + spin_lock_init(&whcrc->irq_lock); + init_waitqueue_head(&whcrc->cmd_wq); + INIT_WORK(&whcrc->event_work, whcrc_event_work); +} + +/** + * Initialize the radio controller. + * + * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the + * IRQ handler we use that to determine if the hw is ready to + * handle events. Looks like a race condition, but it really is + * not. 
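+ * Interrupt sources are only enabled in whcrc_start_rc() (via
+ * URCINTR), and whcrc_irq_cb() acks and drops anything a shared IRQ
+ * line delivers while whcrc->uwb_rc is still NULL.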
+ */ +static +int whcrc_probe(struct umc_dev *umc_dev) +{ + int result; + struct uwb_rc *uwb_rc; + struct whcrc *whcrc; + struct device *dev = &umc_dev->dev; + + d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev); + result = -ENOMEM; + uwb_rc = uwb_rc_alloc(); + if (uwb_rc == NULL) { + dev_err(dev, "unable to allocate RC instance\n"); + goto error_rc_alloc; + } + whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL); + if (whcrc == NULL) { + dev_err(dev, "unable to allocate WHC-RC instance\n"); + goto error_alloc; + } + whcrc_init(whcrc); + whcrc->umc_dev = umc_dev; + + result = whcrc_setup_rc_umc(whcrc); + if (result < 0) { + dev_err(dev, "Can't setup RC UMC interface: %d\n", result); + goto error_setup_rc_umc; + } + whcrc->uwb_rc = uwb_rc; + + uwb_rc->owner = THIS_MODULE; + uwb_rc->cmd = whcrc_cmd; + uwb_rc->reset = whcrc_reset; + uwb_rc->start = whcrc_start_rc; + uwb_rc->stop = whcrc_stop_rc; + + result = uwb_rc_add(uwb_rc, dev, whcrc); + if (result < 0) + goto error_rc_add; + umc_set_drvdata(umc_dev, whcrc); + d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev); + return 0; + +error_rc_add: + whcrc_release_rc_umc(whcrc); +error_setup_rc_umc: + kfree(whcrc); +error_alloc: + uwb_rc_put(uwb_rc); +error_rc_alloc: + d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result); + return result; +} + +/** + * Clean up the radio control resources + * + * When we up the command semaphore, everybody possibly held trying to + * execute a command should be granted entry and then they'll see the + * host is quiescing and up it (so it will chain to the next waiter). + * This should not happen (in any case), as we can only remove when + * there are no handles open... + */ +static void whcrc_remove(struct umc_dev *umc_dev) +{ + struct whcrc *whcrc = umc_get_drvdata(umc_dev); + struct uwb_rc *uwb_rc = whcrc->uwb_rc; + + umc_set_drvdata(umc_dev, NULL); + uwb_rc_rm(uwb_rc); + whcrc_release_rc_umc(whcrc); + kfree(whcrc); + uwb_rc_put(uwb_rc); + d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc); +} + +/* PCI device ID's that we handle [so it gets loaded] */ +static struct pci_device_id whcrc_id_table[] = { + { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, + { /* empty last entry */ } +}; +MODULE_DEVICE_TABLE(pci, whcrc_id_table); + +static struct umc_driver whcrc_driver = { + .name = "whc-rc", + .cap_id = UMC_CAP_ID_WHCI_RC, + .probe = whcrc_probe, + .remove = whcrc_remove, +}; + +static int __init whcrc_driver_init(void) +{ + return umc_driver_register(&whcrc_driver); +} +module_init(whcrc_driver_init); + +static void __exit whcrc_driver_exit(void) +{ + umc_driver_unregister(&whcrc_driver); +} +module_exit(whcrc_driver_exit); + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From de520b8bd5525d33e6a6f36b297836125736bd2a Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:15 +0100 Subject: uwb: add HWA radio controller driver Add a driver for USB-connected UWB radio controllers (HWAs). Signed-off-by: David Vrabel diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig index c0eb973..a442f39 100644 --- a/drivers/uwb/Kconfig +++ b/drivers/uwb/Kconfig @@ -29,6 +29,18 @@ menuconfig UWB if UWB +config UWB_HWA + tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)" + depends on USB + help + This driver enables the radio controller for HWA USB + devices. HWA stands for Host Wire Adapter, and it is a UWB + Radio Controller connected to your system via USB. 
Most of + them come with a Wireless USB host controller also. + + To compile this driver select Y (built in) or M (module). It + is safe to select any even if you do not have the hardware. + config UWB_WHCI tristate "UWB Radio Control driver for WHCI-compliant cards" depends on PCI diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index bdcb494..6bdb8e7 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_UWB) += uwb.o obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o +obj-$(CONFIG_UWB_HWA) += hwa-rc.o uwb-objs := \ address.o \ diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c new file mode 100644 index 0000000..f822a18 --- /dev/null +++ b/drivers/uwb/hwa-rc.c @@ -0,0 +1,911 @@ +/* + * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6]) + * Radio Control command/event transport + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Initialize the Radio Control interface Driver. + * + * For each device probed, creates an 'struct hwarc' which contains + * just the representation of the UWB Radio Controller, and the logic + * for reading notifications and passing them to the UWB Core. + * + * So we initialize all of those, register the UWB Radio Controller + * and setup the notification/event handle to pipe the notifications + * to the UWB management Daemon. + * + * Command and event filtering. + * + * This is the driver for the Radio Control Interface described in WUSB + * 1.0. The core UWB module assumes that all drivers are compliant to the + * WHCI 0.95 specification. We thus create a filter that parses all + * incoming messages from the (WUSB 1.0) device and manipulate them to + * conform to the WHCI 0.95 specification. Similarly, outgoing messages + * are parsed and manipulated to conform to the WUSB 1.0 compliant messages + * that the device expects. Only a few messages are affected: + * Affected events: + * UWB_RC_EVT_BEACON + * UWB_RC_EVT_BP_SLOT_CHANGE + * UWB_RC_EVT_DRP_AVAIL + * UWB_RC_EVT_DRP + * Affected commands: + * UWB_RC_CMD_SCAN + * UWB_RC_CMD_SET_DRP_IE + * + * + * + */ +#include +#include +#include +#include +#include +#include +#include +#include "uwb-internal.h" +#define D_LOCAL 1 +#include + + +/** + * Descriptor for an instance of the UWB Radio Control Driver that + * attaches to the RCI interface of the Host Wired Adapter. + * + * Unless there is a lock specific to the 'data members', all access + * is protected by uwb_rc->mutex. + * + * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to + * @rd_buffer. Note there is no locking because it is perfectly (heh!) + * serialized--probe() submits an URB, callback is called, processes + * the data (synchronously), submits another URB, and so on. There is + * no concurrent access to the buffer. 
+ */ +struct hwarc { + struct usb_device *usb_dev; + struct usb_interface *usb_iface; + struct uwb_rc *uwb_rc; /* UWB host controller */ + struct urb *neep_urb; /* Notification endpoint handling */ + struct edc neep_edc; + void *rd_buffer; /* NEEP read buffer */ +}; + + +/* Beacon received notification (WUSB 1.0 [8.6.3.2]) */ +struct uwb_rc_evt_beacon_WUSB_0100 { + struct uwb_rceb rceb; + u8 bChannelNumber; + __le16 wBPSTOffset; + u8 bLQI; + u8 bRSSI; + __le16 wBeaconInfoLength; + u8 BeaconInfo[]; +} __attribute__((packed)); + +/** + * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95 + * + * @header: the incoming event + * @buf_size: size of buffer containing incoming event + * @new_size: size of event after filtering completed + * + * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at + * the time we receive the beacon from WUSB so we just set it to + * UWB_RC_BEACON_TYPE_NEIGHBOR as a default. + * The solution below allocates memory upon receipt of every beacon from a + * WUSB device. This will deteriorate performance. What is the right way to + * do this? + */ +static +int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc, + struct uwb_rceb **header, + const size_t buf_size, + size_t *new_size) +{ + struct uwb_rc_evt_beacon_WUSB_0100 *be; + struct uwb_rc_evt_beacon *newbe; + size_t bytes_left, ielength; + struct device *dev = &rc->uwb_dev.dev; + + be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb); + bytes_left = buf_size; + if (bytes_left < sizeof(*be)) { + dev_err(dev, "Beacon Received Notification: Not enough data " + "to decode for filtering (%zu vs %zu bytes needed)\n", + bytes_left, sizeof(*be)); + return -EINVAL; + } + bytes_left -= sizeof(*be); + ielength = le16_to_cpu(be->wBeaconInfoLength); + if (bytes_left < ielength) { + dev_err(dev, "Beacon Received Notification: Not enough data " + "to decode IEs (%zu vs %zu bytes needed)\n", + bytes_left, ielength); + return -EINVAL; + } + newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC); + if (newbe == NULL) + return -ENOMEM; + newbe->rceb = be->rceb; + newbe->bChannelNumber = be->bChannelNumber; + newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR; + newbe->wBPSTOffset = be->wBPSTOffset; + newbe->bLQI = be->bLQI; + newbe->bRSSI = be->bRSSI; + newbe->wBeaconInfoLength = be->wBeaconInfoLength; + memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength); + *header = &newbe->rceb; + *new_size = sizeof(*newbe) + ielength; + return 1; /* calling function will free memory */ +} + + +/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]) */ +struct uwb_rc_evt_drp_avail_WUSB_0100 { + struct uwb_rceb rceb; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/** + * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95 + * + * @header: the incoming event + * @buf_size: size of buffer containing incoming event + * @new_size: size of event after filtering completed + */ +static +int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc, + struct uwb_rceb **header, + const size_t buf_size, + size_t *new_size) +{ + struct uwb_rc_evt_drp_avail_WUSB_0100 *da; + struct uwb_rc_evt_drp_avail *newda; + struct uwb_ie_hdr *ie_hdr; + size_t bytes_left, ielength; + struct device *dev = &rc->uwb_dev.dev; + + + da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb); + bytes_left = buf_size; + if (bytes_left < sizeof(*da)) { + dev_err(dev, "Not enough data to decode DRP Avail " + "Notification for filtering. 
Expected %zu, " + "received %zu.\n", (size_t)sizeof(*da), bytes_left); + return -EINVAL; + } + bytes_left -= sizeof(*da); + ielength = le16_to_cpu(da->wIELength); + if (bytes_left < ielength) { + dev_err(dev, "DRP Avail Notification filter: IE length " + "[%zu bytes] does not match actual length " + "[%zu bytes].\n", ielength, bytes_left); + return -EINVAL; + } + if (ielength < sizeof(*ie_hdr)) { + dev_err(dev, "DRP Avail Notification filter: Not enough " + "data to decode IE [%zu bytes, %zu needed]\n", + ielength, sizeof(*ie_hdr)); + return -EINVAL; + } + ie_hdr = (void *) da->IEData; + if (ie_hdr->length > 32) { + dev_err(dev, "DRP Availability Change event has unexpected " + "length for filtering. Expected < 32 bytes, " + "got %zu bytes.\n", (size_t)ie_hdr->length); + return -EINVAL; + } + newda = kzalloc(sizeof(*newda), GFP_ATOMIC); + if (newda == NULL) + return -ENOMEM; + newda->rceb = da->rceb; + memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length); + *header = &newda->rceb; + *new_size = sizeof(*newda); + return 1; /* calling function will free memory */ +} + + +/* DRP notification (WUSB 1.0 [8.6.3.9]) */ +struct uwb_rc_evt_drp_WUSB_0100 { + struct uwb_rceb rceb; + struct uwb_dev_addr wSrcAddr; + u8 bExplicit; + __le16 wIELength; + u8 IEData[]; +} __attribute__((packed)); + +/** + * Filter WUSB 1.0 DRP Notification to be WHCI 0.95 + * + * @header: the incoming event + * @buf_size: size of buffer containing incoming event + * @new_size: size of event after filtering completed + * + * It is hard to manage DRP reservations without having a Reason code. + * Unfortunately there is none in the WUSB spec. We just set the default to + * DRP IE RECEIVED. + * We do not currently use the bBeaconSlotNumber value, so we set this to + * zero for now. + */ +static +int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc, + struct uwb_rceb **header, + const size_t buf_size, + size_t *new_size) +{ + struct uwb_rc_evt_drp_WUSB_0100 *drpev; + struct uwb_rc_evt_drp *newdrpev; + size_t bytes_left, ielength; + struct device *dev = &rc->uwb_dev.dev; + + drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb); + bytes_left = buf_size; + if (bytes_left < sizeof(*drpev)) { + dev_err(dev, "Not enough data to decode DRP Notification " + "for filtering. 
Expected %zu, received %zu.\n", + (size_t)sizeof(*drpev), bytes_left); + return -EINVAL; + } + ielength = le16_to_cpu(drpev->wIELength); + bytes_left -= sizeof(*drpev); + if (bytes_left < ielength) { + dev_err(dev, "DRP Notification filter: header length [%zu " + "bytes] does not match actual length [%zu " + "bytes].\n", ielength, bytes_left); + return -EINVAL; + } + newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC); + if (newdrpev == NULL) + return -ENOMEM; + newdrpev->rceb = drpev->rceb; + newdrpev->src_addr = drpev->wSrcAddr; + newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD; + newdrpev->beacon_slot_number = 0; + newdrpev->ie_length = drpev->wIELength; + memcpy(newdrpev->ie_data, drpev->IEData, ielength); + *header = &newdrpev->rceb; + *new_size = sizeof(*newdrpev) + ielength; + return 1; /* calling function will free memory */ +} + + +/* Scan Command (WUSB 1.0 [8.6.2.5]) */ +struct uwb_rc_cmd_scan_WUSB_0100 { + struct uwb_rccb rccb; + u8 bChannelNumber; + u8 bScanState; +} __attribute__((packed)); + +/** + * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command + * + * @header: command sent to device (compliant to WHCI 0.95) + * @size: size of command sent to device + * + * We only reduce the size by two bytes because the WUSB 1.0 scan command + * does not have the last field (wStarttime). Also, make sure we don't send + * the device an unexpected scan type. + */ +static +int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc, + struct uwb_rccb **header, + size_t *size) +{ + struct uwb_rc_cmd_scan *sc; + + sc = container_of(*header, struct uwb_rc_cmd_scan, rccb); + + if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME) + sc->bScanState = UWB_SCAN_ONLY; + /* Don't send the last two bytes. */ + *size -= 2; + return 0; +} + + +/* SET DRP IE command (WUSB 1.0 [8.6.2.7]) */ +struct uwb_rc_cmd_set_drp_ie_WUSB_0100 { + struct uwb_rccb rccb; + u8 bExplicit; + __le16 wIELength; + struct uwb_ie_drp IEData[]; +} __attribute__((packed)); + +/** + * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command + * + * @header: command sent to device (compliant to WHCI 0.95) + * @size: size of command sent to device + * + * WUSB has an extra bExplicit field - we assume always explicit + * negotiation so this field is set. The command expected by the device is + * thus larger than the one prepared by the driver so we need to + * reallocate memory to accommodate this. + * We trust the driver to send us the correct data so no checking is done + * on incoming data - evn though it is variable length. + */ +static +int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc, + struct uwb_rccb **header, + size_t *size) +{ + struct uwb_rc_cmd_set_drp_ie *orgcmd; + struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd; + size_t ielength; + + orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb); + ielength = le16_to_cpu(orgcmd->wIELength); + cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + cmd->rccb = orgcmd->rccb; + cmd->bExplicit = 0; + cmd->wIELength = orgcmd->wIELength; + memcpy(cmd->IEData, orgcmd->IEData, ielength); + *header = &cmd->rccb; + *size = sizeof(*cmd) + ielength; + return 1; /* calling function will free memory */ +} + + +/** + * Filter data from WHCI driver to WUSB device + * + * @header: WHCI 0.95 compliant command from driver + * @size: length of command + * + * The routine managing commands to the device (uwb_rc_cmd()) will call the + * filtering function pointer (if it exists) before it passes any data to + * the device. 
At this time the command has been formatted according to + * WHCI 0.95 and is ready to be sent to the device. + * + * The filter function will be provided with the current command and its + * length. The function will manipulate the command if necessary and + * potentially reallocate memory for a command that needed more memory that + * the given command. If new memory was created the function will return 1 + * to indicate to the calling function that the memory need to be freed + * when not needed any more. The size will contain the new length of the + * command. + * If memory has not been allocated we rely on the original mechanisms to + * free the memory of the command - even when we reduce the value of size. + */ +static +int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header, + size_t *size) +{ + int result; + struct uwb_rccb *rccb = *header; + int cmd = le16_to_cpu(rccb->wCommand); + switch (cmd) { + case UWB_RC_CMD_SCAN: + result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size); + break; + case UWB_RC_CMD_SET_DRP_IE: + result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size); + break; + default: + result = -ENOANO; + break; + } + return result; +} + + +/** + * Filter data from WHCI driver to WUSB device + * + * @header: WHCI 0.95 compliant command from driver + * @size: length of command + * + * Filter commands based on which protocol the device supports. The WUSB + * errata should be the same as WHCI 0.95 so we do not filter that here - + * only WUSB 1.0. + */ +static +int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header, + size_t *size) +{ + int result = -ENOANO; + if (rc->version == 0x0100) + result = hwarc_filter_cmd_WUSB_0100(rc, header, size); + return result; +} + + +/** + * Compute return value as sum of incoming value and value at given offset + * + * @rceb: event for which we compute the size, it contains a variable + * length field. + * @core_size: size of the "non variable" part of the event + * @offset: place in event where the length of the variable part is stored + * @buf_size: total length of buffer in which event arrived - we need to make + * sure we read the offset in memory that is still part of the event + */ +static +ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, + size_t core_size, size_t offset, + const size_t buf_size) +{ + ssize_t size = -ENOSPC; + const void *ptr = rceb; + size_t type_size = sizeof(__le16); + struct device *dev = &rc->uwb_dev.dev; + + if (offset + type_size >= buf_size) { + dev_err(dev, "Not enough data to read extra size of event " + "0x%02x/%04x/%02x, only got %zu bytes.\n", + rceb->bEventType, le16_to_cpu(rceb->wEvent), + rceb->bEventContext, buf_size); + goto out; + } + ptr += offset; + size = core_size + le16_to_cpu(*(__le16 *)ptr); +out: + return size; +} + + +/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]) */ +struct uwb_rc_evt_bp_slot_change_WUSB_0100 { + struct uwb_rceb rceb; + u8 bSlotNumber; +} __attribute__((packed)); + + +/** + * Filter data from WUSB device to WHCI driver + * + * @header: incoming event + * @buf_size: size of buffer in which event arrived + * @_event_size: actual size of event in the buffer + * @new_size: size of event after filtered + * + * We don't know how the buffer is constructed - there may be more than one + * event in it so buffer length does not determine event length. We first + * determine the expected size of the incoming event. 
This value is passed + * back only if the actual filtering succeeded (so we know the computed + * expected size is correct). This value will be zero if + * the event did not need any filtering. + * + * WHCI interprets the BP Slot Change event's data differently than + * WUSB. The event sizes are exactly the same. The data field + * indicates the new beacon slot in which a RC is transmitting its + * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368 + * 17.16 (Table 117)). We thus know that the WUSB value will not set + * the bit bNoSlot, so we don't really do anything (placeholder). + */ +static +int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header, + const size_t buf_size, size_t *_real_size, + size_t *_new_size) +{ + int result = -ENOANO; + struct uwb_rceb *rceb = *header; + int event = le16_to_cpu(rceb->wEvent); + size_t event_size; + size_t core_size, offset; + + if (rceb->bEventType != UWB_RC_CET_GENERAL) + goto out; + switch (event) { + case UWB_RC_EVT_BEACON: + core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100); + offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100, + wBeaconInfoLength); + event_size = hwarc_get_event_size(rc, rceb, core_size, + offset, buf_size); + if (event_size < 0) + goto out; + *_real_size = event_size; + result = hwarc_filter_evt_beacon_WUSB_0100(rc, header, + buf_size, _new_size); + break; + case UWB_RC_EVT_BP_SLOT_CHANGE: + *_new_size = *_real_size = + sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100); + result = 0; + break; + + case UWB_RC_EVT_DRP_AVAIL: + core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100); + offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100, + wIELength); + event_size = hwarc_get_event_size(rc, rceb, core_size, + offset, buf_size); + if (event_size < 0) + goto out; + *_real_size = event_size; + result = hwarc_filter_evt_drp_avail_WUSB_0100( + rc, header, buf_size, _new_size); + break; + + case UWB_RC_EVT_DRP: + core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100); + offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength); + event_size = hwarc_get_event_size(rc, rceb, core_size, + offset, buf_size); + if (event_size < 0) + goto out; + *_real_size = event_size; + result = hwarc_filter_evt_drp_WUSB_0100(rc, header, + buf_size, _new_size); + break; + + default: + break; + } +out: + return result; +} + +/** + * Filter data from WUSB device to WHCI driver + * + * @header: incoming event + * @buf_size: size of buffer in which event arrived + * @_event_size: actual size of event in the buffer + * @_new_size: size of event after filtered + * + * Filter events based on which protocol the device supports. The WUSB + * errata should be the same as WHCI 0.95 so we do not filter that here - + * only WUSB 1.0. + * + * If we don't handle it, we return -ENOANO (why the weird error code? + * well, so if I get it, I can pinpoint in the code that raised + * it...after all, not too many places use the higher error codes). + */ +static +int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header, + const size_t buf_size, size_t *_real_size, + size_t *_new_size) +{ + int result = -ENOANO; + if (rc->version == 0x0100) + result = hwarc_filter_event_WUSB_0100( + rc, header, buf_size, _real_size, _new_size); + return result; +} + + +/** + * Execute an UWB RC command on HWA + * + * @rc: Instance of a Radio Controller that is a HWA + * @cmd: Buffer containing the RCCB and payload to execute + * @cmd_size: Size of the command buffer. 
+ * + * NOTE: rc's mutex has to be locked + */ +static +int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size) +{ + struct hwarc *hwarc = uwb_rc->priv; + return usb_control_msg( + hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0), + WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber, + (void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */); +} + +static +int hwarc_reset(struct uwb_rc *uwb_rc) +{ + struct hwarc *hwarc = uwb_rc->priv; + return usb_reset_device(hwarc->usb_dev); +} + +/** + * Callback for the notification and event endpoint + * + * Check's that everything is fine and then passes the read data to + * the notification/event handling mechanism (neh). + */ +static +void hwarc_neep_cb(struct urb *urb) +{ + struct hwarc *hwarc = urb->context; + struct usb_interface *usb_iface = hwarc->usb_iface; + struct device *dev = &usb_iface->dev; + int result; + + switch (result = urb->status) { + case 0: + d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n", + urb->status, (size_t)urb->actual_length); + uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, + urb->actual_length); + break; + case -ECONNRESET: /* Not an error, but a controlled situation; */ + case -ENOENT: /* (we killed the URB)...so, no broadcast */ + d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status); + goto out; + case -ESHUTDOWN: /* going away! */ + d_printf(2, dev, "NEEP: URB down %d\n", urb->status); + goto out; + default: /* On general errors, retry unless it gets ugly */ + if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)) + goto error_exceeded; + dev_err(dev, "NEEP: URB error %d\n", urb->status); + } + result = usb_submit_urb(urb, GFP_ATOMIC); + d_printf(3, dev, "NEEP: submit %d\n", result); + if (result < 0) { + dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", + result); + goto error; + } +out: + return; + +error_exceeded: + dev_err(dev, "NEEP: URB max acceptable errors " + "exceeded, resetting device\n"); +error: + uwb_rc_neh_error(hwarc->uwb_rc, result); + uwb_rc_reset_all(hwarc->uwb_rc); + return; +} + +static void hwarc_init(struct hwarc *hwarc) +{ + edc_init(&hwarc->neep_edc); +} + +/** + * Initialize the notification/event endpoint stuff + * + * Note this is effectively a parallel thread; it knows that + * hwarc->uwb_rc always exists because the existence of a 'hwarc' + * means that there is a reverence on the hwarc->uwb_rc (see + * _probe()), and thus _neep_cb() can execute safely. 
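+ *
+ * A sketch of the ordering, as wired up in hwarc_probe() and
+ * hwarc_disconnect() further below (start/stop are presumably invoked
+ * by the UWB core once the RC has been added):
+ *
+ *   hwarc_probe():       uwb_rc->start = hwarc_neep_init;
+ *                        uwb_rc->stop  = hwarc_neep_release;
+ *                        uwb_rc_add(uwb_rc, dev, hwarc);
+ *   rc->start(rc):       hwarc_neep_init()      NEEP URB submitted
+ *   rc->stop(rc):        hwarc_neep_release()   NEEP URB killed
+ *   hwarc_disconnect():  uwb_rc_rm(uwb_rc); ... uwb_rc_put(uwb_rc);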
+ */ +static int hwarc_neep_init(struct uwb_rc *rc) +{ + struct hwarc *hwarc = rc->priv; + struct usb_interface *iface = hwarc->usb_iface; + struct usb_device *usb_dev = interface_to_usbdev(iface); + struct device *dev = &iface->dev; + int result; + struct usb_endpoint_descriptor *epd; + + epd = &iface->cur_altsetting->endpoint[0].desc; + hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL); + if (hwarc->rd_buffer == NULL) { + dev_err(dev, "Unable to allocate notification's read buffer\n"); + goto error_rd_buffer; + } + hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL); + if (hwarc->neep_urb == NULL) { + dev_err(dev, "Unable to allocate notification URB\n"); + goto error_urb_alloc; + } + usb_fill_int_urb(hwarc->neep_urb, usb_dev, + usb_rcvintpipe(usb_dev, epd->bEndpointAddress), + hwarc->rd_buffer, PAGE_SIZE, + hwarc_neep_cb, hwarc, epd->bInterval); + result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC); + if (result < 0) { + dev_err(dev, "Cannot submit notification URB: %d\n", result); + goto error_neep_submit; + } + return 0; + +error_neep_submit: + usb_free_urb(hwarc->neep_urb); +error_urb_alloc: + free_page((unsigned long)hwarc->rd_buffer); +error_rd_buffer: + return -ENOMEM; +} + + +/** Clean up all the notification endpoint resources */ +static void hwarc_neep_release(struct uwb_rc *rc) +{ + struct hwarc *hwarc = rc->priv; + + usb_kill_urb(hwarc->neep_urb); + usb_free_urb(hwarc->neep_urb); + free_page((unsigned long)hwarc->rd_buffer); +} + +/** + * Get the version from class-specific descriptor + * + * NOTE: this descriptor comes with the big bundled configuration + * descriptor that includes the interfaces' and endpoints', so + * we just look for it in the cached copy kept by the USB stack. + * + * NOTE2: We convert LE fields to CPU order. + */ +static int hwarc_get_version(struct uwb_rc *rc) +{ + int result; + + struct hwarc *hwarc = rc->priv; + struct uwb_rc_control_intf_class_desc *descr; + struct device *dev = &rc->uwb_dev.dev; + struct usb_device *usb_dev = hwarc->usb_dev; + char *itr; + struct usb_descriptor_header *hdr; + size_t itr_size, actconfig_idx; + u16 version; + + actconfig_idx = (usb_dev->actconfig - usb_dev->config) / + sizeof(usb_dev->config[0]); + itr = usb_dev->rawdescriptors[actconfig_idx]; + itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); + while (itr_size >= sizeof(*hdr)) { + hdr = (struct usb_descriptor_header *) itr; + d_printf(3, dev, "Extra device descriptor: " + "type %02x/%u bytes @ %zu (%zu left)\n", + hdr->bDescriptorType, hdr->bLength, + (itr - usb_dev->rawdescriptors[actconfig_idx]), + itr_size); + if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) + goto found; + itr += hdr->bLength; + itr_size -= hdr->bLength; + } + dev_err(dev, "cannot find Radio Control Interface Class descriptor\n"); + return -ENODEV; + +found: + result = -EINVAL; + if (hdr->bLength > itr_size) { /* is it available? */ + dev_err(dev, "incomplete Radio Control Interface Class " + "descriptor (%zu bytes left, %u needed)\n", + itr_size, hdr->bLength); + goto error; + } + if (hdr->bLength < sizeof(*descr)) { + dev_err(dev, "short Radio Control Interface Class " + "descriptor\n"); + goto error; + } + descr = (struct uwb_rc_control_intf_class_desc *) hdr; + /* Make LE fields CPU order */ + version = __le16_to_cpu(descr->bcdRCIVersion); + if (version != 0x0100) { + dev_err(dev, "Device reports protocol version 0x%04x. We " + "do not support that. 
\n", version); + result = -EINVAL; + goto error; + } + rc->version = version; + d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n", + rc->version); + result = 0; +error: + return result; +} + +/* + * By creating a 'uwb_rc', we have a reference on it -- that reference + * is the one we drop when we disconnect. + * + * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there + * is only one altsetting allowed. + */ +static int hwarc_probe(struct usb_interface *iface, + const struct usb_device_id *id) +{ + int result; + struct uwb_rc *uwb_rc; + struct hwarc *hwarc; + struct device *dev = &iface->dev; + + result = -ENOMEM; + uwb_rc = uwb_rc_alloc(); + if (uwb_rc == NULL) { + dev_err(dev, "unable to allocate RC instance\n"); + goto error_rc_alloc; + } + hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL); + if (hwarc == NULL) { + dev_err(dev, "unable to allocate HWA RC instance\n"); + goto error_alloc; + } + hwarc_init(hwarc); + hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface)); + hwarc->usb_iface = usb_get_intf(iface); + hwarc->uwb_rc = uwb_rc; + + uwb_rc->owner = THIS_MODULE; + uwb_rc->start = hwarc_neep_init; + uwb_rc->stop = hwarc_neep_release; + uwb_rc->cmd = hwarc_cmd; + uwb_rc->reset = hwarc_reset; + uwb_rc->filter_cmd = hwarc_filter_cmd; + uwb_rc->filter_event = hwarc_filter_event; + + result = uwb_rc_add(uwb_rc, dev, hwarc); + if (result < 0) + goto error_rc_add; + result = hwarc_get_version(uwb_rc); + if (result < 0) { + dev_err(dev, "cannot retrieve version of RC \n"); + goto error_get_version; + } + usb_set_intfdata(iface, hwarc); + return 0; + +error_get_version: + uwb_rc_rm(uwb_rc); +error_rc_add: + usb_put_intf(iface); + usb_put_dev(hwarc->usb_dev); +error_alloc: + uwb_rc_put(uwb_rc); +error_rc_alloc: + return result; +} + +static void hwarc_disconnect(struct usb_interface *iface) +{ + struct hwarc *hwarc = usb_get_intfdata(iface); + struct uwb_rc *uwb_rc = hwarc->uwb_rc; + + usb_set_intfdata(hwarc->usb_iface, NULL); + uwb_rc_rm(uwb_rc); + usb_put_intf(hwarc->usb_iface); + usb_put_dev(hwarc->usb_dev); + d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc); + kfree(hwarc); + uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ +} + +/** USB device ID's that we handle */ +static struct usb_device_id hwarc_id_table[] = { + { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), }, + { }, +}; +MODULE_DEVICE_TABLE(usb, hwarc_id_table); + +static struct usb_driver hwarc_driver = { + .name = "hwa-rc", + .probe = hwarc_probe, + .disconnect = hwarc_disconnect, + .id_table = hwarc_id_table, +}; + +static int __init hwarc_driver_init(void) +{ + int result; + result = usb_register(&hwarc_driver); + if (result < 0) + printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n", + result); + return result; + +} +module_init(hwarc_driver_init); + +static void __exit hwarc_driver_exit(void) +{ + usb_deregister(&hwarc_driver); +} +module_exit(hwarc_driver_exit); + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From f51448543f8e4871f0539435fce42a14044f5652 Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Wed, 17 Sep 2008 16:34:16 +0100 Subject: uwb: add the WiMedia LLC Protocol stack Add the generic code for the WiMedia Logical Link Control Protocol (WLP). This has been split into several patches for easier review. 
core (this patch): - everything else messages: - WLP message construction/decode wss: - Wireless Service Set support build-system: - Kconfig and Kbuild files Signed-off-by: David Vrabel diff --git a/drivers/uwb/wlp/driver.c b/drivers/uwb/wlp/driver.c new file mode 100644 index 0000000..cb8d699 --- /dev/null +++ b/drivers/uwb/wlp/driver.c @@ -0,0 +1,43 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * + * Copyright (C) 2007 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Life cycle of WLP substack + * + * FIXME: Docs + */ + +#include + +static int __init wlp_subsys_init(void) +{ + return 0; +} +module_init(wlp_subsys_init); + +static void __exit wlp_subsys_exit(void) +{ + return; +} +module_exit(wlp_subsys_exit); + +MODULE_AUTHOR("Reinette Chatre "); +MODULE_DESCRIPTION("WiMedia Logical Link Control Protocol (WLP)"); +MODULE_LICENSE("GPL"); diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c new file mode 100644 index 0000000..cdfe8df --- /dev/null +++ b/drivers/uwb/wlp/eda.c @@ -0,0 +1,449 @@ +/* + * WUSB Wire Adapter: WLP interface + * Ethernet to device address cache + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * We need to be able to map ethernet addresses to device addresses + * and back because there is not explicit relationship between the eth + * addresses used in the ETH frames and the device addresses (no, it + * would not have been simpler to force as ETH address the MBOA MAC + * address...no, not at all :). + * + * A device has one MBOA MAC address and one device address. It is possible + * for a device to have more than one virtual MAC address (although a + * virtual address can be the same as the MBOA MAC address). The device + * address is guaranteed to be unique among the devices in the extended + * beacon group (see ECMA 17.1.1). We thus use the device address as index + * to this cache. We do allow searching based on virtual address as this + * is how Ethernet frames will be addressed. + * + * We need to support virtual EUI-48. Although, right now the virtual + * EUI-48 will always be the same as the MAC SAP address. 
The EDA cache + * entry thus contains a MAC SAP address as well as the virtual address + * (used to map the network stack address to a neighbor). When we move + * to support more than one virtual MAC on a host then this organization + * will have to change. Perhaps a neighbor has a list of WSSs, each with a + * tag and virtual EUI-48. + * + * On data transmission + * it is used to determine if the neighbor is connected and what WSS it + * belongs to. With this we know what tag to add to the WLP frame. Storing + * the WSS in the EDA cache may be overkill because we only support one + * WSS. Hopefully we will support more than one WSS at some point. + * On data reception it is used to determine the WSS based on + * the tag and address of the transmitting neighbor. + */ + +#define D_LOCAL 5 +#include +#include +#include +#include +#include "wlp-internal.h" + + +/* FIXME: cache is not purged, only on device close */ + +/* FIXME: does not scale, change to dynamic array */ + +/* + * Initialize the EDA cache + * + * @returns 0 if ok, < 0 errno code on error + * + * Call when the interface is being brought up + * + * NOTE: Keep it as a separate function as the implementation will + * change and be more complex. + */ +void wlp_eda_init(struct wlp_eda *eda) +{ + INIT_LIST_HEAD(&eda->cache); + spin_lock_init(&eda->lock); +} + +/* + * Release the EDA cache + * + * @returns 0 if ok, < 0 errno code on error + * + * Called when the interface is brought down + */ +void wlp_eda_release(struct wlp_eda *eda) +{ + unsigned long flags; + struct wlp_eda_node *itr, *next; + + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry_safe(itr, next, &eda->cache, list_node) { + list_del(&itr->list_node); + kfree(itr); + } + spin_unlock_irqrestore(&eda->lock, flags); +} + +/* + * Add an address mapping + * + * @returns 0 if ok, < 0 errno code on error + * + * An address mapping is initially created when the neighbor device is seen + * for the first time (it is "onair"). At this time the neighbor is not + * connected or associated with a WSS so we only populate the Ethernet and + * Device address fields. + * + */ +int wlp_eda_create_node(struct wlp_eda *eda, + const unsigned char eth_addr[ETH_ALEN], + const struct uwb_dev_addr *dev_addr) +{ + int result = 0; + struct wlp_eda_node *itr; + unsigned long flags; + + BUG_ON(dev_addr == NULL || eth_addr == NULL); + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry(itr, &eda->cache, list_node) { + if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { + printk(KERN_ERR "EDA cache already contains entry " + "for neighbor %02x:%02x\n", + dev_addr->data[1], dev_addr->data[0]); + result = -EEXIST; + goto out_unlock; + } + } + itr = kzalloc(sizeof(*itr), GFP_ATOMIC); + if (itr != NULL) { + memcpy(itr->eth_addr, eth_addr, sizeof(itr->eth_addr)); + itr->dev_addr = *dev_addr; + list_add(&itr->list_node, &eda->cache); + } else + result = -ENOMEM; +out_unlock: + spin_unlock_irqrestore(&eda->lock, flags); + return result; +} + +/* + * Remove entry from EDA cache + * + * This is done when the device goes off air. 
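+ *
+ * For context, a sketch of an entry's full life cycle using the helpers
+ * in this file (trigger events paraphrased from the surrounding comments):
+ *
+ *   neighbor seen on air      -> wlp_eda_create_node(eda, eth_addr, dev_addr)
+ *   association with a WSS    -> wlp_eda_update_node(eda, dev_addr, wss,
+ *                                                    virt_addr, tag, state)
+ *   connection state changes  -> wlp_eda_update_node_state(eda, dev_addr, state)
+ *   neighbor goes off air     -> wlp_eda_rm_node(eda, dev_addr)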
+ */ +void wlp_eda_rm_node(struct wlp_eda *eda, const struct uwb_dev_addr *dev_addr) +{ + struct wlp_eda_node *itr, *next; + unsigned long flags; + + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry_safe(itr, next, &eda->cache, list_node) { + if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { + list_del(&itr->list_node); + kfree(itr); + break; + } + } + spin_unlock_irqrestore(&eda->lock, flags); +} + +/* + * Update an address mapping + * + * @returns 0 if ok, < 0 errno code on error + */ +int wlp_eda_update_node(struct wlp_eda *eda, + const struct uwb_dev_addr *dev_addr, + struct wlp_wss *wss, + const unsigned char virt_addr[ETH_ALEN], + const u8 tag, const enum wlp_wss_connect state) +{ + int result = -ENOENT; + struct wlp_eda_node *itr; + unsigned long flags; + + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry(itr, &eda->cache, list_node) { + if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { + /* Found it, update it */ + itr->wss = wss; + memcpy(itr->virt_addr, virt_addr, + sizeof(itr->virt_addr)); + itr->tag = tag; + itr->state = state; + result = 0; + goto out_unlock; + } + } + /* Not found */ +out_unlock: + spin_unlock_irqrestore(&eda->lock, flags); + return result; +} + +/* + * Update only state field of an address mapping + * + * @returns 0 if ok, < 0 errno code on error + */ +int wlp_eda_update_node_state(struct wlp_eda *eda, + const struct uwb_dev_addr *dev_addr, + const enum wlp_wss_connect state) +{ + int result = -ENOENT; + struct wlp_eda_node *itr; + unsigned long flags; + + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry(itr, &eda->cache, list_node) { + if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { + /* Found it, update it */ + itr->state = state; + result = 0; + goto out_unlock; + } + } + /* Not found */ +out_unlock: + spin_unlock_irqrestore(&eda->lock, flags); + return result; +} + +/* + * Return contents of EDA cache entry + * + * @dev_addr: index to EDA cache + * @eda_entry: pointer to where contents of EDA cache will be copied + */ +int wlp_copy_eda_node(struct wlp_eda *eda, struct uwb_dev_addr *dev_addr, + struct wlp_eda_node *eda_entry) +{ + int result = -ENOENT; + struct wlp_eda_node *itr; + unsigned long flags; + + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry(itr, &eda->cache, list_node) { + if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) { + *eda_entry = *itr; + result = 0; + goto out_unlock; + } + } + /* Not found */ +out_unlock: + spin_unlock_irqrestore(&eda->lock, flags); + return result; +} + +/* + * Execute function for every element in the cache + * + * @function: function to execute on element of cache (must be atomic) + * @priv: private data of function + * @returns: result of first function that failed, or last function + * executed if no function failed. + * + * Stop executing when function returns error for any element in cache. + * + * IMPORTANT: We are using a spinlock here: the function executed on each + * element has to be atomic. 
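+ *
+ * A hypothetical caller, assuming wlp_eda_for_each_f takes
+ * (struct wlp *, struct wlp_eda_node *, void *) and returns int, and that
+ * WLP_WSS_CONNECTED is one of the enum wlp_wss_connect values (the
+ * callback below is made up for illustration and must not sleep):
+ *
+ *   static int wlp_eda_count_connected(struct wlp *wlp,
+ *                                      struct wlp_eda_node *entry,
+ *                                      void *priv)
+ *   {
+ *           unsigned *count = priv;
+ *
+ *           if (entry->state == WLP_WSS_CONNECTED)
+ *                   (*count)++;
+ *           return 0;
+ *   }
+ *
+ *   unsigned count = 0;
+ *   wlp_eda_for_each(&wlp->eda, wlp_eda_count_connected, &count);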
+ */ +int wlp_eda_for_each(struct wlp_eda *eda, wlp_eda_for_each_f function, + void *priv) +{ + int result = 0; + struct wlp *wlp = container_of(eda, struct wlp, eda); + struct wlp_eda_node *entry; + unsigned long flags; + + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry(entry, &eda->cache, list_node) { + result = (*function)(wlp, entry, priv); + if (result < 0) + break; + } + spin_unlock_irqrestore(&eda->lock, flags); + return result; +} + +/* + * Execute function for single element in the cache (return dev addr) + * + * @virt_addr: index into EDA cache used to determine which element to + * execute the function on + * @dev_addr: device address of element in cache will be returned using + * @dev_addr + * @function: function to execute on element of cache (must be atomic) + * @priv: private data of function + * @returns: result of function + * + * IMPORTANT: We are using a spinlock here: the function executed on the + * element has to be atomic. + */ +int wlp_eda_for_virtual(struct wlp_eda *eda, + const unsigned char virt_addr[ETH_ALEN], + struct uwb_dev_addr *dev_addr, + wlp_eda_for_each_f function, + void *priv) +{ + int result = 0; + struct wlp *wlp = container_of(eda, struct wlp, eda); + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_eda_node *itr; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&eda->lock, flags); + list_for_each_entry(itr, &eda->cache, list_node) { + if (!memcmp(itr->virt_addr, virt_addr, + sizeof(itr->virt_addr))) { + d_printf(6, dev, "EDA: looking for " + "%02x:%02x:%02x:%02x:%02x:%02x hit %02x:%02x " + "wss %p tag 0x%02x state %u\n", + virt_addr[0], virt_addr[1], + virt_addr[2], virt_addr[3], + virt_addr[4], virt_addr[5], + itr->dev_addr.data[1], + itr->dev_addr.data[0], itr->wss, + itr->tag, itr->state); + result = (*function)(wlp, itr, priv); + *dev_addr = itr->dev_addr; + found = 1; + break; + } else + d_printf(6, dev, "EDA: looking for " + "%02x:%02x:%02x:%02x:%02x:%02x " + "against " + "%02x:%02x:%02x:%02x:%02x:%02x miss\n", + virt_addr[0], virt_addr[1], + virt_addr[2], virt_addr[3], + virt_addr[4], virt_addr[5], + itr->virt_addr[0], itr->virt_addr[1], + itr->virt_addr[2], itr->virt_addr[3], + itr->virt_addr[4], itr->virt_addr[5]); + } + if (!found) { + if (printk_ratelimit()) + dev_err(dev, "EDA: Eth addr %02x:%02x:%02x" + ":%02x:%02x:%02x not found.\n", + virt_addr[0], virt_addr[1], + virt_addr[2], virt_addr[3], + virt_addr[4], virt_addr[5]); + result = -ENODEV; + } + spin_unlock_irqrestore(&eda->lock, flags); + return result; +} + +static const char *__wlp_wss_connect_state[] = { "WLP_WSS_UNCONNECTED", + "WLP_WSS_CONNECTED", + "WLP_WSS_CONNECT_FAILED", +}; + +static const char *wlp_wss_connect_state_str(unsigned id) +{ + if (id >= ARRAY_SIZE(__wlp_wss_connect_state)) + return "unknown WSS connection state"; + return __wlp_wss_connect_state[id]; +} + +/* + * View EDA cache from user space + * + * A debugging feature to give user visibility into the EDA cache. 
Also + * used to display members of WSS to user (called from wlp_wss_members_show()) + */ +ssize_t wlp_eda_show(struct wlp *wlp, char *buf) +{ + ssize_t result = 0; + struct wlp_eda_node *entry; + unsigned long flags; + struct wlp_eda *eda = &wlp->eda; + spin_lock_irqsave(&eda->lock, flags); + result = scnprintf(buf, PAGE_SIZE, "#eth_addr dev_addr wss_ptr " + "tag state virt_addr\n"); + list_for_each_entry(entry, &eda->cache, list_node) { + result += scnprintf(buf + result, PAGE_SIZE - result, + "%02x:%02x:%02x:%02x:%02x:%02x %02x:%02x " + "%p 0x%02x %s " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + entry->eth_addr[0], entry->eth_addr[1], + entry->eth_addr[2], entry->eth_addr[3], + entry->eth_addr[4], entry->eth_addr[5], + entry->dev_addr.data[1], + entry->dev_addr.data[0], entry->wss, + entry->tag, + wlp_wss_connect_state_str(entry->state), + entry->virt_addr[0], entry->virt_addr[1], + entry->virt_addr[2], entry->virt_addr[3], + entry->virt_addr[4], entry->virt_addr[5]); + if (result >= PAGE_SIZE) + break; + } + spin_unlock_irqrestore(&eda->lock, flags); + return result; +} +EXPORT_SYMBOL_GPL(wlp_eda_show); + +/* + * Add new EDA cache entry based on user input in sysfs + * + * Should only be used for debugging. + * + * The WSS is assumed to be the only WSS supported. This needs to be + * redesigned when we support more than one WSS. + */ +ssize_t wlp_eda_store(struct wlp *wlp, const char *buf, size_t size) +{ + ssize_t result; + struct wlp_eda *eda = &wlp->eda; + u8 eth_addr[6]; + struct uwb_dev_addr dev_addr; + u8 tag; + unsigned state; + + result = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx " + "%02hhx:%02hhx %02hhx %u\n", + ð_addr[0], ð_addr[1], + ð_addr[2], ð_addr[3], + ð_addr[4], ð_addr[5], + &dev_addr.data[1], &dev_addr.data[0], &tag, &state); + switch (result) { + case 6: /* no dev addr specified -- remove entry NOT IMPLEMENTED */ + /*result = wlp_eda_rm(eda, eth_addr, &dev_addr);*/ + result = -ENOSYS; + break; + case 10: + state = state >= 1 ? 1 : 0; + result = wlp_eda_create_node(eda, eth_addr, &dev_addr); + if (result < 0 && result != -EEXIST) + goto error; + /* Set virtual addr to be same as MAC */ + result = wlp_eda_update_node(eda, &dev_addr, &wlp->wss, + eth_addr, tag, state); + if (result < 0) + goto error; + break; + default: /* bad format */ + result = -EINVAL; + } +error: + return result < 0 ? result : size; +} +EXPORT_SYMBOL_GPL(wlp_eda_store); diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c new file mode 100644 index 0000000..1bb9b1f --- /dev/null +++ b/drivers/uwb/wlp/sysfs.c @@ -0,0 +1,709 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * sysfs functions + * + * Copyright (C) 2007 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * + * FIXME: Docs + * + */ + +#include +#include "wlp-internal.h" + +static +size_t wlp_wss_wssid_e_print(char *buf, size_t bufsize, + struct wlp_wssid_e *wssid_e) +{ + size_t used = 0; + used += scnprintf(buf, bufsize, " WSS: "); + used += wlp_wss_uuid_print(buf + used, bufsize - used, + &wssid_e->wssid); + + if (wssid_e->info != NULL) { + used += scnprintf(buf + used, bufsize - used, " "); + used += uwb_mac_addr_print(buf + used, bufsize - used, + &wssid_e->info->bcast); + used += scnprintf(buf + used, bufsize - used, " %u %u %s\n", + wssid_e->info->accept_enroll, + wssid_e->info->sec_status, + wssid_e->info->name); + } + return used; +} + +/** + * Print out information learned from neighbor discovery + * + * Some fields being printed may not be included in the device discovery + * information (it is not mandatory). We are thus careful how the + * information is printed to ensure it is clear to the user what field is + * being referenced. + * The information being printed is for one time use - temporary storage is + * cleaned after it is printed. + * + * Ideally sysfs output should be on one line. The information printed here + * contain a few strings so it will be hard to parse if they are all + * printed on the same line - without agreeing on a standard field + * separator. + */ +static +ssize_t wlp_wss_neighborhood_print_remove(struct wlp *wlp, char *buf, + size_t bufsize) +{ + size_t used = 0; + struct wlp_neighbor_e *neighb; + struct wlp_wssid_e *wssid_e; + + mutex_lock(&wlp->nbmutex); + used = scnprintf(buf, bufsize, "#Neighbor information\n" + "#uuid dev_addr\n" + "# Device Name:\n# Model Name:\n# Manufacturer:\n" + "# Model Nr:\n# Serial:\n" + "# Pri Dev type: CategoryID OUI OUISubdiv " + "SubcategoryID\n" + "# WSS: WSSID WSS_name accept_enroll sec_status " + "bcast\n" + "# WSS: WSSID WSS_name accept_enroll sec_status " + "bcast\n\n"); + list_for_each_entry(neighb, &wlp->neighbors, node) { + if (bufsize - used <= 0) + goto out; + used += wlp_wss_uuid_print(buf + used, bufsize - used, + &neighb->uuid); + buf[used++] = ' '; + used += uwb_dev_addr_print(buf + used, bufsize - used, + &neighb->uwb_dev->dev_addr); + if (neighb->info != NULL) + used += scnprintf(buf + used, bufsize - used, + "\n Device Name: %s\n" + " Model Name: %s\n" + " Manufacturer:%s \n" + " Model Nr: %s\n" + " Serial: %s\n" + " Pri Dev type: " + "%u %02x:%02x:%02x %u %u\n", + neighb->info->name, + neighb->info->model_name, + neighb->info->manufacturer, + neighb->info->model_nr, + neighb->info->serial, + neighb->info->prim_dev_type.category, + neighb->info->prim_dev_type.OUI[0], + neighb->info->prim_dev_type.OUI[1], + neighb->info->prim_dev_type.OUI[2], + neighb->info->prim_dev_type.OUIsubdiv, + neighb->info->prim_dev_type.subID); + list_for_each_entry(wssid_e, &neighb->wssid, node) { + used += wlp_wss_wssid_e_print(buf + used, + bufsize - used, + wssid_e); + } + buf[used++] = '\n'; + wlp_remove_neighbor_tmp_info(neighb); + } + + +out: + mutex_unlock(&wlp->nbmutex); + return used; +} + + +/** + * Show properties of all WSS in neighborhood. + * + * Will trigger a complete discovery of WSS activated by this device and + * its neighbors. 
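+ *
+ * Illustrative output only (names and addresses are made up; the exact
+ * uuid and address rendering comes from wlp_wss_uuid_print() and
+ * uwb_dev_addr_print()):
+ *
+ *   #Neighbor information
+ *   #uuid dev_addr
+ *   ...
+ *   <neighbor uuid> 32:ab
+ *    Device Name: example-device
+ *    WSS: <wssid> <bcast addr> 1 1 example-wss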
+ */ +ssize_t wlp_neighborhood_show(struct wlp *wlp, char *buf) +{ + wlp_discover(wlp); + return wlp_wss_neighborhood_print_remove(wlp, buf, PAGE_SIZE); +} +EXPORT_SYMBOL_GPL(wlp_neighborhood_show); + +static +ssize_t __wlp_wss_properties_show(struct wlp_wss *wss, char *buf, + size_t bufsize) +{ + ssize_t result; + + result = wlp_wss_uuid_print(buf, bufsize, &wss->wssid); + result += scnprintf(buf + result, bufsize - result, " "); + result += uwb_mac_addr_print(buf + result, bufsize - result, + &wss->bcast); + result += scnprintf(buf + result, bufsize - result, + " 0x%02x %u ", wss->hash, wss->secure_status); + result += wlp_wss_key_print(buf + result, bufsize - result, + wss->master_key); + result += scnprintf(buf + result, bufsize - result, " 0x%02x ", + wss->tag); + result += uwb_mac_addr_print(buf + result, bufsize - result, + &wss->virtual_addr); + result += scnprintf(buf + result, bufsize - result, " %s", wss->name); + result += scnprintf(buf + result, bufsize - result, + "\n\n#WSSID\n#WSS broadcast address\n" + "#WSS hash\n#WSS secure status\n" + "#WSS master key\n#WSS local tag\n" + "#WSS local virtual EUI-48\n#WSS name\n"); + return result; +} + +/** + * Show which WSS is activated. + */ +ssize_t wlp_wss_activate_show(struct wlp_wss *wss, char *buf) +{ + int result = 0; + + if (mutex_lock_interruptible(&wss->mutex)) + goto out; + if (wss->state >= WLP_WSS_STATE_ACTIVE) + result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE); + else + result = scnprintf(buf, PAGE_SIZE, "No local WSS active.\n"); + result += scnprintf(buf + result, PAGE_SIZE - result, + "\n\n" + "# echo WSSID SECURE_STATUS ACCEPT_ENROLLMENT " + "NAME #create new WSS\n" + "# echo WSSID [DEV ADDR] #enroll in and activate " + "existing WSS, can request registrar\n" + "#\n" + "# WSSID is a 16 byte hex array. Eg. 12 A3 3B ... \n" + "# SECURE_STATUS 0 - unsecure, 1 - secure (default)\n" + "# ACCEPT_ENROLLMENT 0 - no, 1 - yes (default)\n" + "# NAME is the text string identifying the WSS\n" + "# DEV ADDR is the device address of neighbor " + "that should be registrar. Eg. 32:AB\n"); + + mutex_unlock(&wss->mutex); +out: + return result; + +} +EXPORT_SYMBOL_GPL(wlp_wss_activate_show); + +/** + * Create/activate a new WSS or enroll/activate in neighboring WSS + * + * The user can provide the WSSID of a WSS in which it wants to enroll. + * Only the WSSID is necessary if the WSS have been discovered before. If + * the WSS has not been discovered before, or the user wants to use a + * particular neighbor as its registrar, then the user can also provide a + * device address or the neighbor that will be used as registrar. + * + * A new WSS is created when the user provides a WSSID, secure status, and + * WSS name. 
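+ *
+ * Illustrative input lines, matching the sscanf() formats below (the
+ * WSSID bytes, the name and the device address are made up):
+ *
+ *   "12 A3 3B 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 1 1 demo-wss"
+ *       creates and activates a new secure WSS named "demo-wss"
+ *   "12 A3 3B 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10"
+ *       enrolls in and activates an already discovered WSS
+ *   "12 A3 3B 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 32:AB"
+ *       enrolls using the neighbor 32:AB as registrar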
+ */ +ssize_t wlp_wss_activate_store(struct wlp_wss *wss, + const char *buf, size_t size) +{ + ssize_t result = -EINVAL; + struct wlp_uuid wssid; + struct uwb_dev_addr dev; + struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; + char name[65]; + unsigned sec_status, accept; + memset(name, 0, sizeof(name)); + result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx:%02hhx", + &wssid.data[0] , &wssid.data[1], + &wssid.data[2] , &wssid.data[3], + &wssid.data[4] , &wssid.data[5], + &wssid.data[6] , &wssid.data[7], + &wssid.data[8] , &wssid.data[9], + &wssid.data[10], &wssid.data[11], + &wssid.data[12], &wssid.data[13], + &wssid.data[14], &wssid.data[15], + &dev.data[1], &dev.data[0]); + if (result == 16 || result == 17) { + result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%u %u %64c", + &wssid.data[0] , &wssid.data[1], + &wssid.data[2] , &wssid.data[3], + &wssid.data[4] , &wssid.data[5], + &wssid.data[6] , &wssid.data[7], + &wssid.data[8] , &wssid.data[9], + &wssid.data[10], &wssid.data[11], + &wssid.data[12], &wssid.data[13], + &wssid.data[14], &wssid.data[15], + &sec_status, &accept, name); + if (result == 16) + result = wlp_wss_enroll_activate(wss, &wssid, &bcast); + else if (result == 19) { + sec_status = sec_status == 0 ? 0 : 1; + accept = accept == 0 ? 0 : 1; + /* We read name using %c, so the newline needs to be + * removed */ + if (strlen(name) != sizeof(name) - 1) + name[strlen(name) - 1] = '\0'; + result = wlp_wss_create_activate(wss, &wssid, name, + sec_status, accept); + } else + result = -EINVAL; + } else if (result == 18) + result = wlp_wss_enroll_activate(wss, &wssid, &dev); + else + result = -EINVAL; + return result < 0 ? result : size; +} +EXPORT_SYMBOL_GPL(wlp_wss_activate_store); + +/** + * Show the UUID of this host + */ +ssize_t wlp_uuid_show(struct wlp *wlp, char *buf) +{ + ssize_t result = 0; + + mutex_lock(&wlp->mutex); + result = wlp_wss_uuid_print(buf, PAGE_SIZE, &wlp->uuid); + buf[result++] = '\n'; + mutex_unlock(&wlp->mutex); + return result; +} +EXPORT_SYMBOL_GPL(wlp_uuid_show); + +/** + * Store a new UUID for this host + * + * According to the spec this should be encoded as an octet string in the + * order the octets are shown in string representation in RFC 4122 (WLP + * 0.99 [Table 6]) + * + * We do not check value provided by user. + */ +ssize_t wlp_uuid_store(struct wlp *wlp, const char *buf, size_t size) +{ + ssize_t result; + struct wlp_uuid uuid; + + mutex_lock(&wlp->mutex); + result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx ", + &uuid.data[0] , &uuid.data[1], + &uuid.data[2] , &uuid.data[3], + &uuid.data[4] , &uuid.data[5], + &uuid.data[6] , &uuid.data[7], + &uuid.data[8] , &uuid.data[9], + &uuid.data[10], &uuid.data[11], + &uuid.data[12], &uuid.data[13], + &uuid.data[14], &uuid.data[15]); + if (result != 16) { + result = -EINVAL; + goto error; + } + wlp->uuid = uuid; +error: + mutex_unlock(&wlp->mutex); + return result < 0 ? 
result : size; +} +EXPORT_SYMBOL_GPL(wlp_uuid_store); + +/** + * Show contents of members of device information structure + */ +#define wlp_dev_info_show(type) \ +ssize_t wlp_dev_##type##_show(struct wlp *wlp, char *buf) \ +{ \ + ssize_t result = 0; \ + mutex_lock(&wlp->mutex); \ + if (wlp->dev_info == NULL) { \ + result = __wlp_setup_device_info(wlp); \ + if (result < 0) \ + goto out; \ + } \ + result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->type);\ +out: \ + mutex_unlock(&wlp->mutex); \ + return result; \ +} \ +EXPORT_SYMBOL_GPL(wlp_dev_##type##_show); + +wlp_dev_info_show(name) +wlp_dev_info_show(model_name) +wlp_dev_info_show(model_nr) +wlp_dev_info_show(manufacturer) +wlp_dev_info_show(serial) + +/** + * Store contents of members of device information structure + */ +#define wlp_dev_info_store(type, len) \ +ssize_t wlp_dev_##type##_store(struct wlp *wlp, const char *buf, size_t size)\ +{ \ + ssize_t result; \ + char format[10]; \ + mutex_lock(&wlp->mutex); \ + if (wlp->dev_info == NULL) { \ + result = __wlp_alloc_device_info(wlp); \ + if (result < 0) \ + goto out; \ + } \ + memset(wlp->dev_info->type, 0, sizeof(wlp->dev_info->type)); \ + sprintf(format, "%%%uc", len); \ + result = sscanf(buf, format, wlp->dev_info->type); \ +out: \ + mutex_unlock(&wlp->mutex); \ + return result < 0 ? result : size; \ +} \ +EXPORT_SYMBOL_GPL(wlp_dev_##type##_store); + +wlp_dev_info_store(name, 32) +wlp_dev_info_store(manufacturer, 64) +wlp_dev_info_store(model_name, 32) +wlp_dev_info_store(model_nr, 32) +wlp_dev_info_store(serial, 32) + +static +const char *__wlp_dev_category[] = { + [WLP_DEV_CAT_COMPUTER] = "Computer", + [WLP_DEV_CAT_INPUT] = "Input device", + [WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER] = "Printer, scanner, FAX, or " + "Copier", + [WLP_DEV_CAT_CAMERA] = "Camera", + [WLP_DEV_CAT_STORAGE] = "Storage Network", + [WLP_DEV_CAT_INFRASTRUCTURE] = "Infrastructure", + [WLP_DEV_CAT_DISPLAY] = "Display", + [WLP_DEV_CAT_MULTIM] = "Multimedia device", + [WLP_DEV_CAT_GAMING] = "Gaming device", + [WLP_DEV_CAT_TELEPHONE] = "Telephone", + [WLP_DEV_CAT_OTHER] = "Other", +}; + +static +const char *wlp_dev_category_str(unsigned cat) +{ + if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE) + || cat == WLP_DEV_CAT_OTHER) + return __wlp_dev_category[cat]; + return "unknown category"; +} + +ssize_t wlp_dev_prim_category_show(struct wlp *wlp, char *buf) +{ + ssize_t result = 0; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_setup_device_info(wlp); + if (result < 0) + goto out; + } + result = scnprintf(buf, PAGE_SIZE, "%s\n", + wlp_dev_category_str(wlp->dev_info->prim_dev_type.category)); +out: + mutex_unlock(&wlp->mutex); + return result; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_category_show); + +ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf, + size_t size) +{ + ssize_t result; + u16 cat; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_alloc_device_info(wlp); + if (result < 0) + goto out; + } + result = sscanf(buf, "%hu", &cat); + if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE) + || cat == WLP_DEV_CAT_OTHER) + wlp->dev_info->prim_dev_type.category = cat; + else + result = -EINVAL; +out: + mutex_unlock(&wlp->mutex); + return result < 0 ? 
result : size; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_category_store); + +ssize_t wlp_dev_prim_OUI_show(struct wlp *wlp, char *buf) +{ + ssize_t result = 0; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_setup_device_info(wlp); + if (result < 0) + goto out; + } + result = scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x\n", + wlp->dev_info->prim_dev_type.OUI[0], + wlp->dev_info->prim_dev_type.OUI[1], + wlp->dev_info->prim_dev_type.OUI[2]); +out: + mutex_unlock(&wlp->mutex); + return result; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_show); + +ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size) +{ + ssize_t result; + u8 OUI[3]; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_alloc_device_info(wlp); + if (result < 0) + goto out; + } + result = sscanf(buf, "%hhx:%hhx:%hhx", + &OUI[0], &OUI[1], &OUI[2]); + if (result != 3) { + result = -EINVAL; + goto out; + } else + memcpy(wlp->dev_info->prim_dev_type.OUI, OUI, sizeof(OUI)); +out: + mutex_unlock(&wlp->mutex); + return result < 0 ? result : size; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_store); + + +ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *wlp, char *buf) +{ + ssize_t result = 0; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_setup_device_info(wlp); + if (result < 0) + goto out; + } + result = scnprintf(buf, PAGE_SIZE, "%u\n", + wlp->dev_info->prim_dev_type.OUIsubdiv); +out: + mutex_unlock(&wlp->mutex); + return result; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_show); + +ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *wlp, const char *buf, + size_t size) +{ + ssize_t result; + unsigned sub; + u8 max_sub = ~0; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_alloc_device_info(wlp); + if (result < 0) + goto out; + } + result = sscanf(buf, "%u", &sub); + if (sub <= max_sub) + wlp->dev_info->prim_dev_type.OUIsubdiv = sub; + else + result = -EINVAL; +out: + mutex_unlock(&wlp->mutex); + return result < 0 ? result : size; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_store); + +ssize_t wlp_dev_prim_subcat_show(struct wlp *wlp, char *buf) +{ + ssize_t result = 0; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_setup_device_info(wlp); + if (result < 0) + goto out; + } + result = scnprintf(buf, PAGE_SIZE, "%u\n", + wlp->dev_info->prim_dev_type.subID); +out: + mutex_unlock(&wlp->mutex); + return result; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_show); + +ssize_t wlp_dev_prim_subcat_store(struct wlp *wlp, const char *buf, + size_t size) +{ + ssize_t result; + unsigned sub; + __le16 max_sub = ~0; + mutex_lock(&wlp->mutex); + if (wlp->dev_info == NULL) { + result = __wlp_alloc_device_info(wlp); + if (result < 0) + goto out; + } + result = sscanf(buf, "%u", &sub); + if (sub <= max_sub) + wlp->dev_info->prim_dev_type.subID = sub; + else + result = -EINVAL; +out: + mutex_unlock(&wlp->mutex); + return result < 0 ? 
result : size; +} +EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_store); + +/** + * Subsystem implementation for interaction with individual WSS via sysfs + * + * Followed instructions for subsystem in Documentation/filesystems/sysfs.txt + */ + +#define kobj_to_wlp_wss(obj) container_of(obj, struct wlp_wss, kobj) +#define attr_to_wlp_wss_attr(_attr) \ + container_of(_attr, struct wlp_wss_attribute, attr) + +/** + * Sysfs subsystem: forward read calls + * + * Sysfs operation for forwarding read call to the show method of the + * attribute owner + */ +static +ssize_t wlp_wss_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr); + struct wlp_wss *wss = kobj_to_wlp_wss(kobj); + ssize_t ret = -EIO; + + if (wss_attr->show) + ret = wss_attr->show(wss, buf); + return ret; +} +/** + * Sysfs subsystem: forward write calls + * + * Sysfs operation for forwarding write call to the store method of the + * attribute owner + */ +static +ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr); + struct wlp_wss *wss = kobj_to_wlp_wss(kobj); + ssize_t ret = -EIO; + + if (wss_attr->store) + ret = wss_attr->store(wss, buf, count); + return ret; +} + +static +struct sysfs_ops wss_sysfs_ops = { + .show = wlp_wss_attr_show, + .store = wlp_wss_attr_store, +}; + +struct kobj_type wss_ktype = { + .release = wlp_wss_release, + .sysfs_ops = &wss_sysfs_ops, +}; + + +/** + * Sysfs files for individual WSS + */ + +/** + * Print static properties of this WSS + * + * The name of a WSS may not be null teminated. It's max size is 64 bytes + * so we copy it to a larger array just to make sure we print sane data. + */ +static ssize_t wlp_wss_properties_show(struct wlp_wss *wss, char *buf) +{ + int result = 0; + + if (mutex_lock_interruptible(&wss->mutex)) + goto out; + result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE); + mutex_unlock(&wss->mutex); +out: + return result; +} +WSS_ATTR(properties, S_IRUGO, wlp_wss_properties_show, NULL); + +/** + * Print all connected members of this WSS + * The EDA cache contains all members of WSS neighborhood. 
+ */ +static ssize_t wlp_wss_members_show(struct wlp_wss *wss, char *buf) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + return wlp_eda_show(wlp, buf); +} +WSS_ATTR(members, S_IRUGO, wlp_wss_members_show, NULL); + +static +const char *__wlp_strstate[] = { + "none", + "partially enrolled", + "enrolled", + "active", + "connected", +}; + +static const char *wlp_wss_strstate(unsigned state) +{ + if (state >= ARRAY_SIZE(__wlp_strstate)) + return "unknown state"; + return __wlp_strstate[state]; +} + +/* + * Print current state of this WSS + */ +static ssize_t wlp_wss_state_show(struct wlp_wss *wss, char *buf) +{ + int result = 0; + + if (mutex_lock_interruptible(&wss->mutex)) + goto out; + result = scnprintf(buf, PAGE_SIZE, "%s\n", + wlp_wss_strstate(wss->state)); + mutex_unlock(&wss->mutex); +out: + return result; +} +WSS_ATTR(state, S_IRUGO, wlp_wss_state_show, NULL); + + +static +struct attribute *wss_attrs[] = { + &wss_attr_properties.attr, + &wss_attr_members.attr, + &wss_attr_state.attr, + NULL, +}; + +struct attribute_group wss_attr_group = { + .name = NULL, /* we want them in the same directory */ + .attrs = wss_attrs, +}; diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c new file mode 100644 index 0000000..c701bd1 --- /dev/null +++ b/drivers/uwb/wlp/txrx.c @@ -0,0 +1,374 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * Message exchange infrastructure + * + * Copyright (C) 2007 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: Docs + * + */ + +#include +#include +#define D_LOCAL 5 +#include +#include "wlp-internal.h" + + +/** + * Direct incoming association msg to correct parsing routine + * + * We only expect D1, E1, C1, C3 messages as new. All other incoming + * association messages should form part of an established session that is + * handled elsewhere. + * The handling of these messages often require calling sleeping functions + * - this cannot be done in interrupt context. We use the kernel's + * workqueue to handle these messages. + */ +static +void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb, + struct uwb_dev_addr *src) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_frame_assoc *assoc = (void *) skb->data; + struct wlp_assoc_frame_ctx *frame_ctx; + d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); + frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); + if (frame_ctx == NULL) { + dev_err(dev, "WLP: Unable to allocate memory for association " + "frame handling.\n"); + kfree_skb(skb); + goto out; + } + frame_ctx->wlp = wlp; + frame_ctx->skb = skb; + frame_ctx->src = *src; + switch (assoc->type) { + case WLP_ASSOC_D1: + d_printf(5, dev, "Received a D1 frame.\n"); + INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); + schedule_work(&frame_ctx->ws); + break; + case WLP_ASSOC_E1: + d_printf(5, dev, "Received a E1 frame. 
FIXME?\n"); + kfree_skb(skb); /* Temporary until we handle it */ + kfree(frame_ctx); /* Temporary until we handle it */ + break; + case WLP_ASSOC_C1: + d_printf(5, dev, "Received a C1 frame.\n"); + INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); + schedule_work(&frame_ctx->ws); + break; + case WLP_ASSOC_C3: + d_printf(5, dev, "Received a C3 frame.\n"); + INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); + schedule_work(&frame_ctx->ws); + break; + default: + dev_err(dev, "Received unexpected association frame. " + "Type = %d \n", assoc->type); + kfree_skb(skb); + kfree(frame_ctx); + break; + } +out: + d_fnend(5, dev, "wlp %p\n", wlp); +} + +/** + * Process incoming association frame + * + * Although it could be possible to deal with some incoming association + * messages without creating a new session we are keeping things simple. We + * do not accept new association messages if there is a session in progress + * and the messages do not belong to that session. + * + * If an association message arrives that causes the creation of a session + * (WLP_ASSOC_E1) while we are in the process of creating a session then we + * rely on the neighbor mutex to protect the data. That is, the new session + * will not be started until the previous is completed. + */ +static +void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb, + struct uwb_dev_addr *src) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_frame_assoc *assoc = (void *) skb->data; + struct wlp_session *session = wlp->session; + u8 version; + d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); + + if (wlp_get_version(wlp, &assoc->version, &version, + sizeof(assoc->version)) < 0) + goto error; + if (version != WLP_VERSION) { + dev_err(dev, "Unsupported WLP version in association " + "message.\n"); + goto error; + } + if (session != NULL) { + /* Function that created this session is still holding the + * &wlp->mutex to protect this session. */ + if (assoc->type == session->exp_message || + assoc->type == WLP_ASSOC_F0) { + if (!memcmp(&session->neighbor_addr, src, + sizeof(*src))) { + session->data = skb; + (session->cb)(wlp); + } else { + dev_err(dev, "Received expected message from " + "unexpected source. Expected message " + "%d or F0 from %02x:%02x, but received " + "it from %02x:%02x. Dropping.\n", + session->exp_message, + session->neighbor_addr.data[1], + session->neighbor_addr.data[0], + src->data[1], src->data[0]); + goto error; + } + } else { + dev_err(dev, "Association already in progress. " + "Dropping.\n"); + goto error; + } + } else { + wlp_direct_assoc_frame(wlp, skb, src); + } + d_fnend(5, dev, "wlp %p\n", wlp); + return; +error: + kfree_skb(skb); + d_fnend(5, dev, "wlp %p\n", wlp); +} + +/** + * Verify incoming frame is from connected neighbor, prep to pass to WLP client + * + * Verification proceeds according to WLP 0.99 [7.3.1]. The source address + * is used to determine which neighbor is sending the frame and the WSS tag + * is used to know to which WSS the frame belongs (we only support one WSS + * so this test is straight forward). + * With the WSS found we need to ensure that we are connected before + * allowing the exchange of data frames. 
+ */ +static +int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb, + struct uwb_dev_addr *src) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = -EINVAL; + struct wlp_eda_node eda_entry; + struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; + + d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); + /*verify*/ + result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Incoming frame is from unknown " + "neighbor %02x:%02x.\n", src->data[1], + src->data[0]); + goto out; + } + if (hdr->tag != eda_entry.tag) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Tag of incoming frame from " + "%02x:%02x does not match expected tag. " + "Received 0x%02x, expected 0x%02x. \n", + src->data[1], src->data[0], hdr->tag, + eda_entry.tag); + result = -EINVAL; + goto out; + } + if (eda_entry.state != WLP_WSS_CONNECTED) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Incoming frame from " + "%02x:%02x does is not from connected WSS.\n", + src->data[1], src->data[0]); + result = -EINVAL; + goto out; + } + /*prep*/ + skb_pull(skb, sizeof(*hdr)); +out: + d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); + return result; +} + +/** + * Receive a WLP frame from device + * + * @returns: 1 if calling function should free the skb + * 0 if it successfully handled skb and freed it + * 0 if error occured, will free skb in this case + */ +int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb, + struct uwb_dev_addr *src) +{ + unsigned len = skb->len; + void *ptr = skb->data; + struct wlp_frame_hdr *hdr; + int result = 0; + + d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len); + if (len < sizeof(*hdr)) { + dev_err(dev, "Not enough data to parse WLP header.\n"); + result = -EINVAL; + goto out; + } + hdr = ptr; + d_dump(6, dev, hdr, sizeof(*hdr)); + if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { + dev_err(dev, "Not a WLP frame type.\n"); + result = -EINVAL; + goto out; + } + switch (hdr->type) { + case WLP_FRAME_STANDARD: + if (len < sizeof(struct wlp_frame_std_abbrv_hdr)) { + dev_err(dev, "Not enough data to parse Standard " + "WLP header.\n"); + goto out; + } + result = wlp_verify_prep_rx_frame(wlp, skb, src); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Verification of frame " + "from neighbor %02x:%02x failed.\n", + src->data[1], src->data[0]); + goto out; + } + result = 1; + break; + case WLP_FRAME_ABBREVIATED: + dev_err(dev, "Abbreviated frame received. FIXME?\n"); + kfree_skb(skb); + break; + case WLP_FRAME_CONTROL: + dev_err(dev, "Control frame received. FIXME?\n"); + kfree_skb(skb); + break; + case WLP_FRAME_ASSOCIATION: + if (len < sizeof(struct wlp_frame_assoc)) { + dev_err(dev, "Not enough data to parse Association " + "WLP header.\n"); + goto out; + } + d_printf(5, dev, "Association frame received.\n"); + wlp_receive_assoc_frame(wlp, skb, src); + break; + default: + dev_err(dev, "Invalid frame received.\n"); + result = -EINVAL; + break; + } +out: + if (result < 0) { + kfree_skb(skb); + result = 0; + } + d_fnend(6, dev, "skb (%p)\n", skb); + return result; +} +EXPORT_SYMBOL_GPL(wlp_receive_frame); + + +/** + * Verify frame from network stack, prepare for further transmission + * + * @skb: the socket buffer that needs to be prepared for transmission (it + * is in need of a WLP header). If this is a broadcast frame we take + * over the entire transmission. 
+ * If it is a unicast the WSS connection should already be established + * and transmission will be done by the calling function. + * @dst: On return this will contain the device address to which the + * frame is destined. + * @returns: 0 on success no tx : WLP header sucessfully applied to skb buffer, + * calling function can proceed with tx + * 1 on success with tx : WLP will take over transmission of this + * frame + * <0 on error + * + * The network stack (WLP client) is attempting to transmit a frame. We can + * only transmit data if a local WSS is at least active (connection will be + * done here if this is a broadcast frame and neighbor also has the WSS + * active). + * + * The frame can be either broadcast or unicast. Broadcast in a WSS is + * supported via multicast, but we don't support multicast yet (until + * devices start to support MAB IEs). If a broadcast frame needs to be + * transmitted it is treated as a unicast frame to each neighbor. In this + * case the WLP takes over transmission of the skb and returns 1 + * to the caller to indicate so. Also, in this case, if a neighbor has the + * same WSS activated but is not connected then the WSS connection will be + * done at this time. The neighbor's virtual address will be learned at + * this time. + * + * The destination address in a unicast frame is the virtual address of the + * neighbor. This address only becomes known when a WSS connection is + * established. We thus rely on a broadcast frame to trigger the setup of + * WSS connections to all neighbors before we are able to send unicast + * frames to them. This seems reasonable as IP would usually use ARP first + * before any unicast frames are sent. + * + * If we are already connected to the neighbor (neighbor's virtual address + * is known) we just prepare the WLP header and the caller will continue to + * send the frame. + * + * A failure in this function usually indicates something that cannot be + * fixed automatically. So, if this function fails (@return < 0) the calling + * function should not retry to send the frame as it will very likely keep + * failing. + * + */ +int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, + struct sk_buff *skb, struct uwb_dev_addr *dst) +{ + int result = -EINVAL; + struct ethhdr *eth_hdr = (void *) skb->data; + + d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb); + if (is_broadcast_ether_addr(eth_hdr->h_dest)) { + d_printf(6, dev, "WLP: handling broadcast frame. \n"); + result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "Unable to handle broadcast " + "frame from WLP client.\n"); + goto out; + } + dev_kfree_skb_irq(skb); + result = 1; + /* Frame will be transmitted by WLP. */ + } else { + d_printf(6, dev, "WLP: handling unicast frame. \n"); + result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, + wlp_wss_prep_hdr, skb); + if (unlikely(result < 0)) { + if (printk_ratelimit()) + dev_err(dev, "Unable to prepare " + "skb for transmission. \n"); + goto out; + } + } +out: + d_fnend(6, dev, "wlp (%p), skb (%p). 
result = %d \n", wlp, skb, result); + return result; +} +EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame); diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h new file mode 100644 index 0000000..1c94fab --- /dev/null +++ b/drivers/uwb/wlp/wlp-internal.h @@ -0,0 +1,228 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * Internal API + * + * Copyright (C) 2007 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef __WLP_INTERNAL_H__ +#define __WLP_INTERNAL_H__ + +/** + * State of WSS connection + * + * A device needs to connect to a neighbor in an activated WSS before data + * can be transmitted. The spec also distinguishes between a new connection + * attempt and a connection attempt after previous connection attempts. The + * state WLP_WSS_CONNECT_FAILED is used for this scenario. See WLP 0.99 + * [7.2.6] + */ +enum wlp_wss_connect { + WLP_WSS_UNCONNECTED = 0, + WLP_WSS_CONNECTED, + WLP_WSS_CONNECT_FAILED, +}; + +extern struct kobj_type wss_ktype; +extern struct attribute_group wss_attr_group; + +extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t); +extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); + + +/* This should be changed to a dynamic array where entries are sorted + * by eth_addr and search is done in a binary form + * + * Although thinking twice about it: this technologie's maximum reach + * is 10 meters...unless you want to pack too much stuff in around + * your radio controller/WLP device, the list will probably not be + * too big. + * + * In any case, there is probably some data structure in the kernel + * than we could reused for that already. + * + * The below structure is really just good while we support one WSS per + * host. 
+ */ +struct wlp_eda_node { + struct list_head list_node; + unsigned char eth_addr[ETH_ALEN]; + struct uwb_dev_addr dev_addr; + struct wlp_wss *wss; + unsigned char virt_addr[ETH_ALEN]; + u8 tag; + enum wlp_wss_connect state; +}; + +typedef int (*wlp_eda_for_each_f)(struct wlp *, struct wlp_eda_node *, void *); + +extern void wlp_eda_init(struct wlp_eda *); +extern void wlp_eda_release(struct wlp_eda *); +extern int wlp_eda_create_node(struct wlp_eda *, + const unsigned char eth_addr[ETH_ALEN], + const struct uwb_dev_addr *); +extern void wlp_eda_rm_node(struct wlp_eda *, const struct uwb_dev_addr *); +extern int wlp_eda_update_node(struct wlp_eda *, + const struct uwb_dev_addr *, + struct wlp_wss *, + const unsigned char virt_addr[ETH_ALEN], + const u8, const enum wlp_wss_connect); +extern int wlp_eda_update_node_state(struct wlp_eda *, + const struct uwb_dev_addr *, + const enum wlp_wss_connect); + +extern int wlp_copy_eda_node(struct wlp_eda *, struct uwb_dev_addr *, + struct wlp_eda_node *); +extern int wlp_eda_for_each(struct wlp_eda *, wlp_eda_for_each_f , void *); +extern int wlp_eda_for_virtual(struct wlp_eda *, + const unsigned char eth_addr[ETH_ALEN], + struct uwb_dev_addr *, + wlp_eda_for_each_f , void *); + + +extern void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *); + +extern size_t wlp_wss_key_print(char *, size_t, u8 *); + +/* Function called when no more references to WSS exists */ +extern void wlp_wss_release(struct kobject *); + +extern void wlp_wss_reset(struct wlp_wss *); +extern int wlp_wss_create_activate(struct wlp_wss *, struct wlp_uuid *, + char *, unsigned, unsigned); +extern int wlp_wss_enroll_activate(struct wlp_wss *, struct wlp_uuid *, + struct uwb_dev_addr *); +extern ssize_t wlp_discover(struct wlp *); + +extern int wlp_enroll_neighbor(struct wlp *, struct wlp_neighbor_e *, + struct wlp_wss *, struct wlp_uuid *); +extern int wlp_wss_is_active(struct wlp *, struct wlp_wss *, + struct uwb_dev_addr *); + +struct wlp_assoc_conn_ctx { + struct work_struct ws; + struct wlp *wlp; + struct sk_buff *skb; + struct wlp_eda_node eda_entry; +}; + + +extern int wlp_wss_connect_prep(struct wlp *, struct wlp_eda_node *, void *); +extern int wlp_wss_send_copy(struct wlp *, struct wlp_eda_node *, void *); + + +/* Message handling */ +struct wlp_assoc_frame_ctx { + struct work_struct ws; + struct wlp *wlp; + struct sk_buff *skb; + struct uwb_dev_addr src; +}; + +extern int wlp_wss_prep_hdr(struct wlp *, struct wlp_eda_node *, void *); +extern void wlp_handle_d1_frame(struct work_struct *); +extern int wlp_parse_d2_frame_to_cache(struct wlp *, struct sk_buff *, + struct wlp_neighbor_e *); +extern int wlp_parse_d2_frame_to_enroll(struct wlp_wss *, struct sk_buff *, + struct wlp_neighbor_e *, + struct wlp_uuid *); +extern void wlp_handle_c1_frame(struct work_struct *); +extern void wlp_handle_c3_frame(struct work_struct *); +extern int wlp_parse_c3c4_frame(struct wlp *, struct sk_buff *, + struct wlp_uuid *, u8 *, + struct uwb_mac_addr *); +extern int wlp_parse_f0(struct wlp *, struct sk_buff *); +extern int wlp_send_assoc_frame(struct wlp *, struct wlp_wss *, + struct uwb_dev_addr *, enum wlp_assoc_type); +extern ssize_t wlp_get_version(struct wlp *, struct wlp_attr_version *, + u8 *, ssize_t); +extern ssize_t wlp_get_wssid(struct wlp *, struct wlp_attr_wssid *, + struct wlp_uuid *, ssize_t); +extern int __wlp_alloc_device_info(struct wlp *); +extern int __wlp_setup_device_info(struct wlp *); + +extern struct wlp_wss_attribute wss_attribute_properties; +extern struct 
wlp_wss_attribute wss_attribute_members; +extern struct wlp_wss_attribute wss_attribute_state; + +static inline +size_t wlp_wss_uuid_print(char *buf, size_t bufsize, struct wlp_uuid *uuid) +{ + size_t result; + + result = scnprintf(buf, bufsize, + "%02x:%02x:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x", + uuid->data[0], uuid->data[1], + uuid->data[2], uuid->data[3], + uuid->data[4], uuid->data[5], + uuid->data[6], uuid->data[7], + uuid->data[8], uuid->data[9], + uuid->data[10], uuid->data[11], + uuid->data[12], uuid->data[13], + uuid->data[14], uuid->data[15]); + return result; +} + +/** + * FIXME: How should a nonce be displayed? + */ +static inline +size_t wlp_wss_nonce_print(char *buf, size_t bufsize, struct wlp_nonce *nonce) +{ + size_t result; + + result = scnprintf(buf, bufsize, + "%02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x", + nonce->data[0], nonce->data[1], + nonce->data[2], nonce->data[3], + nonce->data[4], nonce->data[5], + nonce->data[6], nonce->data[7], + nonce->data[8], nonce->data[9], + nonce->data[10], nonce->data[11], + nonce->data[12], nonce->data[13], + nonce->data[14], nonce->data[15]); + return result; +} + + +static inline +void wlp_session_cb(struct wlp *wlp) +{ + struct completion *completion = wlp->session->cb_priv; + complete(completion); +} + +static inline +int wlp_uuid_is_set(struct wlp_uuid *uuid) +{ + struct wlp_uuid zero_uuid = { .data = { 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} }; + + if (!memcmp(uuid, &zero_uuid, sizeof(*uuid))) + return 0; + return 1; +} + +#endif /* __WLP_INTERNAL_H__ */ diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c new file mode 100644 index 0000000..0799402 --- /dev/null +++ b/drivers/uwb/wlp/wlp-lc.c @@ -0,0 +1,585 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * + * Copyright (C) 2005-2006 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + * + * FIXME: docs + */ + +#include +#define D_LOCAL 6 +#include +#include "wlp-internal.h" + + +static +void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) +{ + INIT_LIST_HEAD(&neighbor->wssid); +} + +/** + * Create area for device information storage + * + * wlp->mutex must be held + */ +int __wlp_alloc_device_info(struct wlp *wlp) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + BUG_ON(wlp->dev_info != NULL); + wlp->dev_info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL); + if (wlp->dev_info == NULL) { + dev_err(dev, "WLP: Unable to allocate memory for " + "device information.\n"); + return -ENOMEM; + } + return 0; +} + + +/** + * Fill in device information using function provided by driver + * + * wlp->mutex must be held + */ +static +void __wlp_fill_device_info(struct wlp *wlp) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + + BUG_ON(wlp->fill_device_info == NULL); + d_printf(6, dev, "Retrieving device information " + "from device driver.\n"); + wlp->fill_device_info(wlp, wlp->dev_info); +} + +/** + * Setup device information + * + * Allocate area for device information and populate it. + * + * wlp->mutex must be held + */ +int __wlp_setup_device_info(struct wlp *wlp) +{ + int result; + struct device *dev = &wlp->rc->uwb_dev.dev; + + result = __wlp_alloc_device_info(wlp); + if (result < 0) { + dev_err(dev, "WLP: Unable to allocate area for " + "device information.\n"); + return result; + } + __wlp_fill_device_info(wlp); + return 0; +} + +/** + * Remove information about neighbor stored temporarily + * + * Information learned during discovey should only be stored when the + * device enrolls in the neighbor's WSS. We do need to store this + * information temporarily in order to present it to the user. + * + * We are only interested in keeping neighbor WSS information if that + * neighbor is accepting enrollment. + * + * should be called with wlp->nbmutex held + */ +void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor) +{ + struct wlp_wssid_e *wssid_e, *next; + u8 keep; + if (!list_empty(&neighbor->wssid)) { + list_for_each_entry_safe(wssid_e, next, &neighbor->wssid, + node) { + if (wssid_e->info != NULL) { + keep = wssid_e->info->accept_enroll; + kfree(wssid_e->info); + wssid_e->info = NULL; + if (!keep) { + list_del(&wssid_e->node); + kfree(wssid_e); + } + } + } + } + if (neighbor->info != NULL) { + kfree(neighbor->info); + neighbor->info = NULL; + } +} + +/** + * Populate WLP neighborhood cache with neighbor information + * + * A new neighbor is found. If it is discoverable then we add it to the + * neighborhood cache. + * + */ +static +int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev) +{ + int result = 0; + int discoverable; + struct wlp_neighbor_e *neighbor; + + d_fnstart(6, &dev->dev, "uwb %p \n", dev); + d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n", + dev->dev_addr.data[1], dev->dev_addr.data[0]); + /** + * FIXME: + * Use contents of WLP IE found in beacon cache to determine if + * neighbor is discoverable. + * The device does not support WLP IE yet so this still needs to be + * done. Until then we assume all devices are discoverable. + */ + discoverable = 1; /* will be changed when FIXME disappears */ + if (discoverable) { + /* Add neighbor to cache for discovery */ + neighbor = kzalloc(sizeof(*neighbor), GFP_KERNEL); + if (neighbor == NULL) { + dev_err(&dev->dev, "Unable to create memory for " + "new neighbor. 
\n"); + result = -ENOMEM; + goto error_no_mem; + } + wlp_neighbor_init(neighbor); + uwb_dev_get(dev); + neighbor->uwb_dev = dev; + list_add(&neighbor->node, &wlp->neighbors); + } +error_no_mem: + d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result); + return result; +} + +/** + * Remove one neighbor from cache + */ +static +void __wlp_neighbor_release(struct wlp_neighbor_e *neighbor) +{ + struct wlp_wssid_e *wssid_e, *next_wssid_e; + + list_for_each_entry_safe(wssid_e, next_wssid_e, + &neighbor->wssid, node) { + list_del(&wssid_e->node); + kfree(wssid_e); + } + uwb_dev_put(neighbor->uwb_dev); + list_del(&neighbor->node); + kfree(neighbor); +} + +/** + * Clear entire neighborhood cache. + */ +static +void __wlp_neighbors_release(struct wlp *wlp) +{ + struct wlp_neighbor_e *neighbor, *next; + if (list_empty(&wlp->neighbors)) + return; + list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { + __wlp_neighbor_release(neighbor); + } +} + +static +void wlp_neighbors_release(struct wlp *wlp) +{ + mutex_lock(&wlp->nbmutex); + __wlp_neighbors_release(wlp); + mutex_unlock(&wlp->nbmutex); +} + + + +/** + * Send D1 message to neighbor, receive D2 message + * + * @neighbor: neighbor to which D1 message will be sent + * @wss: if not NULL, it is an enrollment request for this WSS + * @wssid: if wss not NULL, this is the wssid of the WSS in which we + * want to enroll + * + * A D1/D2 exchange is done for one of two reasons: discovery or + * enrollment. If done for discovery the D1 message is sent to the neighbor + * and the contents of the D2 response is stored in a temporary cache. + * If done for enrollment the @wss and @wssid are provided also. In this + * case the D1 message is sent to the neighbor, the D2 response is parsed + * for enrollment of the WSS with wssid. + * + * &wss->mutex is held + */ +static +int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor, + struct wlp_wss *wss, struct wlp_uuid *wssid) +{ + int result; + struct device *dev = &wlp->rc->uwb_dev.dev; + DECLARE_COMPLETION_ONSTACK(completion); + struct wlp_session session; + struct sk_buff *skb; + struct wlp_frame_assoc *resp; + struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; + + mutex_lock(&wlp->mutex); + if (!wlp_uuid_is_set(&wlp->uuid)) { + dev_err(dev, "WLP: UUID is not set. 
Set via sysfs to " + "proceed.\n"); + result = -ENXIO; + goto out; + } + /* Send D1 association frame */ + result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_D1); + if (result < 0) { + dev_err(dev, "Unable to send D1 frame to neighbor " + "%02x:%02x (%d)\n", dev_addr->data[1], + dev_addr->data[0], result); + d_printf(6, dev, "Add placeholders into buffer next to " + "neighbor information we have (dev address).\n"); + goto out; + } + /* Create session, wait for response */ + session.exp_message = WLP_ASSOC_D2; + session.cb = wlp_session_cb; + session.cb_priv = &completion; + session.neighbor_addr = *dev_addr; + BUG_ON(wlp->session != NULL); + wlp->session = &session; + /* Wait for D2/F0 frame */ + result = wait_for_completion_interruptible_timeout(&completion, + WLP_PER_MSG_TIMEOUT * HZ); + if (result == 0) { + result = -ETIMEDOUT; + dev_err(dev, "Timeout while sending D1 to neighbor " + "%02x:%02x.\n", dev_addr->data[1], + dev_addr->data[0]); + goto error_session; + } + if (result < 0) { + dev_err(dev, "Unable to discover/enroll neighbor %02x:%02x.\n", + dev_addr->data[1], dev_addr->data[0]); + goto error_session; + } + /* Parse message in session->data: it will be either D2 or F0 */ + skb = session.data; + resp = (void *) skb->data; + d_printf(6, dev, "Received response to D1 frame. \n"); + d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len); + + if (resp->type == WLP_ASSOC_F0) { + result = wlp_parse_f0(wlp, skb); + if (result < 0) + dev_err(dev, "WLP: Unable to parse F0 from neighbor " + "%02x:%02x.\n", dev_addr->data[1], + dev_addr->data[0]); + result = -EINVAL; + goto error_resp_parse; + } + if (wss == NULL) { + /* Discovery */ + result = wlp_parse_d2_frame_to_cache(wlp, skb, neighbor); + if (result < 0) { + dev_err(dev, "WLP: Unable to parse D2 message from " + "neighbor %02x:%02x for discovery.\n", + dev_addr->data[1], dev_addr->data[0]); + goto error_resp_parse; + } + } else { + /* Enrollment */ + result = wlp_parse_d2_frame_to_enroll(wss, skb, neighbor, + wssid); + if (result < 0) { + dev_err(dev, "WLP: Unable to parse D2 message from " + "neighbor %02x:%02x for enrollment.\n", + dev_addr->data[1], dev_addr->data[0]); + goto error_resp_parse; + } + } +error_resp_parse: + kfree_skb(skb); +error_session: + wlp->session = NULL; +out: + mutex_unlock(&wlp->mutex); + return result; +} + +/** + * Enroll into WSS of provided WSSID by using neighbor as registrar + * + * &wss->mutex is held + */ +int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor, + struct wlp_wss *wss, struct wlp_uuid *wssid) +{ + int result = 0; + struct device *dev = &wlp->rc->uwb_dev.dev; + char buf[WLP_WSS_UUID_STRSIZE]; + struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; + wlp_wss_uuid_print(buf, sizeof(buf), wssid); + d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", + wlp, neighbor, wss, wssid, buf); + d_printf(6, dev, "Complete me.\n"); + result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); + if (result < 0) { + dev_err(dev, "WLP: D1/D2 message exchange for enrollment " + "failed. result = %d \n", result); + goto out; + } + if (wss->state != WLP_WSS_STATE_PART_ENROLLED) { + dev_err(dev, "WLP: Unable to enroll into WSS %s using " + "neighbor %02x:%02x. 
\n", buf, + dev_addr->data[1], dev_addr->data[0]); + result = -EINVAL; + goto out; + } + if (wss->secure_status == WLP_WSS_SECURE) { + dev_err(dev, "FIXME: need to complete secure enrollment.\n"); + result = -EINVAL; + goto error; + } else { + wss->state = WLP_WSS_STATE_ENROLLED; + d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS " + "%s using neighbor %02x:%02x. \n", buf, + dev_addr->data[1], dev_addr->data[0]); + } + + d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", + wlp, neighbor, wss, wssid, buf); +out: + return result; +error: + wlp_wss_reset(wss); + return result; +} + +/** + * Discover WSS information of neighbor's active WSS + */ +static +int wlp_discover_neighbor(struct wlp *wlp, + struct wlp_neighbor_e *neighbor) +{ + return wlp_d1d2_exchange(wlp, neighbor, NULL, NULL); +} + + +/** + * Each neighbor in the neighborhood cache is discoverable. Discover it. + * + * Discovery is done through sending of D1 association frame and parsing + * the D2 association frame response. Only wssid from D2 will be included + * in neighbor cache, rest is just displayed to user and forgotten. + * + * The discovery is not done in parallel. This is simple and enables us to + * maintain only one association context. + * + * The discovery of one neighbor does not affect the other, but if the + * discovery of a neighbor fails it is removed from the neighborhood cache. + */ +static +int wlp_discover_all_neighbors(struct wlp *wlp) +{ + int result = 0; + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_neighbor_e *neighbor, *next; + + list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { + result = wlp_discover_neighbor(wlp, neighbor); + if (result < 0) { + dev_err(dev, "WLP: Unable to discover neighbor " + "%02x:%02x, removing from neighborhood. \n", + neighbor->uwb_dev->dev_addr.data[1], + neighbor->uwb_dev->dev_addr.data[0]); + __wlp_neighbor_release(neighbor); + } + } + return result; +} + +static int wlp_add_neighbor_helper(struct device *dev, void *priv) +{ + struct wlp *wlp = priv; + struct uwb_dev *uwb_dev = to_uwb_dev(dev); + + return wlp_add_neighbor(wlp, uwb_dev); +} + +/** + * Discover WLP neighborhood + * + * Will send D1 association frame to all devices in beacon group that have + * discoverable bit set in WLP IE. D2 frames will be received, information + * displayed to user in @buf. Partial information (from D2 association + * frame) will be cached to assist with future association + * requests. + * + * The discovery of the WLP neighborhood is triggered by the user. This + * should occur infrequently and we thus free current cache and re-allocate + * memory if needed. + * + * If one neighbor fails during initial discovery (determining if it is a + * neighbor or not), we fail all - note that interaction with neighbor has + * not occured at this point so if a failure occurs we know something went wrong + * locally. We thus undo everything. + */ +ssize_t wlp_discover(struct wlp *wlp) +{ + int result = 0; + struct device *dev = &wlp->rc->uwb_dev.dev; + + d_fnstart(6, dev, "wlp %p \n", wlp); + mutex_lock(&wlp->nbmutex); + /* Clear current neighborhood cache. */ + __wlp_neighbors_release(wlp); + /* Determine which devices in neighborhood. Repopulate cache. */ + result = uwb_dev_for_each(wlp->rc, wlp_add_neighbor_helper, wlp); + if (result < 0) { + /* May have partial neighbor information, release all. */ + __wlp_neighbors_release(wlp); + goto error_dev_for_each; + } + /* Discover the properties of devices in neighborhood. 
*/ + result = wlp_discover_all_neighbors(wlp); + /* In case of failure we still print our partial results. */ + if (result < 0) { + dev_err(dev, "Unable to fully discover neighborhood. \n"); + result = 0; + } +error_dev_for_each: + mutex_unlock(&wlp->nbmutex); + d_fnend(6, dev, "wlp %p \n", wlp); + return result; +} + +/** + * Handle events from UWB stack + * + * We handle events conservatively. If a neighbor goes off the air we + * remove it from the neighborhood. If an association process is in + * progress this function will block waiting for the nbmutex to become + * free. The association process will thus be allowed to complete before it + * is removed. + */ +static +void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev, + enum uwb_notifs event) +{ + struct wlp *wlp = _wlp; + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_neighbor_e *neighbor, *next; + int result; + switch (event) { + case UWB_NOTIF_ONAIR: + d_printf(6, dev, "UWB device %02x:%02x is onair\n", + uwb_dev->dev_addr.data[1], + uwb_dev->dev_addr.data[0]); + result = wlp_eda_create_node(&wlp->eda, + uwb_dev->mac_addr.data, + &uwb_dev->dev_addr); + if (result < 0) + dev_err(dev, "WLP: Unable to add new neighbor " + "%02x:%02x to EDA cache.\n", + uwb_dev->dev_addr.data[1], + uwb_dev->dev_addr.data[0]); + break; + case UWB_NOTIF_OFFAIR: + d_printf(6, dev, "UWB device %02x:%02x is offair\n", + uwb_dev->dev_addr.data[1], + uwb_dev->dev_addr.data[0]); + wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); + mutex_lock(&wlp->nbmutex); + list_for_each_entry_safe(neighbor, next, &wlp->neighbors, + node) { + if (neighbor->uwb_dev == uwb_dev) { + d_printf(6, dev, "Removing device from " + "neighborhood.\n"); + __wlp_neighbor_release(neighbor); + } + } + mutex_unlock(&wlp->nbmutex); + break; + default: + dev_err(dev, "don't know how to handle event %d from uwb\n", + event); + } +} + +int wlp_setup(struct wlp *wlp, struct uwb_rc *rc) +{ + struct device *dev = &rc->uwb_dev.dev; + int result; + + d_fnstart(6, dev, "wlp %p\n", wlp); + BUG_ON(wlp->fill_device_info == NULL); + BUG_ON(wlp->xmit_frame == NULL); + BUG_ON(wlp->stop_queue == NULL); + BUG_ON(wlp->start_queue == NULL); + wlp->rc = rc; + wlp_eda_init(&wlp->eda);/* Set up address cache */ + wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; + wlp->uwb_notifs_handler.data = wlp; + uwb_notifs_register(rc, &wlp->uwb_notifs_handler); + + uwb_pal_init(&wlp->pal); + result = uwb_pal_register(rc, &wlp->pal); + if (result < 0) + uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); + + d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); + return result; +} +EXPORT_SYMBOL_GPL(wlp_setup); + +void wlp_remove(struct wlp *wlp) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + d_fnstart(6, dev, "wlp %p\n", wlp); + wlp_neighbors_release(wlp); + uwb_pal_unregister(wlp->rc, &wlp->pal); + uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); + wlp_eda_release(&wlp->eda); + mutex_lock(&wlp->mutex); + if (wlp->dev_info != NULL) + kfree(wlp->dev_info); + mutex_unlock(&wlp->mutex); + wlp->rc = NULL; + /* We have to use NULL here because this function can be called + * when the device disappeared. */ + d_fnend(6, NULL, "wlp %p\n", wlp); +} +EXPORT_SYMBOL_GPL(wlp_remove); + +/** + * wlp_reset_all - reset the WLP hardware + * @wlp: the WLP device to reset. + * + * This schedules a full hardware reset of the WLP device. The radio + * controller and any other PALs will also be reset. 
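+ *
+ * A typical caller would be a WLP network driver that concludes the
+ * hardware is wedged, for example from its tx timeout handler. The
+ * sketch below is illustrative only ("my_wlp_netdev" and its embedded
+ * struct wlp are assumptions, not defined by this patch):
+ *
+ *	static void my_wlp_tx_timeout(struct net_device *net_dev)
+ *	{
+ *		struct my_wlp_netdev *nd = netdev_priv(net_dev);
+ *
+ *		wlp_reset_all(&nd->wlp);
+ *	}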
+ */ +void wlp_reset_all(struct wlp *wlp) +{ + uwb_rc_reset_all(wlp->rc); +} +EXPORT_SYMBOL_GPL(wlp_reset_all); -- cgit v0.10.2 From e377e9d32d4945fe6a14775b3a4d9ecd1462e36a Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Wed, 17 Sep 2008 16:34:17 +0100 Subject: uwb: add WiMedia LLC Protocol stack (messages) Add the WLP message formatting/decoding code. Signed-off-by: David Vrabel diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c new file mode 100644 index 0000000..a64cb82 --- /dev/null +++ b/drivers/uwb/wlp/messages.c @@ -0,0 +1,1946 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * Message construction and parsing + * + * Copyright (C) 2007 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ + +#include +#define D_LOCAL 6 +#include +#include "wlp-internal.h" + +static +const char *__wlp_assoc_frame[] = { + [WLP_ASSOC_D1] = "WLP_ASSOC_D1", + [WLP_ASSOC_D2] = "WLP_ASSOC_D2", + [WLP_ASSOC_M1] = "WLP_ASSOC_M1", + [WLP_ASSOC_M2] = "WLP_ASSOC_M2", + [WLP_ASSOC_M3] = "WLP_ASSOC_M3", + [WLP_ASSOC_M4] = "WLP_ASSOC_M4", + [WLP_ASSOC_M5] = "WLP_ASSOC_M5", + [WLP_ASSOC_M6] = "WLP_ASSOC_M6", + [WLP_ASSOC_M7] = "WLP_ASSOC_M7", + [WLP_ASSOC_M8] = "WLP_ASSOC_M8", + [WLP_ASSOC_F0] = "WLP_ASSOC_F0", + [WLP_ASSOC_E1] = "WLP_ASSOC_E1", + [WLP_ASSOC_E2] = "WLP_ASSOC_E2", + [WLP_ASSOC_C1] = "WLP_ASSOC_C1", + [WLP_ASSOC_C2] = "WLP_ASSOC_C2", + [WLP_ASSOC_C3] = "WLP_ASSOC_C3", + [WLP_ASSOC_C4] = "WLP_ASSOC_C4", +}; + +static const char *wlp_assoc_frame_str(unsigned id) +{ + if (id >= ARRAY_SIZE(__wlp_assoc_frame)) + return "unknown association frame"; + return __wlp_assoc_frame[id]; +} + +static const char *__wlp_assc_error[] = { + "none", + "Authenticator Failure", + "Rogue activity suspected", + "Device busy", + "Setup Locked", + "Registrar not ready", + "Invalid WSS selection", + "Message timeout", + "Enrollment session timeout", + "Device password invalid", + "Unsupported version", + "Internal error", + "Undefined error", + "Numeric comparison failure", + "Waiting for user input", +}; + +static const char *wlp_assc_error_str(unsigned id) +{ + if (id >= ARRAY_SIZE(__wlp_assc_error)) + return "unknown WLP association error"; + return __wlp_assc_error[id]; +} + +static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type, + size_t len) +{ + hdr->type = cpu_to_le16(type); + hdr->length = cpu_to_le16(len); +} + +/* + * Populate fields of a constant sized attribute + * + * @returns: total size of attribute including size of new value + * + * We have two instances of this function (wlp_pset and wlp_set): one takes + * the value as a parameter, the other takes a pointer to the value as + * parameter. They thus only differ in how the value is assigned to the + * attribute. 
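+ *
+ * For example, the instantiation wlp_set(u8, WLP_ATTR_WLP_VER, version)
+ * below expands to roughly the following (the d_fnstart()/d_dump()/
+ * d_fnend() debug calls are omitted here for brevity):
+ *
+ *	static size_t wlp_set_version(struct wlp_attr_version *attr, u8 value)
+ *	{
+ *		wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WLP_VER,
+ *				 sizeof(*attr) - sizeof(struct wlp_attr_hdr));
+ *		attr->version = value;
+ *		return sizeof(*attr);
+ *	}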
+ * + * We use sizeof(*attr) - sizeof(struct wlp_attr_hdr) instead of + * sizeof(type) to be able to use this same code for the structures that + * contain 8bit enum values and be able to deal with pointer types. + */ +#define wlp_set(type, type_code, name) \ +static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ +{ \ + d_fnstart(6, NULL, "(attribute %p)\n", attr); \ + wlp_set_attr_hdr(&attr->hdr, type_code, \ + sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ + attr->name = value; \ + d_dump(6, NULL, attr, sizeof(*attr)); \ + d_fnend(6, NULL, "(attribute %p)\n", attr); \ + return sizeof(*attr); \ +} + +#define wlp_pset(type, type_code, name) \ +static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ +{ \ + d_fnstart(6, NULL, "(attribute %p)\n", attr); \ + wlp_set_attr_hdr(&attr->hdr, type_code, \ + sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ + attr->name = *value; \ + d_dump(6, NULL, attr, sizeof(*attr)); \ + d_fnend(6, NULL, "(attribute %p)\n", attr); \ + return sizeof(*attr); \ +} + +/** + * Populate fields of a variable attribute + * + * @returns: total size of attribute including size of new value + * + * Provided with a pointer to the memory area reserved for the + * attribute structure, the field is populated with the value. The + * reserved memory has to contain enough space for the value. + */ +#define wlp_vset(type, type_code, name) \ +static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ + size_t len) \ +{ \ + d_fnstart(6, NULL, "(attribute %p)\n", attr); \ + wlp_set_attr_hdr(&attr->hdr, type_code, len); \ + memcpy(attr->name, value, len); \ + d_dump(6, NULL, attr, sizeof(*attr) + len); \ + d_fnend(6, NULL, "(attribute %p)\n", attr); \ + return sizeof(*attr) + len; \ +} + +wlp_vset(char *, WLP_ATTR_DEV_NAME, dev_name) +wlp_vset(char *, WLP_ATTR_MANUF, manufacturer) +wlp_set(enum wlp_assoc_type, WLP_ATTR_MSG_TYPE, msg_type) +wlp_vset(char *, WLP_ATTR_MODEL_NAME, model_name) +wlp_vset(char *, WLP_ATTR_MODEL_NR, model_nr) +wlp_vset(char *, WLP_ATTR_SERIAL, serial) +wlp_vset(char *, WLP_ATTR_WSS_NAME, wss_name) +wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_E, uuid_e) +wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_R, uuid_r) +wlp_pset(struct wlp_uuid *, WLP_ATTR_WSSID, wssid) +wlp_pset(struct wlp_dev_type *, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type) +/*wlp_pset(struct wlp_dev_type *, WLP_ATTR_SEC_DEV_TYPE, sec_dev_type)*/ +wlp_set(u8, WLP_ATTR_WLP_VER, version) +wlp_set(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err) +wlp_set(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd) +wlp_set(u8, WLP_ATTR_ACC_ENRL, accept_enrl) +wlp_set(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status) +wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_BCAST, wss_bcast) +wlp_pset(struct wlp_nonce *, WLP_ATTR_ENRL_NONCE, enonce) +wlp_pset(struct wlp_nonce *, WLP_ATTR_REG_NONCE, rnonce) +wlp_set(u8, WLP_ATTR_WSS_TAG, wss_tag) +wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_VIRT, wss_virt) + +/** + * Fill in the WSS information attributes + * + * We currently only support one WSS, and this is assumed in this function + * that can populate only one WSS information attribute. 
+ */ +static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr, + struct wlp_wss *wss) +{ + size_t datalen; + void *ptr = attr->wss_info; + size_t used = sizeof(*attr); + d_fnstart(6, NULL, "(attribute %p)\n", attr); + datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); + wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); + used = wlp_set_wssid(ptr, &wss->wssid); + used += wlp_set_wss_name(ptr + used, wss->name, strlen(wss->name)); + used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); + used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); + used += wlp_set_wss_bcast(ptr + used, &wss->bcast); + d_dump(6, NULL, attr, sizeof(*attr) + datalen); + d_fnend(6, NULL, "(attribute %p, used %d)\n", + attr, (int)(sizeof(*attr) + used)); + return sizeof(*attr) + used; +} + +/** + * Verify attribute header + * + * @hdr: Pointer to attribute header that will be verified. + * @type: Expected attribute type. + * @len: Expected length of attribute value (excluding header). + * + * Most attribute values have a known length even when they do have a + * length field. This knowledge can be used via this function to verify + * that the length field matches the expected value. + */ +static int wlp_check_attr_hdr(struct wlp *wlp, struct wlp_attr_hdr *hdr, + enum wlp_attr_type type, unsigned len) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + + if (le16_to_cpu(hdr->type) != type) { + dev_err(dev, "WLP: unexpected header type. Expected " + "%u, got %u.\n", type, le16_to_cpu(hdr->type)); + return -EINVAL; + } + if (le16_to_cpu(hdr->length) != len) { + dev_err(dev, "WLP: unexpected length in header. Expected " + "%u, got %u.\n", len, le16_to_cpu(hdr->length)); + return -EINVAL; + } + return 0; +} + +/** + * Check if header of WSS information attribute valid + * + * @returns: length of WSS attributes (value of length attribute field) if + * valid WSS information attribute found + * -ENODATA if no WSS information attribute found + * -EIO other error occured + * + * The WSS information attribute is optional. The function will be provided + * with a pointer to data that could _potentially_ be a WSS information + * attribute. If a valid WSS information attribute is found it will return + * 0, if no WSS information attribute is found it will return -ENODATA, and + * another error will be returned if it is a WSS information attribute, but + * some parsing failure occured. + */ +static int wlp_check_wss_info_attr_hdr(struct wlp *wlp, + struct wlp_attr_hdr *hdr, size_t buflen) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + size_t len; + int result = 0; + + if (buflen < sizeof(*hdr)) { + dev_err(dev, "WLP: Not enough space in buffer to parse" + " WSS information attribute header.\n"); + result = -EIO; + goto out; + } + if (le16_to_cpu(hdr->type) != WLP_ATTR_WSS_INFO) { + /* WSS information is optional */ + result = -ENODATA; + goto out; + } + len = le16_to_cpu(hdr->length); + if (buflen < sizeof(*hdr) + len) { + dev_err(dev, "WLP: Not enough space in buffer to parse " + "variable data. Got %d, expected %d.\n", + (int)buflen, (int)(sizeof(*hdr) + len)); + result = -EIO; + goto out; + } + result = len; +out: + return result; +} + + +/** + * Get value of attribute from fixed size attribute field. + * + * @attr: Pointer to attribute field. + * @value: Pointer to variable in which attribute value will be placed. + * @buflen: Size of buffer in which attribute field (including header) + * can be found. + * @returns: Amount of given buffer consumed by parsing for this attribute. 
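+ *
+ * Callers typically walk a received frame with these generated parsers,
+ * advancing through the buffer by the amount consumed. Illustrative
+ * sketch only (the local variables are placeholders, not code from this
+ * file):
+ *
+ *	ssize_t used, result;
+ *
+ *	used = wlp_get_version(wlp, ptr, &version, buflen);
+ *	if (used < 0)
+ *		goto error_parse;
+ *	result = wlp_get_wssid(wlp, ptr + used, &wssid, buflen - used);
+ *	if (result < 0)
+ *		goto error_parse;
+ *	used += result;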
+ * + * The size and type of the value is known by the type of the attribute. + */ +#define wlp_get(type, type_code, name) \ +ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \ + type *value, ssize_t buflen) \ +{ \ + struct device *dev = &wlp->rc->uwb_dev.dev; \ + if (buflen < 0) \ + return -EINVAL; \ + if (buflen < sizeof(*attr)) { \ + dev_err(dev, "WLP: Not enough space in buffer to parse" \ + " attribute field. Need %d, received %zu\n", \ + (int)sizeof(*attr), buflen); \ + return -EIO; \ + } \ + if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code, \ + sizeof(attr->name)) < 0) { \ + dev_err(dev, "WLP: Header verification failed. \n"); \ + return -EINVAL; \ + } \ + *value = attr->name; \ + return sizeof(*attr); \ +} + +#define wlp_get_sparse(type, type_code, name) \ + static wlp_get(type, type_code, name) + +/** + * Get value of attribute from variable sized attribute field. + * + * @max: The maximum size of this attribute. This value is dictated by + * the maximum value from the WLP specification. + * + * @attr: Pointer to attribute field. + * @value: Pointer to variable that will contain the value. The memory + * must already have been allocated for this value. + * @buflen: Size of buffer in which attribute field (including header) + * can be found. + * @returns: Amount of given bufferconsumed by parsing for this attribute. + */ +#define wlp_vget(type_val, type_code, name, max) \ +static ssize_t wlp_get_##name(struct wlp *wlp, \ + struct wlp_attr_##name *attr, \ + type_val *value, ssize_t buflen) \ +{ \ + struct device *dev = &wlp->rc->uwb_dev.dev; \ + size_t len; \ + if (buflen < 0) \ + return -EINVAL; \ + if (buflen < sizeof(*attr)) { \ + dev_err(dev, "WLP: Not enough space in buffer to parse" \ + " header.\n"); \ + return -EIO; \ + } \ + if (le16_to_cpu(attr->hdr.type) != type_code) { \ + dev_err(dev, "WLP: Unexpected attribute type. Got %u, " \ + "expected %u.\n", le16_to_cpu(attr->hdr.type), \ + type_code); \ + return -EINVAL; \ + } \ + len = le16_to_cpu(attr->hdr.length); \ + if (len > max) { \ + dev_err(dev, "WLP: Attribute larger than maximum " \ + "allowed. Received %zu, max is %d.\n", len, \ + (int)max); \ + return -EFBIG; \ + } \ + if (buflen < sizeof(*attr) + len) { \ + dev_err(dev, "WLP: Not enough space in buffer to parse "\ + "variable data.\n"); \ + return -EIO; \ + } \ + memcpy(value, (void *) attr + sizeof(*attr), len); \ + return sizeof(*attr) + len; \ +} + +wlp_get(u8, WLP_ATTR_WLP_VER, version) +wlp_get_sparse(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd) +wlp_get_sparse(struct wlp_dev_type, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type) +wlp_get_sparse(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err) +wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_E, uuid_e) +wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_R, uuid_r) +wlp_get(struct wlp_uuid, WLP_ATTR_WSSID, wssid) +wlp_get_sparse(u8, WLP_ATTR_ACC_ENRL, accept_enrl) +wlp_get_sparse(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status) +wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_BCAST, wss_bcast) +wlp_get_sparse(u8, WLP_ATTR_WSS_TAG, wss_tag) +wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_VIRT, wss_virt) +wlp_get_sparse(struct wlp_nonce, WLP_ATTR_ENRL_NONCE, enonce) +wlp_get_sparse(struct wlp_nonce, WLP_ATTR_REG_NONCE, rnonce) + +/* The buffers for the device info attributes can be found in the + * wlp_device_info struct. These buffers contain one byte more than the + * max allowed by the spec - this is done to be able to add the + * terminating \0 for user display. 
This terminating byte is not required + * in the actual attribute field (because it has a length field) so the + * maximum allowed for this value is one less than its size in the + * structure. + */ +wlp_vget(char, WLP_ATTR_WSS_NAME, wss_name, + FIELD_SIZEOF(struct wlp_wss, name) - 1) +wlp_vget(char, WLP_ATTR_DEV_NAME, dev_name, + FIELD_SIZEOF(struct wlp_device_info, name) - 1) +wlp_vget(char, WLP_ATTR_MANUF, manufacturer, + FIELD_SIZEOF(struct wlp_device_info, manufacturer) - 1) +wlp_vget(char, WLP_ATTR_MODEL_NAME, model_name, + FIELD_SIZEOF(struct wlp_device_info, model_name) - 1) +wlp_vget(char, WLP_ATTR_MODEL_NR, model_nr, + FIELD_SIZEOF(struct wlp_device_info, model_nr) - 1) +wlp_vget(char, WLP_ATTR_SERIAL, serial, + FIELD_SIZEOF(struct wlp_device_info, serial) - 1) + +/** + * Retrieve WSS Name, Accept enroll, Secure status, Broadcast from WSS info + * + * @attr: pointer to WSS name attribute in WSS information attribute field + * @info: structure that will be populated with data from WSS information + * field (WSS name, Accept enroll, secure status, broadcast address) + * @buflen: size of buffer + * + * Although the WSSID attribute forms part of the WSS info attribute it is + * retrieved separately and stored in a different location. + */ +static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, + struct wlp_attr_hdr *attr, + struct wlp_wss_tmp_info *info, + ssize_t buflen) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + void *ptr = attr; + size_t used = 0; + ssize_t result = -EINVAL; + + d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n"); + result = wlp_get_wss_name(wlp, ptr, info->name, buflen); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSS name from " + "WSS info in D2 message.\n"); + goto error_parse; + } + used += result; + d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n"); + result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, + buflen - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain accepting " + "enrollment from WSS info in D2 message.\n"); + goto error_parse; + } + if (info->accept_enroll != 0 && info->accept_enroll != 1) { + dev_err(dev, "WLP: invalid value for accepting " + "enrollment in D2 message.\n"); + result = -EINVAL; + goto error_parse; + } + used += result; + d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n"); + result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, + buflen - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain secure " + "status from WSS info in D2 message.\n"); + goto error_parse; + } + if (info->sec_status != 0 && info->sec_status != 1) { + dev_err(dev, "WLP: invalid value for secure " + "status in D2 message.\n"); + result = -EINVAL; + goto error_parse; + } + used += result; + d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n"); + result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, + buflen - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain broadcast " + "address from WSS info in D2 message.\n"); + goto error_parse; + } + used += result; + result = used; +error_parse: + return result; +} + +/** + * Create a new WSSID entry for the neighbor, allocate temporary storage + * + * Each neighbor can have many WSS active. We maintain a list of WSSIDs + * advertised by neighbor. During discovery we also cache information about + * these WSS in temporary storage. + * + * The temporary storage will be removed after it has been used (eg. 
+ * displayed to user), the wssid element will be removed from the list when + * the neighbor is rediscovered or when it disappears. + */ +static struct wlp_wssid_e *wlp_create_wssid_e(struct wlp *wlp, + struct wlp_neighbor_e *neighbor) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_wssid_e *wssid_e; + + wssid_e = kzalloc(sizeof(*wssid_e), GFP_KERNEL); + if (wssid_e == NULL) { + dev_err(dev, "WLP: unable to allocate memory " + "for WSS information.\n"); + goto error_alloc; + } + wssid_e->info = kzalloc(sizeof(struct wlp_wss_tmp_info), GFP_KERNEL); + if (wssid_e->info == NULL) { + dev_err(dev, "WLP: unable to allocate memory " + "for temporary WSS information.\n"); + kfree(wssid_e); + wssid_e = NULL; + goto error_alloc; + } + list_add(&wssid_e->node, &neighbor->wssid); +error_alloc: + return wssid_e; +} + +/** + * Parse WSS information attribute + * + * @attr: pointer to WSS information attribute header + * @buflen: size of buffer in which WSS information attribute appears + * @wssid: will place wssid from WSS info attribute in this location + * @wss_info: will place other information from WSS information attribute + * in this location + * + * memory for @wssid and @wss_info must be allocated when calling this + */ +static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr, + size_t buflen, struct wlp_uuid *wssid, + struct wlp_wss_tmp_info *wss_info) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + ssize_t result; + size_t len; + size_t used = 0; + void *ptr; + + result = wlp_check_wss_info_attr_hdr(wlp, (struct wlp_attr_hdr *)attr, + buflen); + if (result < 0) + goto out; + len = result; + used = sizeof(*attr); + ptr = attr; + d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n"); + result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); + goto out; + } + used += result; + result = wlp_get_wss_info_attrs(wlp, ptr + used, wss_info, + buflen - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSS information " + "from WSS information attributes. \n"); + goto out; + } + used += result; + if (len + sizeof(*attr) != used) { + dev_err(dev, "WLP: Amount of data parsed does not " + "match length field. Parsed %zu, length " + "field %zu. \n", used, len); + result = -EINVAL; + goto out; + } + result = used; + d_printf(6, dev, "WLP: Successfully parsed WLP information " + "attribute. used %zu bytes\n", used); +out: + return result; +} + +/** + * Retrieve WSS info from association frame + * + * @attr: pointer to WSS information attribute + * @neighbor: ptr to neighbor being discovered, NULL if enrollment in + * progress + * @wss: ptr to WSS being enrolled in, NULL if discovery in progress + * @buflen: size of buffer in which WSS information appears + * + * The WSS information attribute appears in the D2 association message. + * This message is used in two ways: to discover all neighbors or to enroll + * into a WSS activated by a neighbor. During discovery we only want to + * store the WSS info in a cache, to be deleted right after it has been + * used (eg. displayed to the user). During enrollment we store the WSS + * information for the lifetime of enrollment. + * + * During discovery we are interested in all WSS information, during + * enrollment we are only interested in the WSS being enrolled in. 
Even so, + * when in enrollment we keep parsing the message after finding the WSS of + * interest, this simplifies the calling routine in that it can be sure + * that all WSS information attributes have been parsed out of the message. + * + * Association frame is process with nbmutex held. The list access is safe. + */ +static ssize_t wlp_get_all_wss_info(struct wlp *wlp, + struct wlp_attr_wss_info *attr, + struct wlp_neighbor_e *neighbor, + struct wlp_wss *wss, ssize_t buflen) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + size_t used = 0; + ssize_t result = -EINVAL; + struct wlp_attr_wss_info *cur; + struct wlp_uuid wssid; + struct wlp_wss_tmp_info wss_info; + unsigned enroll; /* 0 - discovery to cache, 1 - enrollment */ + struct wlp_wssid_e *wssid_e; + char buf[WLP_WSS_UUID_STRSIZE]; + + d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n", + wlp, attr, neighbor, wss, (int)buflen); + if (buflen < 0) + goto out; + + if (neighbor != NULL && wss == NULL) + enroll = 0; /* discovery */ + else if (wss != NULL && neighbor == NULL) + enroll = 1; /* enrollment */ + else + goto out; + + cur = attr; + while (buflen - used > 0) { + memset(&wss_info, 0, sizeof(wss_info)); + cur = (void *)cur + used; + result = wlp_get_wss_info(wlp, cur, buflen - used, &wssid, + &wss_info); + if (result == -ENODATA) { + result = used; + goto out; + } else if (result < 0) { + dev_err(dev, "WLP: Unable to parse WSS information " + "from WSS information attribute. \n"); + result = -EINVAL; + goto error_parse; + } + if (enroll && !memcmp(&wssid, &wss->wssid, sizeof(wssid))) { + if (wss_info.accept_enroll != 1) { + dev_err(dev, "WLP: Requested WSS does " + "not accept enrollment.\n"); + result = -EINVAL; + goto out; + } + memcpy(wss->name, wss_info.name, sizeof(wss->name)); + wss->bcast = wss_info.bcast; + wss->secure_status = wss_info.sec_status; + wss->accept_enroll = wss_info.accept_enroll; + wss->state = WLP_WSS_STATE_PART_ENROLLED; + wlp_wss_uuid_print(buf, sizeof(buf), &wssid); + d_printf(2, dev, "WLP: Found WSS %s. 
Enrolling.\n", + buf); + } else { + wssid_e = wlp_create_wssid_e(wlp, neighbor); + if (wssid_e == NULL) { + dev_err(dev, "WLP: Cannot create new WSSID " + "entry for neighbor %02x:%02x.\n", + neighbor->uwb_dev->dev_addr.data[1], + neighbor->uwb_dev->dev_addr.data[0]); + result = -ENOMEM; + goto out; + } + wssid_e->wssid = wssid; + *wssid_e->info = wss_info; + } + used += result; + } + result = used; +error_parse: + if (result < 0 && !enroll) /* this was a discovery */ + wlp_remove_neighbor_tmp_info(neighbor); +out: + d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, " + "result %d \n", wlp, attr, neighbor, wss, (int)buflen, + (int)result); + return result; + +} + +/** + * Parse WSS information attributes into cache for discovery + * + * @attr: the first WSS information attribute in message + * @neighbor: the neighbor whose cache will be populated + * @buflen: size of the input buffer + */ +static ssize_t wlp_get_wss_info_to_cache(struct wlp *wlp, + struct wlp_attr_wss_info *attr, + struct wlp_neighbor_e *neighbor, + ssize_t buflen) +{ + return wlp_get_all_wss_info(wlp, attr, neighbor, NULL, buflen); +} + +/** + * Parse WSS information attributes into WSS struct for enrollment + * + * @attr: the first WSS information attribute in message + * @wss: the WSS that will be enrolled + * @buflen: size of the input buffer + */ +static ssize_t wlp_get_wss_info_to_enroll(struct wlp *wlp, + struct wlp_attr_wss_info *attr, + struct wlp_wss *wss, ssize_t buflen) +{ + return wlp_get_all_wss_info(wlp, attr, NULL, wss, buflen); +} + +/** + * Construct a D1 association frame + * + * We use the radio control functions to determine the values of the device + * properties. These are of variable length and the total space needed is + * tallied first before we start constructing the message. The radio + * control functions return strings that are terminated with \0. This + * character should not be included in the message (there is a length field + * accompanying it in the attribute). 
+ */ +static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb) +{ + + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = 0; + struct wlp_device_info *info; + size_t used = 0; + struct wlp_frame_assoc *_d1; + struct sk_buff *_skb; + void *d1_itr; + + d_fnstart(6, dev, "wlp %p\n", wlp); + if (wlp->dev_info == NULL) { + result = __wlp_setup_device_info(wlp); + if (result < 0) { + dev_err(dev, "WLP: Unable to setup device " + "information for D1 message.\n"); + goto error; + } + } + info = wlp->dev_info; + d_printf(6, dev, "Local properties:\n" + "Device name (%d bytes): %s\n" + "Model name (%d bytes): %s\n" + "Manufacturer (%d bytes): %s\n" + "Model number (%d bytes): %s\n" + "Serial number (%d bytes): %s\n" + "Primary device type: \n" + " Category: %d \n" + " OUI: %02x:%02x:%02x \n" + " OUI Subdivision: %u \n", + (int)strlen(info->name), info->name, + (int)strlen(info->model_name), info->model_name, + (int)strlen(info->manufacturer), info->manufacturer, + (int)strlen(info->model_nr), info->model_nr, + (int)strlen(info->serial), info->serial, + info->prim_dev_type.category, + info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], + info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); + _skb = dev_alloc_skb(sizeof(*_d1) + + sizeof(struct wlp_attr_uuid_e) + + sizeof(struct wlp_attr_wss_sel_mthd) + + sizeof(struct wlp_attr_dev_name) + + strlen(info->name) + + sizeof(struct wlp_attr_manufacturer) + + strlen(info->manufacturer) + + sizeof(struct wlp_attr_model_name) + + strlen(info->model_name) + + sizeof(struct wlp_attr_model_nr) + + strlen(info->model_nr) + + sizeof(struct wlp_attr_serial) + + strlen(info->serial) + + sizeof(struct wlp_attr_prim_dev_type) + + sizeof(struct wlp_attr_wlp_assc_err)); + if (_skb == NULL) { + dev_err(dev, "WLP: Cannot allocate memory for association " + "message.\n"); + result = -ENOMEM; + goto error; + } + _d1 = (void *) _skb->data; + d_printf(6, dev, "D1 starts at %p \n", _d1); + _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); + _d1->hdr.type = WLP_FRAME_ASSOCIATION; + _d1->type = WLP_ASSOC_D1; + + wlp_set_version(&_d1->version, WLP_VERSION); + wlp_set_msg_type(&_d1->msg_type, WLP_ASSOC_D1); + d1_itr = _d1->attr; + used = wlp_set_uuid_e(d1_itr, &wlp->uuid); + used += wlp_set_wss_sel_mthd(d1_itr + used, WLP_WSS_REG_SELECT); + used += wlp_set_dev_name(d1_itr + used, info->name, + strlen(info->name)); + used += wlp_set_manufacturer(d1_itr + used, info->manufacturer, + strlen(info->manufacturer)); + used += wlp_set_model_name(d1_itr + used, info->model_name, + strlen(info->model_name)); + used += wlp_set_model_nr(d1_itr + used, info->model_nr, + strlen(info->model_nr)); + used += wlp_set_serial(d1_itr + used, info->serial, + strlen(info->serial)); + used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); + used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); + skb_put(_skb, sizeof(*_d1) + used); + d_printf(6, dev, "D1 message:\n"); + d_dump(6, dev, _d1, sizeof(*_d1) + + sizeof(struct wlp_attr_uuid_e) + + sizeof(struct wlp_attr_wss_sel_mthd) + + sizeof(struct wlp_attr_dev_name) + + strlen(info->name) + + sizeof(struct wlp_attr_manufacturer) + + strlen(info->manufacturer) + + sizeof(struct wlp_attr_model_name) + + strlen(info->model_name) + + sizeof(struct wlp_attr_model_nr) + + strlen(info->model_nr) + + sizeof(struct wlp_attr_serial) + + strlen(info->serial) + + sizeof(struct wlp_attr_prim_dev_type) + + sizeof(struct wlp_attr_wlp_assc_err)); + *skb = _skb; +error: + d_fnend(6, dev, "wlp %p, 
result = %d\n", wlp, result); + return result; +} + +/** + * Construct a D2 association frame + * + * We use the radio control functions to determine the values of the device + * properties. These are of variable length and the total space needed is + * tallied first before we start constructing the message. The radio + * control functions return strings that are terminated with \0. This + * character should not be included in the message (there is a length field + * accompanying it in the attribute). + */ +static +int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb, struct wlp_uuid *uuid_e) +{ + + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = 0; + struct wlp_device_info *info; + size_t used = 0; + struct wlp_frame_assoc *_d2; + struct sk_buff *_skb; + void *d2_itr; + size_t mem_needed; + + d_fnstart(6, dev, "wlp %p\n", wlp); + if (wlp->dev_info == NULL) { + result = __wlp_setup_device_info(wlp); + if (result < 0) { + dev_err(dev, "WLP: Unable to setup device " + "information for D2 message.\n"); + goto error; + } + } + info = wlp->dev_info; + d_printf(6, dev, "Local properties:\n" + "Device name (%d bytes): %s\n" + "Model name (%d bytes): %s\n" + "Manufacturer (%d bytes): %s\n" + "Model number (%d bytes): %s\n" + "Serial number (%d bytes): %s\n" + "Primary device type: \n" + " Category: %d \n" + " OUI: %02x:%02x:%02x \n" + " OUI Subdivision: %u \n", + (int)strlen(info->name), info->name, + (int)strlen(info->model_name), info->model_name, + (int)strlen(info->manufacturer), info->manufacturer, + (int)strlen(info->model_nr), info->model_nr, + (int)strlen(info->serial), info->serial, + info->prim_dev_type.category, + info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], + info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); + mem_needed = sizeof(*_d2) + + sizeof(struct wlp_attr_uuid_e) + + sizeof(struct wlp_attr_uuid_r) + + sizeof(struct wlp_attr_dev_name) + + strlen(info->name) + + sizeof(struct wlp_attr_manufacturer) + + strlen(info->manufacturer) + + sizeof(struct wlp_attr_model_name) + + strlen(info->model_name) + + sizeof(struct wlp_attr_model_nr) + + strlen(info->model_nr) + + sizeof(struct wlp_attr_serial) + + strlen(info->serial) + + sizeof(struct wlp_attr_prim_dev_type) + + sizeof(struct wlp_attr_wlp_assc_err); + if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE) + mem_needed += sizeof(struct wlp_attr_wss_info) + + sizeof(struct wlp_wss_info) + + strlen(wlp->wss.name); + _skb = dev_alloc_skb(mem_needed); + if (_skb == NULL) { + dev_err(dev, "WLP: Cannot allocate memory for association " + "message.\n"); + result = -ENOMEM; + goto error; + } + _d2 = (void *) _skb->data; + d_printf(6, dev, "D2 starts at %p \n", _d2); + _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); + _d2->hdr.type = WLP_FRAME_ASSOCIATION; + _d2->type = WLP_ASSOC_D2; + + wlp_set_version(&_d2->version, WLP_VERSION); + wlp_set_msg_type(&_d2->msg_type, WLP_ASSOC_D2); + d2_itr = _d2->attr; + used = wlp_set_uuid_e(d2_itr, uuid_e); + used += wlp_set_uuid_r(d2_itr + used, &wlp->uuid); + if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE) + used += wlp_set_wss_info(d2_itr + used, &wlp->wss); + used += wlp_set_dev_name(d2_itr + used, info->name, + strlen(info->name)); + used += wlp_set_manufacturer(d2_itr + used, info->manufacturer, + strlen(info->manufacturer)); + used += wlp_set_model_name(d2_itr + used, info->model_name, + strlen(info->model_name)); + used += wlp_set_model_nr(d2_itr + used, info->model_nr, + strlen(info->model_nr)); + used += wlp_set_serial(d2_itr + used, info->serial, 
+ strlen(info->serial)); + used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); + used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); + skb_put(_skb, sizeof(*_d2) + used); + d_printf(6, dev, "D2 message:\n"); + d_dump(6, dev, _d2, mem_needed); + *skb = _skb; +error: + d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); + return result; +} + +/** + * Allocate memory for and populate fields of F0 association frame + * + * Currently (while focusing on unsecure enrollment) we ignore the + * nonce's that could be placed in the message. Only the error field is + * populated by the value provided by the caller. + */ +static +int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, + enum wlp_assc_error error) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = -ENOMEM; + struct { + struct wlp_frame_assoc f0_hdr; + struct wlp_attr_enonce enonce; + struct wlp_attr_rnonce rnonce; + struct wlp_attr_wlp_assc_err assc_err; + } *f0; + struct sk_buff *_skb; + struct wlp_nonce tmp; + + d_fnstart(6, dev, "wlp %p\n", wlp); + _skb = dev_alloc_skb(sizeof(*f0)); + if (_skb == NULL) { + dev_err(dev, "WLP: Unable to allocate memory for F0 " + "association frame. \n"); + goto error_alloc; + } + f0 = (void *) _skb->data; + d_printf(6, dev, "F0 starts at %p \n", f0); + f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); + f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; + f0->f0_hdr.type = WLP_ASSOC_F0; + wlp_set_version(&f0->f0_hdr.version, WLP_VERSION); + wlp_set_msg_type(&f0->f0_hdr.msg_type, WLP_ASSOC_F0); + memset(&tmp, 0, sizeof(tmp)); + wlp_set_enonce(&f0->enonce, &tmp); + wlp_set_rnonce(&f0->rnonce, &tmp); + wlp_set_wlp_assc_err(&f0->assc_err, error); + skb_put(_skb, sizeof(*f0)); + *skb = _skb; + result = 0; +error_alloc: + d_fnend(6, dev, "wlp %p, result %d \n", wlp, result); + return result; +} + +/** + * Parse F0 frame + * + * We just retrieve the values and print it as an error to the user. + * Calling function already knows an error occured (F0 indicates error), so + * we just parse the content as debug for higher layers. + */ +int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_frame_assoc *f0 = (void *) skb->data; + void *ptr = skb->data; + size_t len = skb->len; + size_t used; + ssize_t result; + struct wlp_nonce enonce, rnonce; + enum wlp_assc_error assc_err; + char enonce_buf[WLP_WSS_NONCE_STRSIZE]; + char rnonce_buf[WLP_WSS_NONCE_STRSIZE]; + + used = sizeof(*f0); + result = wlp_get_enonce(wlp, ptr + used, &enonce, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Enrollee nonce " + "attribute from F0 message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_rnonce(wlp, ptr + used, &rnonce, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Registrar nonce " + "attribute from F0 message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WLP Association error " + "attribute from F0 message.\n"); + goto error_parse; + } + wlp_wss_nonce_print(enonce_buf, sizeof(enonce_buf), &enonce); + wlp_wss_nonce_print(rnonce_buf, sizeof(rnonce_buf), &rnonce); + dev_err(dev, "WLP: Received F0 error frame from neighbor. 
Enrollee " + "nonce: %s, Registrar nonce: %s, WLP Association error: %s.\n", + enonce_buf, rnonce_buf, wlp_assc_error_str(assc_err)); + result = 0; +error_parse: + return result; +} + +/** + * Retrieve variable device information from association message + * + * The device information parsed is not required in any message. This + * routine will thus not fail if an attribute is not present. + * The attributes are expected in a certain order, even if all are not + * present. The "attribute type" value is used to ensure the attributes + * are parsed in the correct order. + * + * If an error is encountered during parsing the function will return an + * error code, when this happens the given device_info structure may be + * partially filled. + */ +static +int wlp_get_variable_info(struct wlp *wlp, void *data, + struct wlp_device_info *dev_info, ssize_t len) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + size_t used = 0; + struct wlp_attr_hdr *hdr; + ssize_t result = 0; + unsigned last = 0; + + while (len - used > 0) { + if (len - used < sizeof(*hdr)) { + dev_err(dev, "WLP: Partial data in frame, cannot " + "parse. \n"); + goto error_parse; + } + hdr = data + used; + switch (le16_to_cpu(hdr->type)) { + case WLP_ATTR_MANUF: + if (last >= WLP_ATTR_MANUF) { + dev_err(dev, "WLP: Incorrect order of " + "attribute values in D1 msg.\n"); + goto error_parse; + } + result = wlp_get_manufacturer(wlp, data + used, + dev_info->manufacturer, + len - used); + if (result < 0) { + dev_err(dev, "WLP: Unable to obtain " + "Manufacturer attribute from D1 " + "message.\n"); + goto error_parse; + } + last = WLP_ATTR_MANUF; + used += result; + break; + case WLP_ATTR_MODEL_NAME: + if (last >= WLP_ATTR_MODEL_NAME) { + dev_err(dev, "WLP: Incorrect order of " + "attribute values in D1 msg.\n"); + goto error_parse; + } + result = wlp_get_model_name(wlp, data + used, + dev_info->model_name, + len - used); + if (result < 0) { + dev_err(dev, "WLP: Unable to obtain Model " + "name attribute from D1 message.\n"); + goto error_parse; + } + last = WLP_ATTR_MODEL_NAME; + used += result; + break; + case WLP_ATTR_MODEL_NR: + if (last >= WLP_ATTR_MODEL_NR) { + dev_err(dev, "WLP: Incorrect order of " + "attribute values in D1 msg.\n"); + goto error_parse; + } + result = wlp_get_model_nr(wlp, data + used, + dev_info->model_nr, + len - used); + if (result < 0) { + dev_err(dev, "WLP: Unable to obtain Model " + "number attribute from D1 message.\n"); + goto error_parse; + } + last = WLP_ATTR_MODEL_NR; + used += result; + break; + case WLP_ATTR_SERIAL: + if (last >= WLP_ATTR_SERIAL) { + dev_err(dev, "WLP: Incorrect order of " + "attribute values in D1 msg.\n"); + goto error_parse; + } + result = wlp_get_serial(wlp, data + used, + dev_info->serial, len - used); + if (result < 0) { + dev_err(dev, "WLP: Unable to obtain Serial " + "number attribute from D1 message.\n"); + goto error_parse; + } + last = WLP_ATTR_SERIAL; + used += result; + break; + case WLP_ATTR_PRI_DEV_TYPE: + if (last >= WLP_ATTR_PRI_DEV_TYPE) { + dev_err(dev, "WLP: Incorrect order of " + "attribute values in D1 msg.\n"); + goto error_parse; + } + result = wlp_get_prim_dev_type(wlp, data + used, + &dev_info->prim_dev_type, + len - used); + if (result < 0) { + dev_err(dev, "WLP: Unable to obtain Primary " + "device type attribute from D1 " + "message.\n"); + goto error_parse; + } + dev_info->prim_dev_type.category = + le16_to_cpu(dev_info->prim_dev_type.category); + dev_info->prim_dev_type.subID = + le16_to_cpu(dev_info->prim_dev_type.subID); + last = 
WLP_ATTR_PRI_DEV_TYPE; + used += result; + break; + default: + /* This is not variable device information. */ + goto out; + break; + } + } +out: + return used; +error_parse: + return -EINVAL; +} + +/** + * Parse incoming D1 frame, populate attribute values + * + * Caller provides pointers to memory already allocated for attributes + * expected in the D1 frame. These variables will be populated. + */ +static +int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb, + struct wlp_uuid *uuid_e, + enum wlp_wss_sel_mthd *sel_mthd, + struct wlp_device_info *dev_info, + enum wlp_assc_error *assc_err) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_frame_assoc *d1 = (void *) skb->data; + void *ptr = skb->data; + size_t len = skb->len; + size_t used; + ssize_t result; + + used = sizeof(*d1); + result = wlp_get_uuid_e(wlp, ptr + used, uuid_e, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain UUID-E attribute from D1 " + "message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_wss_sel_mthd(wlp, ptr + used, sel_mthd, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSS selection method " + "from D1 message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_dev_name(wlp, ptr + used, dev_info->name, + len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Device Name from D1 " + "message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_variable_info(wlp, ptr + used, dev_info, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Device Information from " + "D1 message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_wlp_assc_err(wlp, ptr + used, assc_err, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WLP Association Error " + "Information from D1 message.\n"); + goto error_parse; + } + result = 0; +error_parse: + return result; +} +/** + * Handle incoming D1 frame + * + * The frame has already been verified to contain an Association header with + * the correct version number. Parse the incoming frame, construct and send + * a D2 frame in response. + * + * It is not clear what to do with most fields in the incoming D1 frame. We + * retrieve and discard the information here for now. + */ +void wlp_handle_d1_frame(struct work_struct *ws) +{ + struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws, + struct wlp_assoc_frame_ctx, + ws); + struct wlp *wlp = frame_ctx->wlp; + struct wlp_wss *wss = &wlp->wss; + struct sk_buff *skb = frame_ctx->skb; + struct uwb_dev_addr *src = &frame_ctx->src; + int result; + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_uuid uuid_e; + enum wlp_wss_sel_mthd sel_mthd = 0; + struct wlp_device_info dev_info; + enum wlp_assc_error assc_err; + char uuid[WLP_WSS_UUID_STRSIZE]; + struct sk_buff *resp = NULL; + + /* Parse D1 frame */ + d_fnstart(6, dev, "WLP: handle D1 frame. 
wlp = %p, skb = %p\n", + wlp, skb); + mutex_lock(&wss->mutex); + mutex_lock(&wlp->mutex); /* to access wlp->uuid */ + memset(&dev_info, 0, sizeof(dev_info)); + result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &dev_info, + &assc_err); + if (result < 0) { + dev_err(dev, "WLP: Unable to parse incoming D1 frame.\n"); + kfree_skb(skb); + goto out; + } + wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e); + d_printf(6, dev, "From D1 frame:\n" + "UUID-E: %s\n" + "Selection method: %d\n" + "Device name (%d bytes): %s\n" + "Model name (%d bytes): %s\n" + "Manufacturer (%d bytes): %s\n" + "Model number (%d bytes): %s\n" + "Serial number (%d bytes): %s\n" + "Primary device type: \n" + " Category: %d \n" + " OUI: %02x:%02x:%02x \n" + " OUI Subdivision: %u \n", + uuid, sel_mthd, + (int)strlen(dev_info.name), dev_info.name, + (int)strlen(dev_info.model_name), dev_info.model_name, + (int)strlen(dev_info.manufacturer), dev_info.manufacturer, + (int)strlen(dev_info.model_nr), dev_info.model_nr, + (int)strlen(dev_info.serial), dev_info.serial, + dev_info.prim_dev_type.category, + dev_info.prim_dev_type.OUI[0], + dev_info.prim_dev_type.OUI[1], + dev_info.prim_dev_type.OUI[2], + dev_info.prim_dev_type.OUIsubdiv); + + kfree_skb(skb); + if (!wlp_uuid_is_set(&wlp->uuid)) { + dev_err(dev, "WLP: UUID is not set. Set via sysfs to " + "proceed. Respong to D1 message with error F0.\n"); + result = wlp_build_assoc_f0(wlp, &resp, + WLP_ASSOC_ERROR_NOT_READY); + if (result < 0) { + dev_err(dev, "WLP: Unable to construct F0 message.\n"); + goto out; + } + } else { + /* Construct D2 frame */ + result = wlp_build_assoc_d2(wlp, wss, &resp, &uuid_e); + if (result < 0) { + dev_err(dev, "WLP: Unable to construct D2 message.\n"); + goto out; + } + } + /* Send D2 frame */ + BUG_ON(wlp->xmit_frame == NULL); + result = wlp->xmit_frame(wlp, resp, src); + if (result < 0) { + dev_err(dev, "WLP: Unable to transmit D2 association " + "message: %d\n", result); + if (result == -ENXIO) + dev_err(dev, "WLP: Is network interface up? \n"); + /* We could try again ... */ + dev_kfree_skb_any(resp); /* we need to free if tx fails */ + } +out: + kfree(frame_ctx); + mutex_unlock(&wlp->mutex); + mutex_unlock(&wss->mutex); + d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp); +} + +/** + * Parse incoming D2 frame, create and populate temporary cache + * + * @skb: socket buffer in which D2 frame can be found + * @neighbor: the neighbor that sent the D2 frame + * + * Will allocate memory for temporary storage of information learned during + * discovery. + */ +int wlp_parse_d2_frame_to_cache(struct wlp *wlp, struct sk_buff *skb, + struct wlp_neighbor_e *neighbor) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_frame_assoc *d2 = (void *) skb->data; + void *ptr = skb->data; + size_t len = skb->len; + size_t used; + ssize_t result; + struct wlp_uuid uuid_e; + struct wlp_device_info *nb_info; + enum wlp_assc_error assc_err; + + used = sizeof(*d2); + result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 " + "message.\n"); + goto error_parse; + } + if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) { + dev_err(dev, "WLP: UUID-E in incoming D2 does not match " + "local UUID sent in D1. 
\n"); + goto error_parse; + } + used += result; + result = wlp_get_uuid_r(wlp, ptr + used, &neighbor->uuid, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 " + "message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_wss_info_to_cache(wlp, ptr + used, neighbor, + len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSS information " + "from D2 message.\n"); + goto error_parse; + } + used += result; + neighbor->info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL); + if (neighbor->info == NULL) { + dev_err(dev, "WLP: cannot allocate memory to store device " + "info.\n"); + result = -ENOMEM; + goto error_parse; + } + nb_info = neighbor->info; + result = wlp_get_dev_name(wlp, ptr + used, nb_info->name, + len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Device Name from D2 " + "message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_variable_info(wlp, ptr + used, nb_info, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Device Information from " + "D2 message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WLP Association Error " + "Information from D2 message.\n"); + goto error_parse; + } + if (assc_err != WLP_ASSOC_ERROR_NONE) { + dev_err(dev, "WLP: neighbor device returned association " + "error %d\n", assc_err); + result = -EINVAL; + goto error_parse; + } + result = 0; +error_parse: + if (result < 0) + wlp_remove_neighbor_tmp_info(neighbor); + return result; +} + +/** + * Parse incoming D2 frame, populate attribute values of WSS bein enrolled in + * + * @wss: our WSS that will be enrolled + * @skb: socket buffer in which D2 frame can be found + * @neighbor: the neighbor that sent the D2 frame + * @wssid: the wssid of the WSS in which we want to enroll + * + * Forms part of enrollment sequence. We are trying to enroll in WSS with + * @wssid by using @neighbor as registrar. A D1 message was sent to + * @neighbor and now we need to parse the D2 response. The neighbor's + * response is searched for the requested WSS and if found (and it accepts + * enrollment), we store the information. + */ +int wlp_parse_d2_frame_to_enroll(struct wlp_wss *wss, struct sk_buff *skb, + struct wlp_neighbor_e *neighbor, + struct wlp_uuid *wssid) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + void *ptr = skb->data; + size_t len = skb->len; + size_t used; + ssize_t result; + struct wlp_uuid uuid_e; + struct wlp_uuid uuid_r; + struct wlp_device_info nb_info; + enum wlp_assc_error assc_err; + char uuid_bufA[WLP_WSS_UUID_STRSIZE]; + char uuid_bufB[WLP_WSS_UUID_STRSIZE]; + + used = sizeof(struct wlp_frame_assoc); + result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 " + "message.\n"); + goto error_parse; + } + if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) { + dev_err(dev, "WLP: UUID-E in incoming D2 does not match " + "local UUID sent in D1. 
\n"); + goto error_parse; + } + used += result; + result = wlp_get_uuid_r(wlp, ptr + used, &uuid_r, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 " + "message.\n"); + goto error_parse; + } + if (memcmp(&uuid_r, &neighbor->uuid, sizeof(uuid_r))) { + wlp_wss_uuid_print(uuid_bufA, sizeof(uuid_bufA), + &neighbor->uuid); + wlp_wss_uuid_print(uuid_bufB, sizeof(uuid_bufB), &uuid_r); + dev_err(dev, "WLP: UUID of neighbor does not match UUID " + "learned during discovery. Originally discovered: %s, " + "now from D2 message: %s\n", uuid_bufA, uuid_bufB); + result = -EINVAL; + goto error_parse; + } + used += result; + wss->wssid = *wssid; + result = wlp_get_wss_info_to_enroll(wlp, ptr + used, wss, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSS information " + "from D2 message.\n"); + goto error_parse; + } + if (wss->state != WLP_WSS_STATE_PART_ENROLLED) { + dev_err(dev, "WLP: D2 message did not contain information " + "for successful enrollment. \n"); + result = -EINVAL; + goto error_parse; + } + used += result; + /* Place device information on stack to continue parsing of message */ + result = wlp_get_dev_name(wlp, ptr + used, nb_info.name, + len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Device Name from D2 " + "message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_variable_info(wlp, ptr + used, &nb_info, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain Device Information from " + "D2 message.\n"); + goto error_parse; + } + used += result; + result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WLP Association Error " + "Information from D2 message.\n"); + goto error_parse; + } + if (assc_err != WLP_ASSOC_ERROR_NONE) { + dev_err(dev, "WLP: neighbor device returned association " + "error %d\n", assc_err); + if (wss->state == WLP_WSS_STATE_PART_ENROLLED) { + dev_err(dev, "WLP: Enrolled in WSS (should not " + "happen according to spec). Undoing. \n"); + wlp_wss_reset(wss); + } + result = -EINVAL; + goto error_parse; + } + result = 0; +error_parse: + return result; +} + +/** + * Parse C3/C4 frame into provided variables + * + * @wssid: will point to copy of wssid retrieved from C3/C4 frame + * @tag: will point to copy of tag retrieved from C3/C4 frame + * @virt_addr: will point to copy of virtual address retrieved from C3/C4 + * frame. + * + * Calling function has to allocate memory for these values. + * + * skb contains a valid C3/C4 frame, return the individual fields of this + * frame in the provided variables. 
+ */ +int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb, + struct wlp_uuid *wssid, u8 *tag, + struct uwb_mac_addr *virt_addr) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + int result; + void *ptr = skb->data; + size_t len = skb->len; + size_t used; + char buf[WLP_WSS_UUID_STRSIZE]; + struct wlp_frame_assoc *assoc = ptr; + + d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); + used = sizeof(*assoc); + result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSSID attribute from " + "%s message.\n", wlp_assoc_frame_str(assoc->type)); + goto error_parse; + } + used += result; + result = wlp_get_wss_tag(wlp, ptr + used, tag, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSS tag attribute from " + "%s message.\n", wlp_assoc_frame_str(assoc->type)); + goto error_parse; + } + used += result; + result = wlp_get_wss_virt(wlp, ptr + used, virt_addr, len - used); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSS virtual address " + "attribute from %s message.\n", + wlp_assoc_frame_str(assoc->type)); + goto error_parse; + } + wlp_wss_uuid_print(buf, sizeof(buf), wssid); + d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt " + "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag, + virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], + virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); + +error_parse: + d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); + return result; +} + +/** + * Allocate memory for and populate fields of C1 or C2 association frame + * + * The C1 and C2 association frames appear identical - except for the type. + */ +static +int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb, enum wlp_assoc_type type) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = -ENOMEM; + struct { + struct wlp_frame_assoc c_hdr; + struct wlp_attr_wssid wssid; + } *c; + struct sk_buff *_skb; + + d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); + _skb = dev_alloc_skb(sizeof(*c)); + if (_skb == NULL) { + dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " + "association frame. \n"); + goto error_alloc; + } + c = (void *) _skb->data; + d_printf(6, dev, "C1/C2 starts at %p \n", c); + c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); + c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; + c->c_hdr.type = type; + wlp_set_version(&c->c_hdr.version, WLP_VERSION); + wlp_set_msg_type(&c->c_hdr.msg_type, type); + wlp_set_wssid(&c->wssid, &wss->wssid); + skb_put(_skb, sizeof(*c)); + d_printf(6, dev, "C1/C2 message:\n"); + d_dump(6, dev, c, sizeof(*c)); + *skb = _skb; + result = 0; +error_alloc: + d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); + return result; +} + + +static +int wlp_build_assoc_c1(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb) +{ + return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C1); +} + +static +int wlp_build_assoc_c2(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb) +{ + return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C2); +} + + +/** + * Allocate memory for and populate fields of C3 or C4 association frame + * + * The C3 and C4 association frames appear identical - except for the type. 
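+ *
+ * The resulting frame is just the association header followed by three
+ * fixed-size attributes (this mirrors the anonymous struct used in the
+ * body below); the requested type, WLP_ASSOC_C3 or WLP_ASSOC_C4, is
+ * written into the header:
+ *
+ *    struct wlp_frame_assoc c_hdr;
+ *    struct wlp_attr_wssid wssid;
+ *    struct wlp_attr_wss_tag wss_tag;
+ *    struct wlp_attr_wss_virt wss_virt;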
+ */ +static +int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb, enum wlp_assoc_type type) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = -ENOMEM; + struct { + struct wlp_frame_assoc c_hdr; + struct wlp_attr_wssid wssid; + struct wlp_attr_wss_tag wss_tag; + struct wlp_attr_wss_virt wss_virt; + } *c; + struct sk_buff *_skb; + + d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); + _skb = dev_alloc_skb(sizeof(*c)); + if (_skb == NULL) { + dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " + "association frame. \n"); + goto error_alloc; + } + c = (void *) _skb->data; + d_printf(6, dev, "C3/C4 starts at %p \n", c); + c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); + c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; + c->c_hdr.type = type; + wlp_set_version(&c->c_hdr.version, WLP_VERSION); + wlp_set_msg_type(&c->c_hdr.msg_type, type); + wlp_set_wssid(&c->wssid, &wss->wssid); + wlp_set_wss_tag(&c->wss_tag, wss->tag); + wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); + skb_put(_skb, sizeof(*c)); + d_printf(6, dev, "C3/C4 message:\n"); + d_dump(6, dev, c, sizeof(*c)); + *skb = _skb; + result = 0; +error_alloc: + d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); + return result; +} + +static +int wlp_build_assoc_c3(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb) +{ + return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C3); +} + +static +int wlp_build_assoc_c4(struct wlp *wlp, struct wlp_wss *wss, + struct sk_buff **skb) +{ + return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C4); +} + + +#define wlp_send_assoc(type, id) \ +static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \ + struct uwb_dev_addr *dev_addr) \ +{ \ + struct device *dev = &wlp->rc->uwb_dev.dev; \ + int result; \ + struct sk_buff *skb = NULL; \ + d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ + wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ + d_printf(6, dev, "WLP: Constructing %s frame. \n", \ + wlp_assoc_frame_str(id)); \ + /* Build the frame */ \ + result = wlp_build_assoc_##type(wlp, wss, &skb); \ + if (result < 0) { \ + dev_err(dev, "WLP: Unable to construct %s association " \ + "frame: %d\n", wlp_assoc_frame_str(id), result);\ + goto error_build_assoc; \ + } \ + /* Send the frame */ \ + d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \ + wlp_assoc_frame_str(id), \ + dev_addr->data[1], dev_addr->data[0]); \ + BUG_ON(wlp->xmit_frame == NULL); \ + result = wlp->xmit_frame(wlp, skb, dev_addr); \ + if (result < 0) { \ + dev_err(dev, "WLP: Unable to transmit %s association " \ + "message: %d\n", wlp_assoc_frame_str(id), \ + result); \ + if (result == -ENXIO) \ + dev_err(dev, "WLP: Is network interface " \ + "up? \n"); \ + goto error_xmit; \ + } \ + return 0; \ +error_xmit: \ + /* We could try again ... 
*/ \ + dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ +error_build_assoc: \ + d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ + wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ + return result; \ +} + +wlp_send_assoc(d1, WLP_ASSOC_D1) +wlp_send_assoc(c1, WLP_ASSOC_C1) +wlp_send_assoc(c3, WLP_ASSOC_C3) + +int wlp_send_assoc_frame(struct wlp *wlp, struct wlp_wss *wss, + struct uwb_dev_addr *dev_addr, + enum wlp_assoc_type type) +{ + int result = 0; + struct device *dev = &wlp->rc->uwb_dev.dev; + switch (type) { + case WLP_ASSOC_D1: + result = wlp_send_assoc_d1(wlp, wss, dev_addr); + break; + case WLP_ASSOC_C1: + result = wlp_send_assoc_c1(wlp, wss, dev_addr); + break; + case WLP_ASSOC_C3: + result = wlp_send_assoc_c3(wlp, wss, dev_addr); + break; + default: + dev_err(dev, "WLP: Received request to send unknown " + "association message.\n"); + result = -EINVAL; + break; + } + return result; +} + +/** + * Handle incoming C1 frame + * + * The frame has already been verified to contain an Association header with + * the correct version number. Parse the incoming frame, construct and send + * a C2 frame in response. + */ +void wlp_handle_c1_frame(struct work_struct *ws) +{ + struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws, + struct wlp_assoc_frame_ctx, + ws); + struct wlp *wlp = frame_ctx->wlp; + struct wlp_wss *wss = &wlp->wss; + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_frame_assoc *c1 = (void *) frame_ctx->skb->data; + unsigned int len = frame_ctx->skb->len; + struct uwb_dev_addr *src = &frame_ctx->src; + int result; + struct wlp_uuid wssid; + char buf[WLP_WSS_UUID_STRSIZE]; + struct sk_buff *resp = NULL; + + /* Parse C1 frame */ + d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n", + wlp, c1); + mutex_lock(&wss->mutex); + result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, + len - sizeof(*c1)); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); + goto out; + } + wlp_wss_uuid_print(buf, sizeof(buf), &wssid); + d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf); + if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) + && wss->state == WLP_WSS_STATE_ACTIVE) { + d_printf(6, dev, "WSSID from C1 frame is known locally " + "and is active\n"); + /* Construct C2 frame */ + result = wlp_build_assoc_c2(wlp, wss, &resp); + if (result < 0) { + dev_err(dev, "WLP: Unable to construct C2 message.\n"); + goto out; + } + } else { + d_printf(6, dev, "WSSID from C1 frame is not known locally " + "or is not active\n"); + /* Construct F0 frame */ + result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); + if (result < 0) { + dev_err(dev, "WLP: Unable to construct F0 message.\n"); + goto out; + } + } + /* Send C2 frame */ + d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n", + src->data[1], src->data[0]); + BUG_ON(wlp->xmit_frame == NULL); + result = wlp->xmit_frame(wlp, resp, src); + if (result < 0) { + dev_err(dev, "WLP: Unable to transmit response association " + "message: %d\n", result); + if (result == -ENXIO) + dev_err(dev, "WLP: Is network interface up? \n"); + /* We could try again ... */ + dev_kfree_skb_any(resp); /* we need to free if tx fails */ + } +out: + kfree_skb(frame_ctx->skb); + kfree(frame_ctx); + mutex_unlock(&wss->mutex); + d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp); +} + +/** + * Handle incoming C3 frame + * + * The frame has already been verified to contain an Association header with + * the correct version number. 
Parse the incoming frame, construct and send + * a C4 frame in response. If the C3 frame identifies a WSS that is locally + * active then we connect to this neighbor (add it to our EDA cache). + */ +void wlp_handle_c3_frame(struct work_struct *ws) +{ + struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws, + struct wlp_assoc_frame_ctx, + ws); + struct wlp *wlp = frame_ctx->wlp; + struct wlp_wss *wss = &wlp->wss; + struct device *dev = &wlp->rc->uwb_dev.dev; + struct sk_buff *skb = frame_ctx->skb; + struct uwb_dev_addr *src = &frame_ctx->src; + int result; + char buf[WLP_WSS_UUID_STRSIZE]; + struct sk_buff *resp = NULL; + struct wlp_uuid wssid; + u8 tag; + struct uwb_mac_addr virt_addr; + + /* Parse C3 frame */ + d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", + wlp, skb); + mutex_lock(&wss->mutex); + result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); + goto out; + } + wlp_wss_uuid_print(buf, sizeof(buf), &wssid); + d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf); + if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) + && wss->state >= WLP_WSS_STATE_ACTIVE) { + d_printf(6, dev, "WSSID from C3 frame is known locally " + "and is active\n"); + result = wlp_eda_update_node(&wlp->eda, src, wss, + (void *) virt_addr.data, tag, + WLP_WSS_CONNECTED); + if (result < 0) { + dev_err(dev, "WLP: Unable to update EDA cache " + "with new connected neighbor information.\n"); + result = wlp_build_assoc_f0(wlp, &resp, + WLP_ASSOC_ERROR_INT); + if (result < 0) { + dev_err(dev, "WLP: Unable to construct F0 " + "message.\n"); + goto out; + } + } else { + wss->state = WLP_WSS_STATE_CONNECTED; + /* Construct C4 frame */ + result = wlp_build_assoc_c4(wlp, wss, &resp); + if (result < 0) { + dev_err(dev, "WLP: Unable to construct C4 " + "message.\n"); + goto out; + } + } + } else { + d_printf(6, dev, "WSSID from C3 frame is not known locally " + "or is not active\n"); + /* Construct F0 frame */ + result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); + if (result < 0) { + dev_err(dev, "WLP: Unable to construct F0 message.\n"); + goto out; + } + } + /* Send C4 frame */ + d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n", + src->data[1], src->data[0]); + BUG_ON(wlp->xmit_frame == NULL); + result = wlp->xmit_frame(wlp, resp, src); + if (result < 0) { + dev_err(dev, "WLP: Unable to transmit response association " + "message: %d\n", result); + if (result == -ENXIO) + dev_err(dev, "WLP: Is network interface up? \n"); + /* We could try again ... */ + dev_kfree_skb_any(resp); /* we need to free if tx fails */ + } +out: + kfree_skb(frame_ctx->skb); + kfree(frame_ctx); + mutex_unlock(&wss->mutex); + d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", + wlp, skb); +} + + -- cgit v0.10.2 From 2f19204480f16a20d8571a97c13f0cec2968607c Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Wed, 17 Sep 2008 16:34:18 +0100 Subject: uwb: add WiMedia LLC Protocol stack (WSS) Add the Wireless Service Set (WSS) code. 
Signed-off-by: David Vrabel diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c new file mode 100644 index 0000000..96b18c9 --- /dev/null +++ b/drivers/uwb/wlp/wss-lc.c @@ -0,0 +1,1055 @@ +/* + * WiMedia Logical Link Control Protocol (WLP) + * + * Copyright (C) 2007 Intel Corporation + * Reinette Chatre + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Implementation of the WLP association protocol. + * + * FIXME: Docs + * + * A UWB network interface will configure a WSS through wlp_wss_setup() after + * the interface has been assigned a MAC address, typically after + * "ifconfig" has been called. When the interface goes down it should call + * wlp_wss_remove(). + * + * When the WSS is ready for use the user interacts via sysfs to create, + * discover, and activate WSS. + * + * wlp_wss_enroll_activate() + * + * wlp_wss_create_activate() + * wlp_wss_set_wssid_hash() + * wlp_wss_comp_wssid_hash() + * wlp_wss_sel_bcast_addr() + * wlp_wss_sysfs_add() + * + * Called when no more references to WSS exist: + * wlp_wss_release() + * wlp_wss_reset() + */ + +#include /* for is_valid_ether_addr */ +#include +#include +#define D_LOCAL 5 +#include +#include "wlp-internal.h" + + +size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) +{ + size_t result; + + result = scnprintf(buf, bufsize, + "%02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x", + key[0], key[1], key[2], key[3], + key[4], key[5], key[6], key[7], + key[8], key[9], key[10], key[11], + key[12], key[13], key[14], key[15]); + return result; +} + +/** + * Compute WSSID hash + * WLP Draft 0.99 [7.2.1] + * + * The WSSID hash for a WSSID is the result of an octet-wise exclusive-OR + * of all octets in the WSSID. + */ +static +u8 wlp_wss_comp_wssid_hash(struct wlp_uuid *wssid) +{ + return wssid->data[0] ^ wssid->data[1] ^ wssid->data[2] + ^ wssid->data[3] ^ wssid->data[4] ^ wssid->data[5] + ^ wssid->data[6] ^ wssid->data[7] ^ wssid->data[8] + ^ wssid->data[9] ^ wssid->data[10] ^ wssid->data[11] + ^ wssid->data[12] ^ wssid->data[13] ^ wssid->data[14] + ^ wssid->data[15]; +} + +/** + * Select a multicast EUI-48 for the WSS broadcast address. + * WLP Draft 0.99 [7.2.1] + * + * Selected based on the WiMedia Alliance OUI, 00-13-88, within the WLP + * range, [01-13-88-00-01-00, 01-13-88-00-01-FF] inclusive. + * + * This address is currently hardcoded. + * FIXME? + */ +static +struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss) +{ + struct uwb_mac_addr bcast = { + .data = { 0x01, 0x13, 0x88, 0x00, 0x01, 0x00 } + }; + return bcast; +} + +/** + * Clear the contents of the WSS structure - all except kobj, mutex, virtual + * + * We do not want to reinitialize - the internal kobj should not change as + * it still points to the parent received during setup. The mutex should + * remain also. We thus just reset values individually. 
+ * The virutal address assigned to WSS will remain the same for the + * lifetime of the WSS. We only reset the fields that can change during its + * lifetime. + */ +void wlp_wss_reset(struct wlp_wss *wss) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + d_fnstart(5, dev, "wss (%p) \n", wss); + memset(&wss->wssid, 0, sizeof(wss->wssid)); + wss->hash = 0; + memset(&wss->name[0], 0, sizeof(wss->name)); + memset(&wss->bcast, 0, sizeof(wss->bcast)); + wss->secure_status = WLP_WSS_UNSECURE; + memset(&wss->master_key[0], 0, sizeof(wss->master_key)); + wss->tag = 0; + wss->state = WLP_WSS_STATE_NONE; + d_fnend(5, dev, "wss (%p) \n", wss); +} + +/** + * Create sysfs infrastructure for WSS + * + * The WSS is configured to have the interface as parent (see wlp_wss_setup()) + * a new sysfs directory that includes wssid as its name is created in the + * interface's sysfs directory. The group of files interacting with WSS are + * created also. + */ +static +int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + int result; + + d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str); + result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); + if (result < 0) + return result; + wss->kobj.ktype = &wss_ktype; + result = kobject_init_and_add(&wss->kobj, + &wss_ktype, wss->kobj.parent, "wlp"); + if (result < 0) { + dev_err(dev, "WLP: Cannot register WSS kobject.\n"); + goto error_kobject_register; + } + result = sysfs_create_group(&wss->kobj, &wss_attr_group); + if (result < 0) { + dev_err(dev, "WLP: Cannot register WSS attributes: %d\n", + result); + goto error_sysfs_create_group; + } + d_fnend(5, dev, "Completed. result = %d \n", result); + return 0; +error_sysfs_create_group: + + kobject_put(&wss->kobj); /* will free name if needed */ + return result; +error_kobject_register: + kfree(wss->kobj.name); + wss->kobj.name = NULL; + wss->kobj.ktype = NULL; + return result; +} + + +/** + * Release WSS + * + * No more references exist to this WSS. We should undo everything that was + * done in wlp_wss_create_activate() except removing the group. The group + * is not removed because an object can be unregistered before the group is + * created. We also undo any additional operations on the WSS after this + * (addition of members). + * + * If memory was allocated for the kobject's name then it will + * be freed by the kobject system during this time. + * + * The EDA cache is removed and reinitilized when the WSS is removed. We + * thus loose knowledge of members of this WSS at that time and need not do + * it here. + */ +void wlp_wss_release(struct kobject *kobj) +{ + struct wlp_wss *wss = container_of(kobj, struct wlp_wss, kobj); + + wlp_wss_reset(wss); +} + +/** + * Enroll into a WSS using provided neighbor as registrar + * + * First search the neighborhood information to learn which neighbor is + * referred to, next proceed with enrollment. 
+ * + * &wss->mutex is held + */ +static +int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid, + struct uwb_dev_addr *dest) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_neighbor_e *neighbor; + char buf[WLP_WSS_UUID_STRSIZE]; + int result = -ENXIO; + struct uwb_dev_addr *dev_addr; + + wlp_wss_uuid_print(buf, sizeof(buf), wssid); + d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n", + wss, buf, dest->data[1], dest->data[0]); + mutex_lock(&wlp->nbmutex); + list_for_each_entry(neighbor, &wlp->neighbors, node) { + dev_addr = &neighbor->uwb_dev->dev_addr; + if (!memcmp(dest, dev_addr, sizeof(*dest))) { + d_printf(5, dev, "Neighbor %02x:%02x is valid, " + "enrolling. \n", + dev_addr->data[1], dev_addr->data[0]); + result = wlp_enroll_neighbor(wlp, neighbor, wss, + wssid); + break; + } + } + if (result == -ENXIO) + dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", + dest->data[1], dest->data[0]); + mutex_unlock(&wlp->nbmutex); + d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n", + wss, buf, dest->data[1], dest->data[0], result); + return result; +} + +/** + * Enroll into a WSS previously discovered + * + * User provides WSSID of WSS, search for neighbor that has this WSS + * activated and attempt to enroll. + * + * &wss->mutex is held + */ +static +int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + struct wlp_neighbor_e *neighbor; + struct wlp_wssid_e *wssid_e; + char buf[WLP_WSS_UUID_STRSIZE]; + int result = -ENXIO; + + wlp_wss_uuid_print(buf, sizeof(buf), wssid); + d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf); + mutex_lock(&wlp->nbmutex); + list_for_each_entry(neighbor, &wlp->neighbors, node) { + list_for_each_entry(wssid_e, &neighbor->wssid, node) { + if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { + d_printf(5, dev, "Found WSSID %s in neighbor " + "%02x:%02x cache. \n", buf, + neighbor->uwb_dev->dev_addr.data[1], + neighbor->uwb_dev->dev_addr.data[0]); + result = wlp_enroll_neighbor(wlp, neighbor, + wss, wssid); + if (result == 0) /* enrollment success */ + goto out; + break; + } + } + } +out: + if (result == -ENXIO) + dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf); + mutex_unlock(&wlp->nbmutex); + d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result); + return result; +} + +/** + * Enroll into WSS with provided WSSID, registrar may be provided + * + * @wss: out WSS that will be enrolled + * @wssid: wssid of neighboring WSS that we want to enroll in + * @devaddr: registrar can be specified, will be broadcast (ff:ff) if any + * neighbor can be used as registrar. 
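+ *
+ * For instance, to let any neighbor act as registrar (a sketch; the
+ * variable name is illustrative):
+ *
+ *    struct uwb_dev_addr any = { .data = { 0xff, 0xff } };
+ *
+ *    result = wlp_wss_enroll(wss, &wssid, &any);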
+ * + * &wss->mutex is held + */ +static +int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid, + struct uwb_dev_addr *devaddr) +{ + int result; + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + char buf[WLP_WSS_UUID_STRSIZE]; + struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; + + wlp_wss_uuid_print(buf, sizeof(buf), wssid); + if (wss->state != WLP_WSS_STATE_NONE) { + dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); + result = -EEXIST; + goto error; + } + if (!memcmp(&bcast, devaddr, sizeof(bcast))) { + d_printf(5, dev, "Request to enroll in discovered WSS " + "with WSSID %s \n", buf); + result = wlp_wss_enroll_discovered(wss, wssid); + } else { + d_printf(5, dev, "Request to enroll in WSSID %s with " + "registrar %02x:%02x\n", buf, devaddr->data[1], + devaddr->data[0]); + result = wlp_wss_enroll_target(wss, wssid, devaddr); + } + if (result < 0) { + dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", + buf, result); + goto error; + } + d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf); + result = wlp_wss_sysfs_add(wss, buf); + if (result < 0) { + dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); + wlp_wss_reset(wss); + } +error: + return result; + +} + +/** + * Activate given WSS + * + * Prior to activation a WSS must be enrolled. To activate a WSS a device + * includes the WSS hash in the WLP IE in its beacon in each superframe. + * WLP 0.99 [7.2.5]. + * + * The WSS tag is also computed at this time. We only support one activated + * WSS so we can use the hash as a tag - there will never be a conflict. + * + * We currently only support one activated WSS so only one WSS hash is + * included in the WLP IE. + */ +static +int wlp_wss_activate(struct wlp_wss *wss) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + struct uwb_rc *uwb_rc = wlp->rc; + int result; + struct { + struct wlp_ie wlp_ie; + u8 hash; /* only include one hash */ + } ie_data; + + d_fnstart(5, dev, "Activating WSS %p. \n", wss); + BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); + wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); + wss->tag = wss->hash; + memset(&ie_data, 0, sizeof(ie_data)); + ie_data.wlp_ie.hdr.element_id = UWB_IE_WLP; + ie_data.wlp_ie.hdr.length = sizeof(ie_data) - sizeof(struct uwb_ie_hdr); + wlp_ie_set_hash_length(&ie_data.wlp_ie, sizeof(ie_data.hash)); + ie_data.hash = wss->hash; + result = uwb_rc_ie_add(uwb_rc, &ie_data.wlp_ie.hdr, + sizeof(ie_data)); + if (result < 0) { + dev_err(dev, "WLP: Unable to add WLP IE to beacon. " + "result = %d.\n", result); + goto error_wlp_ie; + } + wss->state = WLP_WSS_STATE_ACTIVE; + result = 0; +error_wlp_ie: + d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result); + return result; +} + +/** + * Enroll in and activate WSS identified by provided WSSID + * + * The neighborhood cache should contain a list of all neighbors and the + * WSS they have activated. Based on that cache we search which neighbor we + * can perform the association process with. The user also has option to + * specify which neighbor it prefers as registrar. + * Successful enrollment is followed by activation. + * Successful activation will create the sysfs directory containing + * specific information regarding this WSS. 
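+ *
+ * Typical use, as wired up through the WSS sysfs interface (sketch only;
+ * the surrounding variables are illustrative):
+ *
+ *    result = wlp_wss_enroll_activate(&wlp->wss, &wssid, &dev_addr);
+ *
+ * On failure a negative error is returned and the WSS is left inactive.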
+ */ +int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, + struct uwb_dev_addr *devaddr) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = 0; + char buf[WLP_WSS_UUID_STRSIZE]; + + d_fnstart(5, dev, "Enrollment and activation requested. \n"); + mutex_lock(&wss->mutex); + result = wlp_wss_enroll(wss, wssid, devaddr); + if (result < 0) { + wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); + dev_err(dev, "WLP: Enrollment into WSS %s failed.\n", buf); + goto error_enroll; + } + result = wlp_wss_activate(wss); + if (result < 0) { + dev_err(dev, "WLP: Unable to activate WSS. Undoing enrollment " + "result = %d \n", result); + /* Undo enrollment */ + wlp_wss_reset(wss); + goto error_activate; + } +error_activate: +error_enroll: + mutex_unlock(&wss->mutex); + d_fnend(5, dev, "Completed. result = %d \n", result); + return result; +} + +/** + * Create, enroll, and activate a new WSS + * + * @wssid: new wssid provided by user + * @name: WSS name requested by used. + * @sec_status: security status requested by user + * + * A user requested the creation of a new WSS. All operations are done + * locally. The new WSS will be stored locally, the hash will be included + * in the WLP IE, and the sysfs infrastructure for this WSS will be + * created. + */ +int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, + char *name, unsigned sec_status, unsigned accept) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = 0; + char buf[WLP_WSS_UUID_STRSIZE]; + d_fnstart(5, dev, "Request to create new WSS.\n"); + result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); + d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, " + "sec_status=%u, accepting enrollment=%u \n", + buf, name, sec_status, accept); + if (!mutex_trylock(&wss->mutex)) { + dev_err(dev, "WLP: WLP association session in progress.\n"); + return -EBUSY; + } + if (wss->state != WLP_WSS_STATE_NONE) { + dev_err(dev, "WLP: WSS already exists. Not creating new.\n"); + result = -EEXIST; + goto out; + } + if (wss->kobj.parent == NULL) { + dev_err(dev, "WLP: WSS parent not ready. Is network interface " + "up?\n"); + result = -ENXIO; + goto out; + } + if (sec_status == WLP_WSS_SECURE) { + dev_err(dev, "WLP: FIXME Creation of secure WSS not " + "supported yet.\n"); + result = -EINVAL; + goto out; + } + wss->wssid = *wssid; + memcpy(wss->name, name, sizeof(wss->name)); + wss->bcast = wlp_wss_sel_bcast_addr(wss); + wss->secure_status = sec_status; + wss->accept_enroll = accept; + /*wss->virtual_addr is initialized in call to wlp_wss_setup*/ + /* sysfs infrastructure */ + result = wlp_wss_sysfs_add(wss, buf); + if (result < 0) { + dev_err(dev, "Cannot set up sysfs for WSS kobject.\n"); + wlp_wss_reset(wss); + goto out; + } else + result = 0; + wss->state = WLP_WSS_STATE_ENROLLED; + result = wlp_wss_activate(wss); + if (result < 0) { + dev_err(dev, "WLP: Unable to activate WSS. Undoing " + "enrollment\n"); + wlp_wss_reset(wss); + goto out; + } + result = 0; +out: + mutex_unlock(&wss->mutex); + d_fnend(5, dev, "Completed. 
result = %d \n", result); + return result; +} + +/** + * Determine if neighbor has WSS activated + * + * @returns: 1 if neighbor has WSS activated, zero otherwise + * + * This can be done in two ways: + * - send a C1 frame, parse C2/F0 response + * - examine the WLP IE sent by the neighbor + * + * The WLP IE is not fully supported in hardware so we use the C1/C2 frame + * exchange to determine if a WSS is activated. Using the WLP IE should be + * faster and should be used when it becomes possible. + */ +int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, + struct uwb_dev_addr *dev_addr) +{ + int result = 0; + struct device *dev = &wlp->rc->uwb_dev.dev; + char buf[WLP_WSS_UUID_STRSIZE]; + DECLARE_COMPLETION_ONSTACK(completion); + struct wlp_session session; + struct sk_buff *skb; + struct wlp_frame_assoc *resp; + struct wlp_uuid wssid; + + wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); + d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", + wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); + mutex_lock(&wlp->mutex); + /* Send C1 association frame */ + result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); + if (result < 0) { + dev_err(dev, "Unable to send C1 frame to neighbor " + "%02x:%02x (%d)\n", dev_addr->data[1], + dev_addr->data[0], result); + result = 0; + goto out; + } + /* Create session, wait for response */ + session.exp_message = WLP_ASSOC_C2; + session.cb = wlp_session_cb; + session.cb_priv = &completion; + session.neighbor_addr = *dev_addr; + BUG_ON(wlp->session != NULL); + wlp->session = &session; + /* Wait for C2/F0 frame */ + result = wait_for_completion_interruptible_timeout(&completion, + WLP_PER_MSG_TIMEOUT * HZ); + if (result == 0) { + dev_err(dev, "Timeout while sending C1 to neighbor " + "%02x:%02x.\n", dev_addr->data[1], + dev_addr->data[0]); + goto out; + } + if (result < 0) { + dev_err(dev, "Unable to send C1 to neighbor %02x:%02x.\n", + dev_addr->data[1], dev_addr->data[0]); + result = 0; + goto out; + } + /* Parse message in session->data: it will be either C2 or F0 */ + skb = session.data; + resp = (void *) skb->data; + d_printf(5, dev, "Received response to C1 frame. \n"); + d_dump(5, dev, skb->data, skb->len > 72 ? 
72 : skb->len); + if (resp->type == WLP_ASSOC_F0) { + result = wlp_parse_f0(wlp, skb); + if (result < 0) + dev_err(dev, "WLP: unable to parse incoming F0 " + "frame from neighbor %02x:%02x.\n", + dev_addr->data[1], dev_addr->data[0]); + result = 0; + goto error_resp_parse; + } + /* WLP version and message type fields have already been parsed */ + result = wlp_get_wssid(wlp, (void *)resp + sizeof(*resp), &wssid, + skb->len - sizeof(*resp)); + if (result < 0) { + dev_err(dev, "WLP: unable to obtain WSSID from C2 frame.\n"); + result = 0; + goto error_resp_parse; + } + if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) { + d_printf(5, dev, "WSSID in C2 frame matches local " + "active WSS.\n"); + result = 1; + } else { + dev_err(dev, "WLP: Received a C2 frame without matching " + "WSSID.\n"); + result = 0; + } +error_resp_parse: + kfree_skb(skb); +out: + wlp->session = NULL; + mutex_unlock(&wlp->mutex); + d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", + wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); + return result; +} + +/** + * Activate connection with neighbor by updating EDA cache + * + * @wss: local WSS to which neighbor wants to connect + * @dev_addr: neighbor's address + * @wssid: neighbor's WSSID - must be same as our WSS's WSSID + * @tag: neighbor's WSS tag used to identify frames transmitted by it + * @virt_addr: neighbor's virtual EUI-48 + */ +static +int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss, + struct uwb_dev_addr *dev_addr, + struct wlp_uuid *wssid, u8 *tag, + struct uwb_mac_addr *virt_addr) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = 0; + char buf[WLP_WSS_UUID_STRSIZE]; + wlp_wss_uuid_print(buf, sizeof(buf), wssid); + d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " + "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag, + virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], + virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); + + if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { + d_printf(5, dev, "WSSID from neighbor frame matches local " + "active WSS.\n"); + /* Update EDA cache */ + result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, + (void *) virt_addr->data, *tag, + WLP_WSS_CONNECTED); + if (result < 0) + dev_err(dev, "WLP: Unable to update EDA cache " + "with new connected neighbor information.\n"); + } else { + dev_err(dev, "WLP: Neighbor does not have matching " + "WSSID.\n"); + result = -EINVAL; + } + + d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " + "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n", + wlp, wss, buf, *tag, + virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], + virt_addr->data[3], virt_addr->data[4], virt_addr->data[5], + result); + + return result; +} + +/** + * Connect to WSS neighbor + * + * Use C3/C4 exchange to determine if neighbor has WSS activated and + * retrieve the WSS tag and virtual EUI-48 of the neighbor. 
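+ *
+ * The exchange, roughly (as implemented in the body below):
+ *
+ *	send C3 ----------------------------->  neighbor
+ *	        <------ C4 (or F0 on error) --  neighbor
+ *	wlp_parse_c3c4_frame()        -> wssid, tag, virtual EUI-48
+ *	wlp_wss_activate_connection() -> EDA entry marked WLP_WSS_CONNECTED
+ *
+ * On any failure the EDA entry is marked WLP_WSS_CONNECT_FAILED so the
+ * connection is not retried.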
+ */ +static +int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, + struct uwb_dev_addr *dev_addr) +{ + int result; + struct device *dev = &wlp->rc->uwb_dev.dev; + char buf[WLP_WSS_UUID_STRSIZE]; + struct wlp_uuid wssid; + u8 tag; + struct uwb_mac_addr virt_addr; + DECLARE_COMPLETION_ONSTACK(completion); + struct wlp_session session; + struct wlp_frame_assoc *resp; + struct sk_buff *skb; + + wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); + d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", + wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); + mutex_lock(&wlp->mutex); + /* Send C3 association frame */ + result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); + if (result < 0) { + dev_err(dev, "Unable to send C3 frame to neighbor " + "%02x:%02x (%d)\n", dev_addr->data[1], + dev_addr->data[0], result); + goto out; + } + /* Create session, wait for response */ + session.exp_message = WLP_ASSOC_C4; + session.cb = wlp_session_cb; + session.cb_priv = &completion; + session.neighbor_addr = *dev_addr; + BUG_ON(wlp->session != NULL); + wlp->session = &session; + /* Wait for C4/F0 frame */ + result = wait_for_completion_interruptible_timeout(&completion, + WLP_PER_MSG_TIMEOUT * HZ); + if (result == 0) { + dev_err(dev, "Timeout while sending C3 to neighbor " + "%02x:%02x.\n", dev_addr->data[1], + dev_addr->data[0]); + result = -ETIMEDOUT; + goto out; + } + if (result < 0) { + dev_err(dev, "Unable to send C3 to neighbor %02x:%02x.\n", + dev_addr->data[1], dev_addr->data[0]); + goto out; + } + /* Parse message in session->data: it will be either C4 or F0 */ + skb = session.data; + resp = (void *) skb->data; + d_printf(5, dev, "Received response to C3 frame. \n"); + d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); + if (resp->type == WLP_ASSOC_F0) { + result = wlp_parse_f0(wlp, skb); + if (result < 0) + dev_err(dev, "WLP: unable to parse incoming F0 " + "frame from neighbor %02x:%02x.\n", + dev_addr->data[1], dev_addr->data[0]); + result = -EINVAL; + goto error_resp_parse; + } + result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); + if (result < 0) { + dev_err(dev, "WLP: Unable to parse C4 frame from neighbor.\n"); + goto error_resp_parse; + } + result = wlp_wss_activate_connection(wlp, wss, dev_addr, &wssid, &tag, + &virt_addr); + if (result < 0) { + dev_err(dev, "WLP: Unable to activate connection to " + "neighbor %02x:%02x.\n", dev_addr->data[1], + dev_addr->data[0]); + goto error_resp_parse; + } +error_resp_parse: + kfree_skb(skb); +out: + /* Record that we unsuccessfully tried to connect to this neighbor */ + if (result < 0) + wlp_eda_update_node_state(&wlp->eda, dev_addr, + WLP_WSS_CONNECT_FAILED); + wlp->session = NULL; + mutex_unlock(&wlp->mutex); + d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", + wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); + return result; +} + +/** + * Connect to neighbor with common WSS, send pending frame + * + * This function is scheduled when a frame is destined to a neighbor with + * which we do not have a connection. A copy of the EDA cache entry is + * provided - not the actual cache entry (because it is protected by a + * spinlock). + * + * First determine if neighbor has the same WSS activated, connect if it + * does. The C3/C4 exchange is dual purpose to determine if neighbor has + * WSS activated and proceed with the connection. + * + * The frame that triggered the connection setup is sent after connection + * setup. 
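+ *
+ * In outline (matching the body below): connect via the C3/C4 exchange
+ * (wlp_wss_connect_neighbor()), refresh the local copy of the EDA entry
+ * (wlp_copy_eda_node()), prepend the WLP header with wlp_wss_prep_hdr(),
+ * transmit the deferred skb through wlp->xmit_frame() and free the work
+ * context.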
+ * + * network queue is stopped - we need to restart when done + * + */ +static +void wlp_wss_connect_send(struct work_struct *ws) +{ + struct wlp_assoc_conn_ctx *conn_ctx = container_of(ws, + struct wlp_assoc_conn_ctx, + ws); + struct wlp *wlp = conn_ctx->wlp; + struct sk_buff *skb = conn_ctx->skb; + struct wlp_eda_node *eda_entry = &conn_ctx->eda_entry; + struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; + struct wlp_wss *wss = &wlp->wss; + int result; + struct device *dev = &wlp->rc->uwb_dev.dev; + char buf[WLP_WSS_UUID_STRSIZE]; + + mutex_lock(&wss->mutex); + wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); + d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", + wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); + if (wss->state < WLP_WSS_STATE_ACTIVE) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Attempting to connect with " + "WSS that is not active or connected.\n"); + dev_kfree_skb(skb); + goto out; + } + /* Establish connection - send C3 rcv C4 */ + result = wlp_wss_connect_neighbor(wlp, wss, dev_addr); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Unable to establish connection " + "with neighbor %02x:%02x.\n", + dev_addr->data[1], dev_addr->data[0]); + dev_kfree_skb(skb); + goto out; + } + /* EDA entry changed, update the local copy being used */ + result = wlp_copy_eda_node(&wlp->eda, dev_addr, eda_entry); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Cannot find EDA entry for " + "neighbor %02x:%02x \n", + dev_addr->data[1], dev_addr->data[0]); + } + result = wlp_wss_prep_hdr(wlp, eda_entry, skb); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Unable to prepare frame header for " + "transmission (neighbor %02x:%02x). \n", + dev_addr->data[1], dev_addr->data[0]); + dev_kfree_skb(skb); + goto out; + } + BUG_ON(wlp->xmit_frame == NULL); + result = wlp->xmit_frame(wlp, skb, dev_addr); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Unable to transmit frame: %d\n", + result); + if (result == -ENXIO) + dev_err(dev, "WLP: Is network interface up? \n"); + /* We could try again ... */ + dev_kfree_skb(skb);/*we need to free if tx fails */ + } +out: + kfree(conn_ctx); + BUG_ON(wlp->start_queue == NULL); + wlp->start_queue(wlp); + mutex_unlock(&wss->mutex); + d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf); +} + +/** + * Add WLP header to outgoing skb + * + * @eda_entry: pointer to neighbor's entry in the EDA cache + * @_skb: skb containing data destined to the neighbor + */ +int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry, + void *_skb) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = 0; + unsigned char *eth_addr = eda_entry->eth_addr; + struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; + struct sk_buff *skb = _skb; + struct wlp_frame_std_abbrv_hdr *std_hdr; + + d_fnstart(6, dev, "wlp %p \n", wlp); + if (eda_entry->state == WLP_WSS_CONNECTED) { + /* Add WLP header */ + BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); + std_hdr = (void *) __skb_push(skb, sizeof(*std_hdr)); + std_hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); + std_hdr->hdr.type = WLP_FRAME_STANDARD; + std_hdr->tag = eda_entry->wss->tag; + } else { + if (printk_ratelimit()) + dev_err(dev, "WLP: Destination neighbor (Ethernet: " + "%02x:%02x:%02x:%02x:%02x:%02x, Dev: " + "%02x:%02x) is not connected. 
\n", eth_addr[0], + eth_addr[1], eth_addr[2], eth_addr[3], + eth_addr[4], eth_addr[5], dev_addr->data[1], + dev_addr->data[0]); + result = -EINVAL; + } + d_fnend(6, dev, "wlp %p \n", wlp); + return result; +} + + +/** + * Prepare skb for neighbor: connect if not already and prep WLP header + * + * This function is called in interrupt context, but it needs to sleep. We + * temporarily stop the net queue to establish the WLP connection. + * Setup of the WLP connection and restart of queue is scheduled + * on the default work queue. + * + * run with eda->lock held (spinlock) + */ +int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry, + void *_skb) +{ + int result = 0; + struct device *dev = &wlp->rc->uwb_dev.dev; + struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; + unsigned char *eth_addr = eda_entry->eth_addr; + struct sk_buff *skb = _skb; + struct wlp_assoc_conn_ctx *conn_ctx; + + d_fnstart(5, dev, "wlp %p\n", wlp); + d_printf(5, dev, "To neighbor %02x:%02x with eth " + "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1], + dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2], + eth_addr[3], eth_addr[4], eth_addr[5]); + if (eda_entry->state == WLP_WSS_UNCONNECTED) { + /* We don't want any more packets while we set up connection */ + BUG_ON(wlp->stop_queue == NULL); + wlp->stop_queue(wlp); + conn_ctx = kmalloc(sizeof(*conn_ctx), GFP_ATOMIC); + if (conn_ctx == NULL) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Unable to allocate memory " + "for connection handling.\n"); + result = -ENOMEM; + goto out; + } + conn_ctx->wlp = wlp; + conn_ctx->skb = skb; + conn_ctx->eda_entry = *eda_entry; + INIT_WORK(&conn_ctx->ws, wlp_wss_connect_send); + schedule_work(&conn_ctx->ws); + result = 1; + } else if (eda_entry->state == WLP_WSS_CONNECT_FAILED) { + /* Previous connection attempts failed, don't retry - see + * conditions for connection in WLP 0.99 [7.6.2] */ + if (printk_ratelimit()) + dev_err(dev, "Could not connect to neighbor " + "previously. Not retrying. \n"); + result = -ENONET; + goto out; + } else { /* eda_entry->state == WLP_WSS_CONNECTED */ + d_printf(5, dev, "Neighbor is connected, preparing frame.\n"); + result = wlp_wss_prep_hdr(wlp, eda_entry, skb); + } +out: + d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result); + return result; +} + +/** + * Emulate broadcast: copy skb, send copy to neighbor (connect if not already) + * + * We need to copy skbs in the case where we emulate broadcast through + * unicast. We copy instead of clone because we are modifying the data of + * the frame after copying ... clones share data so we cannot emulate + * broadcast using clones. 
+ * + * run with eda->lock held (spinlock) + */ +int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry, + void *_skb) +{ + int result = -ENOMEM; + struct device *dev = &wlp->rc->uwb_dev.dev; + struct sk_buff *skb = _skb; + struct sk_buff *copy; + struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; + + d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n", + dev_addr->data[1], dev_addr->data[0], skb); + copy = skb_copy(skb, GFP_ATOMIC); + if (copy == NULL) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Unable to copy skb for " + "transmission.\n"); + goto out; + } + result = wlp_wss_connect_prep(wlp, eda_entry, copy); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Unable to connect/send skb " + "to neighbor.\n"); + dev_kfree_skb_irq(copy); + goto out; + } else if (result == 1) + /* Frame will be transmitted separately */ + goto out; + BUG_ON(wlp->xmit_frame == NULL); + result = wlp->xmit_frame(wlp, copy, dev_addr); + if (result < 0) { + if (printk_ratelimit()) + dev_err(dev, "WLP: Unable to transmit frame: %d\n", + result); + if ((result == -ENXIO) && printk_ratelimit()) + dev_err(dev, "WLP: Is network interface up? \n"); + /* We could try again ... */ + dev_kfree_skb_irq(copy);/*we need to free if tx fails */ + } +out: + d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1], + dev_addr->data[0]); + return result; +} + + +/** + * Setup WSS + * + * Should be called by network driver after the interface has been given a + * MAC address. + */ +int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + int result = 0; + d_fnstart(5, dev, "wss (%p) \n", wss); + mutex_lock(&wss->mutex); + wss->kobj.parent = &net_dev->dev.kobj; + if (!is_valid_ether_addr(net_dev->dev_addr)) { + dev_err(dev, "WLP: Invalid MAC address. Cannot use for" + "virtual.\n"); + result = -EINVAL; + goto out; + } + memcpy(wss->virtual_addr.data, net_dev->dev_addr, + sizeof(wss->virtual_addr.data)); +out: + mutex_unlock(&wss->mutex); + d_fnend(5, dev, "wss (%p) \n", wss); + return result; +} +EXPORT_SYMBOL_GPL(wlp_wss_setup); + +/** + * Remove WSS + * + * Called by client that configured WSS through wlp_wss_setup(). This + * function is called when client no longer needs WSS, eg. client shuts + * down. + * + * We remove the WLP IE from the beacon before initiating local cleanup. + */ +void wlp_wss_remove(struct wlp_wss *wss) +{ + struct wlp *wlp = container_of(wss, struct wlp, wss); + struct device *dev = &wlp->rc->uwb_dev.dev; + d_fnstart(5, dev, "wss (%p) \n", wss); + mutex_lock(&wss->mutex); + if (wss->state == WLP_WSS_STATE_ACTIVE) + uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); + if (wss->state != WLP_WSS_STATE_NONE) { + sysfs_remove_group(&wss->kobj, &wss_attr_group); + kobject_put(&wss->kobj); + } + wss->kobj.parent = NULL; + memset(&wss->virtual_addr, 0, sizeof(wss->virtual_addr)); + /* Cleanup EDA cache */ + wlp_eda_release(&wlp->eda); + wlp_eda_init(&wlp->eda); + mutex_unlock(&wss->mutex); + d_fnend(5, dev, "wss (%p) \n", wss); +} +EXPORT_SYMBOL_GPL(wlp_wss_remove); -- cgit v0.10.2 From 3b0c5a3818555988b6235144e0174b1a512719b7 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 17 Sep 2008 16:34:19 +0100 Subject: uwb: add WiMedia LLC Protocol (build system) Add the WLP build system (Kconfig and Kbuild files). 
Signed-off-by: David Vrabel diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig index a442f39..41f8984 100644 --- a/drivers/uwb/Kconfig +++ b/drivers/uwb/Kconfig @@ -57,4 +57,11 @@ config UWB_WHCI To compile this driver select Y (built in) or M (module). It is safe to select any even if you do not have the hardware. +config UWB_WLP + tristate "Support WiMedia Link Protocol (Ethernet/IP over UWB)" + depends on UWB && NET + help + This is a common library for drivers that implement + networking over UWB. + endif # UWB diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index 6bdb8e7..79f48e3 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_UWB) += uwb.o +obj-$(CONFIG_UWB_WLP) += wlp/ obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o obj-$(CONFIG_UWB_HWA) += hwa-rc.o diff --git a/drivers/uwb/wlp/Makefile b/drivers/uwb/wlp/Makefile new file mode 100644 index 0000000..c72c11d --- /dev/null +++ b/drivers/uwb/wlp/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_UWB_WLP) := wlp.o + +wlp-objs := \ + driver.o \ + eda.o \ + messages.o \ + sysfs.o \ + txrx.o \ + wlp-lc.o \ + wss-lc.o -- cgit v0.10.2 From 1ba47da527121ff704f4e9f27a12c9f32db05022 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:20 +0100 Subject: uwb: add the i1480 DFU driver Add the driver for downloading the firmware to an Intel i1480 device. Signed-off-by: David Vrabel diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig index 41f8984..59acb5b 100644 --- a/drivers/uwb/Kconfig +++ b/drivers/uwb/Kconfig @@ -64,4 +64,16 @@ config UWB_WLP This is a common library for drivers that implement networking over UWB. +config UWB_I1480U + tristate "Support for Intel Wireless UWB Link 1480 HWA" + depends on UWB_HWA + select FW_LOADER + help + This driver enables support for the i1480 when connected via + USB. It consists of a firmware uploader that will enable it + to behave as an HWA device. + + To compile this driver select Y (built in) or M (module). It + is safe to select any even if you do not have the hardware. + endif # UWB diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index 79f48e3..257e690 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_UWB) += uwb.o obj-$(CONFIG_UWB_WLP) += wlp/ obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o obj-$(CONFIG_UWB_HWA) += hwa-rc.o +obj-$(CONFIG_UWB_I1480U) += i1480/ uwb-objs := \ address.o \ diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile new file mode 100644 index 0000000..d69da16 --- /dev/null +++ b/drivers/uwb/i1480/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/uwb/i1480/dfu/Makefile new file mode 100644 index 0000000..bd1b9f2 --- /dev/null +++ b/drivers/uwb/i1480/dfu/Makefile @@ -0,0 +1,9 @@ +obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o + +i1480-dfu-usb-objs := \ + dfu.o \ + mac.o \ + phy.o \ + usb.o + + diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c new file mode 100644 index 0000000..ebffaf5 --- /dev/null +++ b/drivers/uwb/i1480/dfu/dfu.c @@ -0,0 +1,281 @@ +/* + * Intel Wireless UWB Link 1480 + * Main driver + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Common code for firmware upload used by the USB and PCI version; + * i1480_fw_upload() takes a device descriptor and uses the function + * pointers it provides to upload firmware and prepare the PHY. + * + * As well, provides common functions used by the rest of the code. + */ +#include "i1480-dfu.h" +#include +#include +#include +#include +#include +#include + +#define D_LOCAL 0 +#include + +/** @return 0 if If @evt is a valid reply event; otherwise complain */ +int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, + const char *cmd, u8 context, + unsigned expected_type, unsigned expected_event) +{ + int result = 0; + struct device *dev = i1480->dev; + if (rceb->bEventContext != context) { + dev_err(dev, "%s: " + "unexpected context id 0x%02x (expected 0x%02x)\n", + cmd, rceb->bEventContext, context); + result = -EINVAL; + } + if (rceb->bEventType != expected_type) { + dev_err(dev, "%s: " + "unexpected event type 0x%02x (expected 0x%02x)\n", + cmd, rceb->bEventType, expected_type); + result = -EINVAL; + } + if (le16_to_cpu(rceb->wEvent) != expected_event) { + dev_err(dev, "%s: " + "unexpected event 0x%04x (expected 0x%04x)\n", + cmd, le16_to_cpu(rceb->wEvent), expected_event); + result = -EINVAL; + } + return result; +} +EXPORT_SYMBOL_GPL(i1480_rceb_check); + + +/** + * Execute a Radio Control Command + * + * Command data has to be in i1480->cmd_buf. + * + * @returns size of the reply data filled in i1480->evt_buf or < 0 errno + * code on error. 
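+ *
+ * Typical call pattern (lifted from i1480_cmd_get_mac_phy_info() below):
+ *
+ *	struct uwb_rccb *cmd = i1480->cmd_buf;
+ *	struct i1480_evt_confirm_GMPI *reply = i1480->evt_buf;
+ *
+ *	cmd->bCommandType = i1480_CET_VS1;
+ *	cmd->wCommand = cpu_to_le16(i1480_CMD_GET_MAC_PHY_INFO);
+ *	reply->rceb.bEventType = i1480_CET_VS1;
+ *	reply->rceb.wEvent = i1480_EVT_GET_MAC_PHY_INFO;
+ *	result = i1480_cmd(i1480, "GET_MAC_PHY_INFO",
+ *			   sizeof(*cmd), sizeof(*reply));
+ *
+ * i.e. the caller preloads the expected bEventType/wEvent in the reply
+ * buffer; i1480_cmd() fills in a random context id and verifies the
+ * reply with i1480_rceb_check().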
+ */ +ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, + size_t reply_size) +{ + ssize_t result; + struct uwb_rceb *reply = i1480->evt_buf; + struct uwb_rccb *cmd = i1480->cmd_buf; + u16 expected_event = reply->wEvent; + u8 expected_type = reply->bEventType; + u8 context; + + d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); + init_completion(&i1480->evt_complete); + i1480->evt_result = -EINPROGRESS; + do { + get_random_bytes(&context, 1); + } while (context == 0x00 || context == 0xff); + cmd->bCommandContext = context; + result = i1480->cmd(i1480, cmd_name, cmd_size); + if (result < 0) + goto error; + /* wait for the callback to report a event was received */ + result = wait_for_completion_interruptible_timeout( + &i1480->evt_complete, HZ); + if (result == 0) { + result = -ETIMEDOUT; + goto error; + } + if (result < 0) + goto error; + result = i1480->evt_result; + if (result < 0) { + dev_err(i1480->dev, "%s: command reply reception failed: %zd\n", + cmd_name, result); + goto error; + } + if (result != reply_size) { + dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n", + cmd_name, result, reply_size); + result = -EINVAL; + goto error; + } + /* Verify we got the right event in response */ + result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, + expected_type, expected_event); +error: + d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n", + i1480, cmd_name, cmd_size, result); + return result; +} +EXPORT_SYMBOL_GPL(i1480_cmd); + + +/** + * Get information about the MAC and PHY + * + * @wa: Wired adaptor + * @neh: Notification/event handler + * @reply: Pointer to the reply event buffer + * @returns: 0 if ok, < 0 errno code on error. + */ +static +int i1480_cmd_get_mac_phy_info(struct i1480 *i1480) +{ + int result; + struct uwb_rccb *cmd = i1480->cmd_buf; + struct i1480_evt_confirm_GMPI *reply = i1480->evt_buf; + + cmd->bCommandType = i1480_CET_VS1; + cmd->wCommand = cpu_to_le16(i1480_CMD_GET_MAC_PHY_INFO); + reply->rceb.bEventType = i1480_CET_VS1; + reply->rceb.wEvent = i1480_EVT_GET_MAC_PHY_INFO; + result = i1480_cmd(i1480, "GET_MAC_PHY_INFO", sizeof(*cmd), + sizeof(*reply)); + if (result < 0) + goto out; + if (le16_to_cpu(reply->status) != 0x00) { + dev_err(i1480->dev, + "GET_MAC_PHY_INFO: command execution failed: %d\n", + reply->status); + result = -EIO; + } +out: + return result; +} + + +/** + * Get i1480's info and print it + * + * @wa: Wire Adapter + * @neh: Notification/event handler + * @returns: 0 if ok, < 0 errno code on error. 
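+ *
+ * If i1480->quirk_no_check_info is set the check is skipped entirely
+ * and 0 is returned (see the body below).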
+ */ +static +int i1480_check_info(struct i1480 *i1480) +{ + struct i1480_evt_confirm_GMPI *reply = i1480->evt_buf; + int result; + unsigned mac_fw_rev; +#if i1480_FW <= 0x00000302 + unsigned phy_fw_rev; +#endif + if (i1480->quirk_no_check_info) { + dev_err(i1480->dev, "firmware info check disabled\n"); + return 0; + } + + result = i1480_cmd_get_mac_phy_info(i1480); + if (result < 0) { + dev_err(i1480->dev, "Cannot get MAC & PHY information: %d\n", + result); + goto out; + } + mac_fw_rev = le16_to_cpu(reply->mac_fw_rev); +#if i1480_FW > 0x00000302 + dev_info(i1480->dev, + "HW v%02hx " + "MAC FW v%02hx.%02hx caps %04hx " + "PHY type %02hx v%02hx caps %02hx %02hx %02hx\n", + reply->hw_rev, mac_fw_rev >> 8, mac_fw_rev & 0xff, + le16_to_cpu(reply->mac_caps), + reply->phy_vendor, reply->phy_rev, + reply->phy_caps[0], reply->phy_caps[1], reply->phy_caps[2]); +#else + phy_fw_rev = le16_to_cpu(reply->phy_fw_rev); + dev_info(i1480->dev, "MAC FW v%02hx.%02hx caps %04hx " + " PHY FW v%02hx.%02hx caps %04hx\n", + mac_fw_rev >> 8, mac_fw_rev & 0xff, + le16_to_cpu(reply->mac_caps), + phy_fw_rev >> 8, phy_fw_rev & 0xff, + le16_to_cpu(reply->phy_caps)); +#endif + dev_dbg(i1480->dev, + "key-stores:%hu mcast-addr-stores:%hu sec-modes:%hu\n", + (unsigned short) reply->key_stores, + le16_to_cpu(reply->mcast_addr_stores), + (unsigned short) reply->sec_mode_supported); + /* FIXME: complain if fw version too low -- pending for + * numbering to stabilize */ +out: + return result; +} + + +static +int i1480_print_state(struct i1480 *i1480) +{ + int result; + u32 *buf = (u32 *) i1480->cmd_buf; + + result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf)); + if (result < 0) { + dev_err(i1480->dev, "cannot read U & L states: %d\n", result); + goto error; + } + dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]); +error: + return result; +} + + +/* + * PCI probe, firmware uploader + * + * _mac_fw_upload() will call rc_setup(), which needs an rc_release(). + */ +int i1480_fw_upload(struct i1480 *i1480) +{ + int result; + + result = i1480_pre_fw_upload(i1480); /* PHY pre fw */ + if (result < 0 && result != -ENOENT) { + i1480_print_state(i1480); + goto error; + } + result = i1480_mac_fw_upload(i1480); /* MAC fw */ + if (result < 0) { + if (result == -ENOENT) + dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n", + i1480->mac_fw_name); + else + i1480_print_state(i1480); + goto error; + } + result = i1480_phy_fw_upload(i1480); /* PHY fw */ + if (result < 0 && result != -ENOENT) { + i1480_print_state(i1480); + goto error_rc_release; + } + result = i1480_check_info(i1480); + if (result < 0) { + dev_warn(i1480->dev, "Warning! Cannot check firmware info: %d\n", + result); + result = 0; + } + dev_info(i1480->dev, "firmware uploaded successfully\n"); +error_rc_release: + if (i1480->rc_release) + i1480->rc_release(i1480); + result = 0; +error: + return result; +} +EXPORT_SYMBOL_GPL(i1480_fw_upload); diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h new file mode 100644 index 0000000..4103b28 --- /dev/null +++ b/drivers/uwb/i1480/dfu/i1480-dfu.h @@ -0,0 +1,263 @@ +/* + * i1480 Device Firmware Upload + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * This driver is the firmware uploader for the Intel Wireless UWB + * Link 1480 device (both in the USB and PCI incarnations). + * + * The process is quite simple: we stop the device, write the firmware + * to its memory and then restart it. Wait for the device to let us + * know it is done booting firmware. Ready. + * + * We might have to upload before or after a phy firmware (which might + * be done in two methods, using a normal firmware image or through + * the MPI port). + * + * Because USB and PCI use common methods, we just make ops out of the + * common operations (read, write, wait_init_done and cmd) and + * implement them in usb.c and pci.c. + * + * The flow is (some parts omitted): + * + * i1480_{usb,pci}_probe() On enumerate/discovery + * i1480_fw_upload() + * i1480_pre_fw_upload() + * __mac_fw_upload() + * fw_hdrs_load() + * mac_fw_hdrs_push() + * i1480->write() [i1480_{usb,pci}_write()] + * i1480_fw_cmp() + * i1480->read() [i1480_{usb,pci}_read()] + * i1480_mac_fw_upload() + * __mac_fw_upload() + * i1480->setup(() + * i1480->wait_init_done() + * i1480_cmd_reset() + * i1480->cmd() [i1480_{usb,pci}_cmd()] + * ... + * i1480_phy_fw_upload() + * request_firmware() + * i1480_mpi_write() + * i1480->cmd() [i1480_{usb,pci}_cmd()] + * i1480_check_info() + * + * Once the probe function enumerates the device and uploads the + * firmware, we just exit with -ENODEV, as we don't really want to + * attach to the device. + */ +#ifndef __i1480_DFU_H__ +#define __i1480_DFU_H__ + +#include +#include +#include + +#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018)) + +#if i1480_FW > 0x00000302 +#define i1480_RCEB_EXTENDED +#endif + +struct uwb_rccb; +struct uwb_rceb; + +/* + * Common firmware upload handlers + * + * Normally you embed this struct in another one specific to your hw. + * + * @write Write to device's memory from buffer. + * @read Read from device's memory to i1480->evt_buf. + * @setup Setup device after basic firmware is uploaded + * @wait_init_done + * Wait for the device to send a notification saying init + * is done. + * @cmd FOP for issuing the command to the hardware. The + * command data is contained in i1480->cmd_buf and the size + * is supplied as an argument. The command replied is put + * in i1480->evt_buf and the size in i1480->evt_result (or if + * an error, a < 0 errno code). + * + * @cmd_buf Memory buffer used to send commands to the device. + * Allocated by the upper layers i1480_fw_upload(). + * Size has to be @buf_size. + * @evt_buf Memory buffer used to place the async notifications + * received by the hw. Allocated by the upper layers + * i1480_fw_upload(). + * Size has to be @buf_size. + * @cmd_complete + * Low level driver uses this to notify code waiting afor + * an event that the event has arrived and data is in + * i1480->evt_buf (and size/result in i1480->evt_result). + * @hw_rev + * Use this value to activate dfu code to support new revisions + * of hardware. i1480_init() sets this to a default value. + * It should be updated by the USB and PCI code. 
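+ *
+ * A transport glue roughly does the following (a sketch only; the real
+ * probe code in usb.c/pci.c differs in detail): embed struct i1480 in
+ * its own device structure, call i1480_init(), point write/read/cmd/
+ * wait_init_done at its i1480_usb_NAME()/i1480_pci_NAME() helpers and
+ * then call i1480_fw_upload().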
+ */ +struct i1480 { + struct device *dev; + + int (*write)(struct i1480 *, u32 addr, const void *, size_t); + int (*read)(struct i1480 *, u32 addr, size_t); + int (*rc_setup)(struct i1480 *); + void (*rc_release)(struct i1480 *); + int (*wait_init_done)(struct i1480 *); + int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size); + const char *pre_fw_name; + const char *mac_fw_name; + const char *mac_fw_name_deprecate; /* FIXME: Will go away */ + const char *phy_fw_name; + u8 hw_rev; + + size_t buf_size; /* size of both evt_buf and cmd_buf */ + void *evt_buf, *cmd_buf; + ssize_t evt_result; + struct completion evt_complete; + + u8 quirk_no_check_info:1; +}; + +static inline +void i1480_init(struct i1480 *i1480) +{ + i1480->hw_rev = 1; + init_completion(&i1480->evt_complete); +} + +extern int i1480_fw_upload(struct i1480 *); +extern int i1480_pre_fw_upload(struct i1480 *); +extern int i1480_mac_fw_upload(struct i1480 *); +extern int i1480_phy_fw_upload(struct i1480 *); +extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t); +extern int i1480_rceb_check(const struct i1480 *, + const struct uwb_rceb *, const char *, u8, + unsigned, unsigned); + +enum { + /* Vendor specific command type */ + i1480_CET_VS1 = 0xfd, + /* i1480 commands */ + i1480_CMD_SET_IP_MAS = 0x000e, + i1480_CMD_GET_MAC_PHY_INFO = 0x0003, + i1480_CMD_MPI_WRITE = 0x000f, + i1480_CMD_MPI_READ = 0x0010, + /* i1480 events */ +#if i1480_FW > 0x00000302 + i1480_EVT_CONFIRM = 0x0002, + i1480_EVT_RM_INIT_DONE = 0x0101, + i1480_EVT_DEV_ADD = 0x0103, + i1480_EVT_DEV_RM = 0x0104, + i1480_EVT_DEV_ID_CHANGE = 0x0105, + i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO, +#else + i1480_EVT_CONFIRM = 0x0002, + i1480_EVT_RM_INIT_DONE = 0x0101, + i1480_EVT_DEV_ADD = 0x0103, + i1480_EVT_DEV_RM = 0x0104, + i1480_EVT_DEV_ID_CHANGE = 0x0105, + i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM, +#endif +}; + + +struct i1480_evt_confirm { + struct uwb_rceb rceb; +#ifdef i1480_RCEB_EXTENDED + __le16 wParamLength; +#endif + u8 bResultCode; +} __attribute__((packed)); + + +struct i1480_rceb { + struct uwb_rceb rceb; +#ifdef i1480_RCEB_EXTENDED + __le16 wParamLength; +#endif +} __attribute__((packed)); + + +/** + * Get MAC & PHY Information confirm event structure + * + * Confirm event returned by the command. + */ +struct i1480_evt_confirm_GMPI { +#if i1480_FW > 0x00000302 + struct uwb_rceb rceb; + __le16 wParamLength; + __le16 status; + u8 mac_addr[6]; /* EUI-64 bit IEEE address [still 8 bytes?] */ + u8 dev_addr[2]; + __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */ + u8 hw_rev; + u8 phy_vendor; + u8 phy_rev; /* major v = >> 8; minor = v & 0xff */ + __le16 mac_caps; + u8 phy_caps[3]; + u8 key_stores; + __le16 mcast_addr_stores; + u8 sec_mode_supported; +#else + struct uwb_rceb rceb; + u8 status; + u8 mac_addr[8]; /* EUI-64 bit IEEE address [still 8 bytes?] 
*/ + u8 dev_addr[2]; + __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */ + __le16 phy_fw_rev; /* major v = >> 8; minor = v & 0xff */ + __le16 mac_caps; + u8 phy_caps; + u8 key_stores; + __le16 mcast_addr_stores; + u8 sec_mode_supported; +#endif +} __attribute__((packed)); + + +struct i1480_cmd_mpi_write { + struct uwb_rccb rccb; + __le16 size; + u8 data[]; +}; + + +struct i1480_cmd_mpi_read { + struct uwb_rccb rccb; + __le16 size; + struct { + u8 page, offset; + } __attribute__((packed)) data[]; +} __attribute__((packed)); + + +struct i1480_evt_mpi_read { + struct uwb_rceb rceb; +#ifdef i1480_RCEB_EXTENDED + __le16 wParamLength; +#endif + u8 bResultCode; + __le16 size; + struct { + u8 page, offset, value; + } __attribute__((packed)) data[]; +} __attribute__((packed)); + + +#endif /* #ifndef __i1480_DFU_H__ */ diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c new file mode 100644 index 0000000..3d44554 --- /dev/null +++ b/drivers/uwb/i1480/dfu/mac.c @@ -0,0 +1,529 @@ +/* + * Intel Wireless UWB Link 1480 + * MAC Firmware upload implementation + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Implementation of the code for parsing the firmware file (extract + * the headers and binary code chunks) in the fw_*() functions. The + * code to upload pre and mac firmwares is the same, so it uses a + * common entry point in __mac_fw_upload(), which uses the i1480 + * function pointers to push the firmware to the device. 
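+ *
+ * The firmware image itself is a plain sequence of chunks; each chunk,
+ * in little-endian 32-bit words, looks like
+ *
+ *	[ load address ][ length in words ][ 'length' words of data ]
+ *
+ * and fw_hdrs_load() below turns that blob into a chain of struct
+ * fw_hdr descriptors.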
+ */ +#include +#include +#include +#include "i1480-dfu.h" + +#define D_LOCAL 0 +#include + +/* + * Descriptor for a continuous segment of MAC fw data + */ +struct fw_hdr { + unsigned long address; + size_t length; + const u32 *bin; + struct fw_hdr *next; +}; + + +/* Free a chain of firmware headers */ +static +void fw_hdrs_free(struct fw_hdr *hdr) +{ + struct fw_hdr *next; + + while (hdr) { + next = hdr->next; + kfree(hdr); + hdr = next; + } +} + + +/* Fill a firmware header descriptor from a memory buffer */ +static +int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt, + const char *_data, const u32 *data_itr, const u32 *data_top) +{ + size_t hdr_offset = (const char *) data_itr - _data; + size_t remaining_size = (void *) data_top - (void *) data_itr; + if (data_itr + 2 > data_top) { + dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at " + "offset %zu, limit %zu\n", + hdr_cnt, hdr_offset, + (const char *) data_itr + 2 - _data, + (const char *) data_top - _data); + return -EINVAL; + } + hdr->next = NULL; + hdr->address = le32_to_cpu(*data_itr++); + hdr->length = le32_to_cpu(*data_itr++); + hdr->bin = data_itr; + if (hdr->length > remaining_size) { + dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; " + "chunk too long (%zu bytes), only %zu left\n", + hdr_cnt, hdr_offset, hdr->length, remaining_size); + return -EINVAL; + } + return 0; +} + + +/** + * Get a buffer where the firmware is supposed to be and create a + * chain of headers linking them together. + * + * @phdr: where to place the pointer to the first header (headers link + * to the next via the @hdr->next ptr); need to free the whole + * chain when done. + * + * @_data: Pointer to the data buffer. + * + * @_data_size: Size of the data buffer (bytes); data size has to be a + * multiple of 4. Function will fail if not. + * + * Goes over the whole binary blob; reads the first chunk and creates + * a fw hdr from it (which points to where the data is in @_data and + * the length of the chunk); then goes on to the next chunk until + * done. Each header is linked to the next. + */ +static +int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr, + const char *_data, size_t data_size) +{ + int result; + unsigned hdr_cnt = 0; + u32 *data = (u32 *) _data, *data_itr, *data_top; + struct fw_hdr *hdr, **prev_hdr = phdr; + + result = -EINVAL; + /* Check size is ok and pointer is aligned */ + if (data_size % sizeof(u32) != 0) + goto error; + if ((unsigned long) _data % sizeof(u16) != 0) + goto error; + *phdr = NULL; + data_itr = data; + data_top = (u32 *) (_data + data_size); + while (data_itr < data_top) { + result = -ENOMEM; + hdr = kmalloc(sizeof(*hdr), GFP_KERNEL); + if (hdr == NULL) { + dev_err(i1480->dev, "Cannot allocate fw header " + "for chunk #%u\n", hdr_cnt); + goto error_alloc; + } + result = fw_hdr_load(i1480, hdr, hdr_cnt, + _data, data_itr, data_top); + if (result < 0) + goto error_load; + data_itr += 2 + hdr->length; + *prev_hdr = hdr; + prev_hdr = &hdr->next; + hdr_cnt++; + }; + *prev_hdr = NULL; + return 0; + +error_load: + kfree(hdr); +error_alloc: + fw_hdrs_free(*phdr); +error: + return result; +} + + +/** + * Compares a chunk of fw with one in the devices's memory + * + * @i1480: Device instance + * @hdr: Pointer to the firmware chunk + * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset + * where the difference was found (plus one). + * + * Kind of dirty and simplistic, but does the trick in both the PCI + * and USB version. 
We do a quick[er] memcmp(), and if it fails, we do + * a byte-by-byte to find the offset. + */ +static +ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr) +{ + ssize_t result = 0; + u32 src_itr = 0, cnt; + size_t size = hdr->length*sizeof(hdr->bin[0]); + size_t chunk_size; + u8 *bin = (u8 *) hdr->bin; + + while (size > 0) { + chunk_size = size < i1480->buf_size ? size : i1480->buf_size; + result = i1480->read(i1480, hdr->address + src_itr, chunk_size); + if (result < 0) { + dev_err(i1480->dev, "error reading for verification: " + "%zd\n", result); + goto error; + } + if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { + u8 *buf = i1480->cmd_buf; + d_printf(2, i1480->dev, + "original data @ %p + %u, %zu bytes\n", + bin, src_itr, result); + d_dump(4, i1480->dev, bin + src_itr, result); + for (cnt = 0; cnt < result; cnt++) + if (bin[src_itr + cnt] != buf[cnt]) { + dev_err(i1480->dev, "byte failed at " + "src_itr %u cnt %u [0x%02x " + "vs 0x%02x]\n", src_itr, cnt, + bin[src_itr + cnt], buf[cnt]); + result = src_itr + cnt + 1; + goto cmp_failed; + } + } + src_itr += result; + size -= result; + } + result = 0; +error: +cmp_failed: + return result; +} + + +/** + * Writes firmware headers to the device. + * + * @prd: PRD instance + * @hdr: Processed firmware + * @returns: 0 if ok, < 0 errno on error. + */ +static +int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr, + const char *fw_name, const char *fw_tag) +{ + struct device *dev = i1480->dev; + ssize_t result = 0; + struct fw_hdr *hdr_itr; + int verif_retry_count; + + d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr); + /* Now, header by header, push them to the hw */ + for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { + verif_retry_count = 0; +retry: + dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n", + hdr_itr->length * sizeof(hdr_itr->bin[0]), + hdr_itr->address); + result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin, + hdr_itr->length*sizeof(hdr_itr->bin[0])); + if (result < 0) { + dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):" + " %zd\n", fw_tag, fw_name, + hdr_itr->length * sizeof(hdr_itr->bin[0]), + hdr_itr->address, result); + break; + } + result = i1480_fw_cmp(i1480, hdr_itr); + if (result < 0) { + dev_err(dev, "%s fw '%s': verification read " + "failed (%zuB @ 0x%lx): %zd\n", + fw_tag, fw_name, + hdr_itr->length * sizeof(hdr_itr->bin[0]), + hdr_itr->address, result); + break; + } + if (result > 0) { /* Offset where it failed + 1 */ + result--; + dev_err(dev, "%s fw '%s': WARNING: verification " + "failed at 0x%lx: retrying\n", + fw_tag, fw_name, hdr_itr->address + result); + if (++verif_retry_count < 3) + goto retry; /* write this block again! */ + dev_err(dev, "%s fw '%s': verification failed at 0x%lx: " + "tried %d times\n", fw_tag, fw_name, + hdr_itr->address + result, verif_retry_count); + result = -EINVAL; + break; + } + } + d_fnend(3, dev, "(%zd)\n", result); + return result; +} + + +/** Puts the device in firmware upload mode.*/ +static +int mac_fw_upload_enable(struct i1480 *i1480) +{ + int result; + u32 reg = 0x800000c0; + u32 *buffer = (u32 *)i1480->cmd_buf; + + if (i1480->hw_rev > 1) + reg = 0x8000d0d4; + result = i1480->read(i1480, reg, sizeof(u32)); + if (result < 0) + goto error_cmd; + *buffer &= ~i1480_FW_UPLOAD_MODE_MASK; + result = i1480->write(i1480, reg, buffer, sizeof(u32)); + if (result < 0) + goto error_cmd; + return 0; +error_cmd: + dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result); + return result; +} + + +/** Gets the device out of firmware upload mode. 
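+ * (i.e. it sets the i1480_FW_UPLOAD_MODE_MASK bits again, undoing the
+ * clear done by mac_fw_upload_enable() above)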
*/ +static +int mac_fw_upload_disable(struct i1480 *i1480) +{ + int result; + u32 reg = 0x800000c0; + u32 *buffer = (u32 *)i1480->cmd_buf; + + if (i1480->hw_rev > 1) + reg = 0x8000d0d4; + result = i1480->read(i1480, reg, sizeof(u32)); + if (result < 0) + goto error_cmd; + *buffer |= i1480_FW_UPLOAD_MODE_MASK; + result = i1480->write(i1480, reg, buffer, sizeof(u32)); + if (result < 0) + goto error_cmd; + return 0; +error_cmd: + dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result); + return result; +} + + + +/** + * Generic function for uploading a MAC firmware. + * + * @i1480: Device instance + * @fw_name: Name of firmware file to upload. + * @fw_tag: Name of the firmware type (for messages) + * [eg: MAC, PRE] + * @do_wait: Wait for device to emit initialization done message (0 + * for PRE fws, 1 for MAC fws). + * @returns: 0 if ok, < 0 errno on error. + */ +static +int __mac_fw_upload(struct i1480 *i1480, const char *fw_name, + const char *fw_tag) +{ + int result; + const struct firmware *fw; + struct fw_hdr *fw_hdrs; + + d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag); + result = request_firmware(&fw, fw_name, i1480->dev); + if (result < 0) /* Up to caller to complain on -ENOENT */ + goto out; + d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name); + result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); + if (result < 0) { + dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " + "file: %d\n", fw_tag, fw_name, result); + goto out_release; + } + result = mac_fw_upload_enable(i1480); + if (result < 0) + goto out_hdrs_release; + result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag); + mac_fw_upload_disable(i1480); +out_hdrs_release: + if (result >= 0) + dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name); + else + dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), " + "power cycle device\n", fw_tag, fw_name, result); + fw_hdrs_free(fw_hdrs); +out_release: + release_firmware(fw); +out: + d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag, + result); + return result; +} + + +/** + * Upload a pre-PHY firmware + * + */ +int i1480_pre_fw_upload(struct i1480 *i1480) +{ + int result; + result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE"); + if (result == 0) + msleep(400); + return result; +} + + +/** + * Reset a the MAC and PHY + * + * @i1480: Device's instance + * @returns: 0 if ok, < 0 errno code on error + * + * We put the command on kmalloc'ed memory as some arches cannot do + * USB from the stack. The reply event is copied from an stage buffer, + * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details. + * + * We issue the reset to make sure the UWB controller reinits the PHY; + * this way we can now if the PHY init went ok. 
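+ *
+ * The command itself is just a general-purpose UWB_RC_CMD_RESET sent
+ * through i1480_cmd() (see the body below); failure of the reset is
+ * reported via bResultCode in the reply event.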
+ */ +static +int i1480_cmd_reset(struct i1480 *i1480) +{ + int result; + struct uwb_rccb *cmd = (void *) i1480->cmd_buf; + struct i1480_evt_reset { + struct uwb_rceb rceb; + u8 bResultCode; + } __attribute__((packed)) *reply = (void *) i1480->evt_buf; + + result = -ENOMEM; + cmd->bCommandType = UWB_RC_CET_GENERAL; + cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET); + reply->rceb.bEventType = UWB_RC_CET_GENERAL; + reply->rceb.wEvent = UWB_RC_CMD_RESET; + result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply)); + if (result < 0) + goto out; + if (reply->bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(i1480->dev, "RESET: command execution failed: %u\n", + reply->bResultCode); + result = -EIO; + } +out: + return result; + +} + + +/** Wait for the MAC FW to start running */ +static +int i1480_fw_is_running_q(struct i1480 *i1480) +{ + int cnt = 0; + int result; + u32 *val = (u32 *) i1480->cmd_buf; + + d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480); + for (cnt = 0; cnt < 10; cnt++) { + msleep(100); + result = i1480->read(i1480, 0x80080000, 4); + if (result < 0) { + dev_err(i1480->dev, "Can't read 0x8008000: %d\n", result); + goto out; + } + if (*val == 0x55555555UL) /* fw running? cool */ + goto out; + if (printk_ratelimit()) + d_printf(5, i1480->dev, "read #%d: 0x%08x\n", cnt, *val); + } + dev_err(i1480->dev, "Timed out waiting for fw to start\n"); + result = -ETIMEDOUT; +out: + d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); + return result; + +} + + +/** + * Upload MAC firmware, wait for it to start + * + * @i1480: Device instance + * @fw_name: Name of the file that contains the firmware + * + * This has to be called after the pre fw has been uploaded (if + * there is any). + */ +int i1480_mac_fw_upload(struct i1480 *i1480) +{ + int result = 0, deprecated_name = 0; + struct i1480_rceb *rcebe = (void *) i1480->evt_buf; + + d_fnstart(3, i1480->dev, "(%p)\n", i1480); + result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); + if (result == -ENOENT) { + result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, + "MAC"); + deprecated_name = 1; + } + if (result < 0) + return result; + if (deprecated_name == 1) + dev_warn(i1480->dev, + "WARNING: firmware file name %s is deprecated, " + "please rename to %s\n", + i1480->mac_fw_name_deprecate, i1480->mac_fw_name); + result = i1480_fw_is_running_q(i1480); + if (result < 0) + goto error_fw_not_running; + result = i1480->rc_setup ? 
i1480->rc_setup(i1480) : 0; + if (result < 0) { + dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n", + result); + goto error_setup; + } + result = i1480->wait_init_done(i1480); /* wait init'on */ + if (result < 0) { + dev_err(i1480->dev, "MAC fw '%s': Initialization timed out " + "(%d)\n", i1480->mac_fw_name, result); + goto error_init_timeout; + } + /* verify we got the right initialization done event */ + if (i1480->evt_result != sizeof(*rcebe)) { + dev_err(i1480->dev, "MAC fw '%s': initialization event returns " + "wrong size (%zu bytes vs %zu needed)\n", + i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); + dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32)); + goto error_size; + } + result = -EIO; + if (rcebe->rceb.bEventType != i1480_CET_VS1 + || le16_to_cpu(rcebe->rceb.wEvent) != i1480_EVT_RM_INIT_DONE) { + dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x " + "received; expected 0x%02x/%04x/00\n", + rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent), + rcebe->rceb.bEventContext, i1480_CET_VS1, + i1480_EVT_RM_INIT_DONE); + goto error_init_timeout; + } + result = i1480_cmd_reset(i1480); + if (result < 0) + dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n", + i1480->mac_fw_name, result); +error_fw_not_running: +error_init_timeout: +error_size: +error_setup: + d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); + return result; +} diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/uwb/i1480/dfu/phy.c new file mode 100644 index 0000000..3b1a87d --- /dev/null +++ b/drivers/uwb/i1480/dfu/phy.c @@ -0,0 +1,203 @@ +/* + * Intel Wireless UWB Link 1480 + * PHY parameters upload + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Code for uploading the PHY parameters to the PHY through the UWB + * Radio Control interface. + * + * We just send the data through the MPI interface using HWA-like + * commands and then reset the PHY to make sure it is ok. + */ +#include +#include +#include +#include +#include "i1480-dfu.h" + + +/** + * Write a value array to an address of the MPI interface + * + * @i1480: Device descriptor + * @data: Data array to write + * @size: Size of the data array + * @returns: 0 if ok, < 0 errno code on error. + * + * The data array is organized into pairs: + * + * ADDRESS VALUE + * + * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has + * to be a multiple of three. 
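+ *
+ * For illustration (a made-up value, just to show the triad layout):
+ * writing 0x12 to MPI address 0x0006 would be encoded as the three
+ * bytes { 0x00, 0x06, 0x12 }.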
+ */ +static +int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size) +{ + int result; + struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf; + struct i1480_evt_confirm *reply = i1480->evt_buf; + + BUG_ON(size > 480); + result = -ENOMEM; + cmd->rccb.bCommandType = i1480_CET_VS1; + cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE); + cmd->size = cpu_to_le16(size); + memcpy(cmd->data, data, size); + reply->rceb.bEventType = i1480_CET_VS1; + reply->rceb.wEvent = i1480_CMD_MPI_WRITE; + result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply)); + if (result < 0) + goto out; + if (reply->bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n", + reply->bResultCode); + result = -EIO; + } +out: + return result; +} + + +/** + * Read a value array to from an address of the MPI interface + * + * @i1480: Device descriptor + * @data: where to place the read array + * @srcaddr: Where to read from + * @size: Size of the data read array + * @returns: 0 if ok, < 0 errno code on error. + * + * The command data array is organized into pairs ADDR0 ADDR1..., and + * the returned data in ADDR0 VALUE0 ADDR1 VALUE1... + * + * We generate the command array to be a sequential read and then + * rearrange the result. + * + * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply. + * + * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount + * of values we can read is (512 - sizeof(*reply)) / 3 + */ +static +int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size) +{ + int result; + struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf; + struct i1480_evt_mpi_read *reply = i1480->evt_buf; + unsigned cnt; + + memset(i1480->cmd_buf, 0x69, 512); + memset(i1480->evt_buf, 0x69, 512); + + BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3); + result = -ENOMEM; + cmd->rccb.bCommandType = i1480_CET_VS1; + cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ); + cmd->size = cpu_to_le16(3*size); + for (cnt = 0; cnt < size; cnt++) { + cmd->data[cnt].page = (srcaddr + cnt) >> 8; + cmd->data[cnt].offset = (srcaddr + cnt) & 0xff; + } + reply->rceb.bEventType = i1480_CET_VS1; + reply->rceb.wEvent = i1480_CMD_MPI_READ; + result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size, + sizeof(*reply) + 3*size); + if (result < 0) + goto out; + if (reply->bResultCode != UWB_RC_RES_SUCCESS) { + dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n", + reply->bResultCode); + result = -EIO; + } + for (cnt = 0; cnt < size; cnt++) { + if (reply->data[cnt].page != (srcaddr + cnt) >> 8) + dev_err(i1480->dev, "MPI-READ: page inconsistency at " + "index %u: expected 0x%02x, got 0x%02x\n", cnt, + (srcaddr + cnt) >> 8, reply->data[cnt].page); + if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff)) + dev_err(i1480->dev, "MPI-READ: offset inconsistency at " + "index %u: expected 0x%02x, got 0x%02x\n", cnt, + (srcaddr + cnt) & 0x00ff, + reply->data[cnt].offset); + data[cnt] = reply->data[cnt].value; + } + result = 0; +out: + return result; +} + + +/** + * Upload a PHY firmware, wait for it to start + * + * @i1480: Device instance + * @fw_name: Name of the file that contains the firmware + * + * We assume the MAC fw is up and running. This means we can use the + * MPI interface to write the PHY firmware. Once done, we issue an + * MBOA Reset, which will force the MAC to reset and reinitialize the + * PHY. If that works, we are ready to go. 
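+ *
+ * In outline (matching the body below): request_firmware() the PHY
+ * image, push it with i1480_mpi_write() chunk by chunk, then read MPI
+ * page 0, offset 6 - a status of 0 there means the PHY came up
+ * correctly.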
+ * + * Max packet size for the MPI write is 512, so the max buffer is 480 + * (which gives us 160 byte triads of MSB, LSB and VAL for the data). + */ +int i1480_phy_fw_upload(struct i1480 *i1480) +{ + int result; + const struct firmware *fw; + const char *data_itr, *data_top; + const size_t MAX_BLK_SIZE = 480; /* 160 triads */ + size_t data_size; + u8 phy_stat; + + result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev); + if (result < 0) + goto out; + /* Loop writing data in chunks as big as possible until done. */ + for (data_itr = fw->data, data_top = data_itr + fw->size; + data_itr < data_top; data_itr += MAX_BLK_SIZE) { + data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr)); + result = i1480_mpi_write(i1480, data_itr, data_size); + if (result < 0) + goto error_mpi_write; + } + /* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */ + result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1); + if (result < 0) { + dev_err(i1480->dev, "PHY: can't get status: %d\n", result); + goto error_mpi_status; + } + if (phy_stat != 0) { + result = -ENODEV; + dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat); + goto error_phy_status; + } + dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name); +error_phy_status: +error_mpi_status: +error_mpi_write: + release_firmware(fw); + if (result < 0) + dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), " + "power cycle device\n", i1480->phy_fw_name, result); +out: + return result; +} diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c new file mode 100644 index 0000000..98eeeff --- /dev/null +++ b/drivers/uwb/i1480/dfu/usb.c @@ -0,0 +1,500 @@ +/* + * Intel Wireless UWB Link 1480 + * USB SKU firmware upload implementation + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * This driver will prepare the i1480 device to behave as a real + * Wireless USB HWA adaptor by uploading the firmware. + * + * When the device is connected or driver is loaded, i1480_usb_probe() + * is called--this will allocate and initialize the device structure, + * fill in the pointers to the common functions (read, write, + * wait_init_done and cmd for HWA command execution) and once that is + * done, call the common firmware uploading routine. Then clean up and + * return -ENODEV, as we don't attach to the device. + * + * The rest are the basic ops we implement that the fw upload code + * uses to do its job. All the ops in the common code are i1480->NAME, + * the functions are i1480_usb_NAME(). 
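(Aside, not part of the patch: the i1480->NAME indirection described above means the common DFU code never touches USB directly; it only calls the ops the bus glue fills in. A hedged sketch of such a call site -- i1480_example_poke() is made up and not a function in this series.)

/* Sketch: write one 32-bit little-endian word through whatever bus op was
 * plugged into i1480->write (i1480_usb_write() later in this file). */
static int i1480_example_poke(struct i1480 *i1480, u32 addr, u32 value)
{
        __le32 le_value = cpu_to_le32(value);
        int result;

        result = i1480->write(i1480, addr, &le_value, sizeof(le_value));
        if (result < 0)
                dev_err(i1480->dev, "write to 0x%08x failed: %d\n",
                        addr, result);
        return result;
}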
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "i1480-dfu.h" + +#define D_LOCAL 0 +#include + + +struct i1480_usb { + struct i1480 i1480; + struct usb_device *usb_dev; + struct usb_interface *usb_iface; + struct urb *neep_urb; /* URB for reading from EP1 */ +}; + + +static +void i1480_usb_init(struct i1480_usb *i1480_usb) +{ + i1480_init(&i1480_usb->i1480); +} + + +static +int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface) +{ + struct usb_device *usb_dev = interface_to_usbdev(iface); + int result = -ENOMEM; + + i1480_usb->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ + i1480_usb->usb_iface = usb_get_intf(iface); + usb_set_intfdata(iface, i1480_usb); /* Bind the driver to iface0 */ + i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL); + if (i1480_usb->neep_urb == NULL) + goto error; + return 0; + +error: + usb_set_intfdata(iface, NULL); + usb_put_intf(iface); + usb_put_dev(usb_dev); + return result; +} + + +static +void i1480_usb_destroy(struct i1480_usb *i1480_usb) +{ + usb_kill_urb(i1480_usb->neep_urb); + usb_free_urb(i1480_usb->neep_urb); + usb_set_intfdata(i1480_usb->usb_iface, NULL); + usb_put_intf(i1480_usb->usb_iface); + usb_put_dev(i1480_usb->usb_dev); +} + + +/** + * Write a buffer to a memory address in the i1480 device + * + * @i1480: i1480 instance + * @memory_address: + * Address where to write the data buffer to. + * @buffer: Buffer to the data + * @size: Size of the buffer [has to be < 512]. + * @returns: 0 if ok, < 0 errno code on error. + * + * Data buffers to USB cannot be on the stack or in vmalloc'ed areas, + * so we copy it to the local i1480 buffer before proceeding. In any + * case, we have a max size we can send, soooo. + */ +static +int i1480_usb_write(struct i1480 *i1480, u32 memory_address, + const void *buffer, size_t size) +{ + int result = 0; + struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); + size_t buffer_size, itr = 0; + + d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n", + i1480, memory_address, buffer, size); + BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ + while (size > 0) { + buffer_size = size < i1480->buf_size ? size : i1480->buf_size; + memcpy(i1480->cmd_buf, buffer + itr, buffer_size); + result = usb_control_msg( + i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), + 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + cpu_to_le16(memory_address & 0xffff), + cpu_to_le16((memory_address >> 16) & 0xffff), + i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); + if (result < 0) + break; + d_printf(3, i1480->dev, + "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n", + memory_address, result, buffer_size); + d_dump(4, i1480->dev, i1480->cmd_buf, result); + itr += result; + memory_address += result; + size -= result; + } + d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n", + i1480, memory_address, buffer, size, result); + return result; +} + + +/** + * Read a block [max size 512] of the device's memory to @i1480's buffer. + * + * @i1480: i1480 instance + * @memory_address: + * Address where to read from. + * @size: Size to read. Smaller than or equal to 512. + * @returns: >= 0 number of bytes written if ok, < 0 errno code on error. + * + * NOTE: if the memory address or block is incorrect, you might get a + * stall or a different memory read. Caller has to verify the + * memory address and size passed back in the @neh structure. 
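(Aside, not part of the patch: both the write above and the read below carry the 32-bit target address in the vendor control request by splitting it across wValue and wIndex. Schematically, in plain C:)

#include <stdint.h>

/* The low 16 bits of the memory address travel in wValue, the high 16 bits
 * in wIndex, as in the usb_control_msg() calls of this file. */
static void i1480_addr_split(uint32_t memory_address,
                             uint16_t *wvalue, uint16_t *windex)
{
        *wvalue = memory_address & 0xffff;
        *windex = (memory_address >> 16) & 0xffff;
}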
+ */ +static +int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) +{ + ssize_t result = 0, bytes = 0; + size_t itr, read_size = i1480->buf_size; + struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); + + d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n", + i1480, addr, size); + BUG_ON(size > i1480->buf_size); + BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ + BUG_ON(read_size > 512); + + if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */ + read_size = 4; + + for (itr = 0; itr < size; itr += read_size) { + size_t itr_addr = addr + itr; + size_t itr_size = min(read_size, size - itr); + result = usb_control_msg( + i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0), + 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + cpu_to_le16(itr_addr & 0xffff), + cpu_to_le16((itr_addr >> 16) & 0xffff), + i1480->cmd_buf + itr, itr_size, + 100 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(i1480->dev, "%s: USB read error: %zd\n", + __func__, result); + goto out; + } + if (result != itr_size) { + result = -EIO; + dev_err(i1480->dev, + "%s: partial read got only %zu bytes vs %zu expected\n", + __func__, result, itr_size); + goto out; + } + bytes += result; + } + result = bytes; +out: + d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n", + i1480, addr, size, result); + if (result > 0) + d_dump(4, i1480->dev, i1480->cmd_buf, result); + return result; +} + + +/** + * Callback for reads on the notification/event endpoint + * + * Just enables the completion read handler. + */ +static +void i1480_usb_neep_cb(struct urb *urb) +{ + struct i1480 *i1480 = urb->context; + struct device *dev = i1480->dev; + + switch (urb->status) { + case 0: + break; + case -ECONNRESET: /* Not an error, but a controlled situation; */ + case -ENOENT: /* (we killed the URB)...so, no broadcast */ + dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status); + break; + case -ESHUTDOWN: /* going away! */ + dev_dbg(dev, "NEEP: down %d\n", urb->status); + break; + default: + dev_err(dev, "NEEP: unknown status %d\n", urb->status); + break; + } + i1480->evt_result = urb->actual_length; + complete(&i1480->evt_complete); + return; +} + + +/** + * Wait for the MAC FW to initialize + * + * MAC FW sends a 0xfd/0101/00 notification to EP1 when done + * initializing. Get that notification into i1480->evt_buf; upper layer + * will verify it. + * + * Set i1480->evt_result with the result of getting the event or its + * size (if succesful). + * + * Delivers the data directly to i1480->evt_buf + */ +static +int i1480_usb_wait_init_done(struct i1480 *i1480) +{ + int result; + struct device *dev = i1480->dev; + struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); + struct usb_endpoint_descriptor *epd; + + d_fnstart(3, dev, "(%p)\n", i1480); + init_completion(&i1480->evt_complete); + i1480->evt_result = -EINPROGRESS; + epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; + usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev, + usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress), + i1480->evt_buf, i1480->buf_size, + i1480_usb_neep_cb, i1480, epd->bInterval); + result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL); + if (result < 0) { + dev_err(dev, "init done: cannot submit NEEP read: %d\n", + result); + goto error_submit; + } + /* Wait for the USB callback to get the data */ + result = wait_for_completion_interruptible_timeout( + &i1480->evt_complete, HZ); + if (result <= 0) { + result = result == 0 ? 
-ETIMEDOUT : result; + goto error_wait; + } + usb_kill_urb(i1480_usb->neep_urb); + d_fnend(3, dev, "(%p) = 0\n", i1480); + return 0; + +error_wait: + usb_kill_urb(i1480_usb->neep_urb); +error_submit: + i1480->evt_result = result; + d_fnend(3, dev, "(%p) = %d\n", i1480, result); + return result; +} + + +/** + * Generic function for issuing commands to the i1480 + * + * @i1480: i1480 instance + * @cmd_name: Name of the command (for error messages) + * @cmd: Pointer to command buffer + * @cmd_size: Size of the command buffer + * @reply: Buffer for the reply event + * @reply_size: Expected size back (including RCEB); the reply buffer + * is assumed to be as big as this. + * @returns: >= 0 size of the returned event data if ok, + * < 0 errno code on error. + * + * Arms the NE handle, issues the command to the device and checks the + * basics of the reply event. + */ +static +int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) +{ + int result; + struct device *dev = i1480->dev; + struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); + struct usb_endpoint_descriptor *epd; + struct uwb_rccb *cmd = i1480->cmd_buf; + u8 iface_no; + + d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); + /* Post a read on the notification & event endpoint */ + iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; + epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; + usb_fill_int_urb( + i1480_usb->neep_urb, i1480_usb->usb_dev, + usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress), + i1480->evt_buf, i1480->buf_size, + i1480_usb_neep_cb, i1480, epd->bInterval); + result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL); + if (result < 0) { + dev_err(dev, "%s: cannot submit NEEP read: %d\n", + cmd_name, result); + goto error_submit_ep1; + } + /* Now post the command on EP0 */ + result = usb_control_msg( + i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), + WA_EXEC_RC_CMD, + USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, + 0, iface_no, + cmd, cmd_size, + 100 /* FIXME: this is totally arbitrary */); + if (result < 0) { + dev_err(dev, "%s: control request failed: %d\n", + cmd_name, result); + goto error_submit_ep0; + } + d_fnend(3, dev, "(%p, %s, %zu) = %d\n", + i1480, cmd_name, cmd_size, result); + return result; + +error_submit_ep0: + usb_kill_urb(i1480_usb->neep_urb); +error_submit_ep1: + d_fnend(3, dev, "(%p, %s, %zu) = %d\n", + i1480, cmd_name, cmd_size, result); + return result; +} + + +/* + * Probe a i1480 device for uploading firmware. + * + * We attach only to interface #0, which is the radio control interface. 
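(Aside, not part of the patch: the command path above only posts the notification read and pushes the RCCB out on EP0; collecting the reply is left to the common DFU code, which waits on i1480->evt_complete much like i1480_usb_wait_init_done() does. A rough outline of that pairing -- this is not the real i1480_cmd() helper that phy.c calls, just the pattern, with a made-up function name.)

/* Outline only: issue a command via i1480->cmd and wait for the NEEP
 * callback to deliver the reply into i1480->evt_buf. */
static ssize_t i1480_example_cmd_roundtrip(struct i1480 *i1480,
                                           const char *name, size_t cmd_size)
{
        int result;

        init_completion(&i1480->evt_complete);
        i1480->evt_result = -EINPROGRESS;
        result = i1480->cmd(i1480, name, cmd_size);     /* i1480_usb_cmd() */
        if (result < 0)
                return result;
        if (wait_for_completion_timeout(&i1480->evt_complete, HZ) == 0)
                return -ETIMEDOUT;
        return i1480->evt_result;       /* size of the event received */
}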
+ */ +static +int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) +{ + struct i1480_usb *i1480_usb; + struct i1480 *i1480; + struct device *dev = &iface->dev; + int result; + + result = -ENODEV; + if (iface->cur_altsetting->desc.bInterfaceNumber != 0) { + dev_dbg(dev, "not attaching to iface %d\n", + iface->cur_altsetting->desc.bInterfaceNumber); + goto error; + } + if (iface->num_altsetting > 1 + && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { + /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ + result = usb_set_interface(interface_to_usbdev(iface), 0, 1); + if (result < 0) + dev_warn(dev, + "can't set altsetting 1 on iface 0: %d\n", + result); + } + + result = -ENOMEM; + i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); + if (i1480_usb == NULL) { + dev_err(dev, "Unable to allocate instance\n"); + goto error; + } + i1480_usb_init(i1480_usb); + + i1480 = &i1480_usb->i1480; + i1480->buf_size = 512; + i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL); + if (i1480->cmd_buf == NULL) { + dev_err(dev, "Cannot allocate transfer buffers\n"); + result = -ENOMEM; + goto error_buf_alloc; + } + i1480->evt_buf = i1480->cmd_buf + i1480->buf_size; + + result = i1480_usb_create(i1480_usb, iface); + if (result < 0) { + dev_err(dev, "Cannot create instance: %d\n", result); + goto error_create; + } + + /* setup the fops and upload the firmare */ + i1480->pre_fw_name = "i1480-pre-phy-0.0.bin"; + i1480->mac_fw_name = "i1480-usb-0.0.bin"; + i1480->mac_fw_name_deprecate = "ptc-0.0.bin"; + i1480->phy_fw_name = "i1480-phy-0.0.bin"; + i1480->dev = &iface->dev; + i1480->write = i1480_usb_write; + i1480->read = i1480_usb_read; + i1480->rc_setup = NULL; + i1480->wait_init_done = i1480_usb_wait_init_done; + i1480->cmd = i1480_usb_cmd; + + result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */ + if (result >= 0) { + usb_reset_device(i1480_usb->usb_dev); + result = -ENODEV; /* we don't want to bind to the iface */ + } + i1480_usb_destroy(i1480_usb); +error_create: + kfree(i1480->cmd_buf); +error_buf_alloc: + kfree(i1480_usb); +error: + return result; +} + +#define i1480_USB_DEV(v, p) \ +{ \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE \ + | USB_DEVICE_ID_MATCH_DEV_INFO \ + | USB_DEVICE_ID_MATCH_INT_INFO, \ + .idVendor = (v), \ + .idProduct = (p), \ + .bDeviceClass = 0xff, \ + .bDeviceSubClass = 0xff, \ + .bDeviceProtocol = 0xff, \ + .bInterfaceClass = 0xff, \ + .bInterfaceSubClass = 0xff, \ + .bInterfaceProtocol = 0xff, \ +} + + +/** USB device ID's that we handle */ +static struct usb_device_id i1480_usb_id_table[] = { + i1480_USB_DEV(0x8086, 0xdf3b), + i1480_USB_DEV(0x15a9, 0x0005), + i1480_USB_DEV(0x07d1, 0x3802), + i1480_USB_DEV(0x050d, 0x305a), + i1480_USB_DEV(0x3495, 0x3007), + {}, +}; +MODULE_DEVICE_TABLE(usb, i1480_usb_id_table); + + +static struct usb_driver i1480_dfu_driver = { + .name = "i1480-dfu-usb", + .id_table = i1480_usb_id_table, + .probe = i1480_usb_probe, + .disconnect = NULL, +}; + + +/* + * Initialize the i1480 DFU driver. + * + * We also need to register our function for guessing event sizes. 
+ */ +static int __init i1480_dfu_driver_init(void) +{ + return usb_register(&i1480_dfu_driver); +} +module_init(i1480_dfu_driver_init); + + +static void __exit i1480_dfu_driver_exit(void) +{ + usb_deregister(&i1480_dfu_driver); +} +module_exit(i1480_dfu_driver_exit); + + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB"); +MODULE_LICENSE("GPL"); diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c new file mode 100644 index 0000000..7bf8c6f --- /dev/null +++ b/drivers/uwb/i1480/i1480-est.c @@ -0,0 +1,99 @@ +/* + * Intel Wireless UWB Link 1480 + * Event Size tables for Wired Adaptors + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ + +#include +#include +#include +#include +#include "dfu/i1480-dfu.h" + + +/** Event size table for wEvents 0x00XX */ +static struct uwb_est_entry i1480_est_fd00[] = { + /* Anybody expecting this response has to use + * neh->extra_size to specify the real size that will + * come back. */ + [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) }, + [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) }, +#ifdef i1480_RCEB_EXTENDED + [0x09] = { + .size = sizeof(struct i1480_rceb), + .offset = 1 + offsetof(struct i1480_rceb, wParamLength), + }, +#endif +}; + +/** Event size table for wEvents 0x01XX */ +static struct uwb_est_entry i1480_est_fd01[] = { + [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) }, + [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 }, + [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 }, + [0xff & i1480_EVT_DEV_ID_CHANGE] = { + .size = sizeof(struct i1480_rceb) + 2 }, +}; + +static int i1480_est_init(void) +{ + int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, + i1480_est_fd00, + ARRAY_SIZE(i1480_est_fd00)); + if (result < 0) { + printk(KERN_ERR "Can't register EST table fd00: %d\n", result); + return result; + } + result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, + i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); + if (result < 0) { + printk(KERN_ERR "Can't register EST table fd01: %d\n", result); + return result; + } + return 0; +} +module_init(i1480_est_init); + +static void i1480_est_exit(void) +{ + uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, + i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00)); + uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, + i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); +} +module_exit(i1480_est_exit); + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables"); +MODULE_LICENSE("GPL"); + +/** + * USB device ID's that we handle + * + * [so we are loaded when this kind device is connected] + */ +static struct usb_device_id i1480_est_id_table[] = { + 
{ USB_DEVICE(0x8086, 0xdf3b), }, + { USB_DEVICE(0x8086, 0x0c3b), }, + { }, +}; +MODULE_DEVICE_TABLE(usb, i1480_est_id_table); -- cgit v0.10.2 From a21b963aa4a98c645b1fa3799f2e4a6ebb6c974a Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:21 +0100 Subject: uwb: add the i1480 WLP driver Add the driver for the WLP capability of the Intel i1480 device. Signed-off-by: David Vrabel diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig index 59acb5b..317e9fe 100644 --- a/drivers/uwb/Kconfig +++ b/drivers/uwb/Kconfig @@ -76,4 +76,14 @@ config UWB_I1480U To compile this driver select Y (built in) or M (module). It is safe to select any even if you do not have the hardware. +config UWB_I1480U_WLP + tristate "Support for Intel Wireless UWB Link 1480 HWA's WLP interface" + depends on UWB_I1480U && UWB_WLP && NET + help + This driver enables WLP support for the i1480 when connected via + USB. WLP is the WiMedia Link Protocol, or IP over UWB. + + To compile this driver select Y (built in) or M (module). It + is safe to select any even if you don't have the hardware. + endif # UWB diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile index d69da16..212bbc7 100644 --- a/drivers/uwb/i1480/Makefile +++ b/drivers/uwb/i1480/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o +obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp/ diff --git a/drivers/uwb/i1480/i1480-wlp.h b/drivers/uwb/i1480/i1480-wlp.h new file mode 100644 index 0000000..18a8b0e --- /dev/null +++ b/drivers/uwb/i1480/i1480-wlp.h @@ -0,0 +1,200 @@ +/* + * Intel 1480 Wireless UWB Link + * WLP specific definitions + * + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ + +#ifndef __i1480_wlp_h__ +#define __i1480_wlp_h__ + +#include +#include +#include +#include +#include + +/* New simplified header format? */ +#undef WLP_HDR_FMT_2 /* FIXME: rename */ + +/** + * Values of the Delivery ID & Type field when PCA or DRP + * + * The Delivery ID & Type field in the WLP TX header indicates whether + * the frame is PCA or DRP. This is done based on the high level bit of + * this field. + * We use this constant to test if the traffic is PCA or DRP as follows: + * if (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP) + * this is DRP traffic + * else + * this is PCA traffic + */ +enum deliver_id_type_bit { + WLP_DRP = 8, +}; + +/** + * WLP TX header + * + * Indicates UWB/WLP-specific transmission parameters for a network + * packet. 
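(Aside, not part of the patch: the PCA/DRP test spelled out in pseudo-code above, written out as a helper using the wlp_tx_hdr_delivery_id_type() accessor defined just below. The helper name is made up.)

static inline int wlp_tx_hdr_is_drp(const struct wlp_tx_hdr *hdr)
{
        /* high bit of the Delivery ID & Type field set => DRP, else PCA */
        return (wlp_tx_hdr_delivery_id_type(hdr) & WLP_DRP) != 0;
}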
+ */ +struct wlp_tx_hdr { + /* dword 0 */ + struct uwb_dev_addr dstaddr; + u8 key_index; + u8 mac_params; + /* dword 1 */ + u8 phy_params; +#ifndef WLP_HDR_FMT_2 + u8 reserved; + __le16 oui01; /* FIXME: not so sure if __le16 or u8[2] */ + /* dword 2 */ + u8 oui2; /* if all LE, it could be merged */ + __le16 prid; +#endif +} __attribute__((packed)); + +static inline int wlp_tx_hdr_delivery_id_type(const struct wlp_tx_hdr *hdr) +{ + return hdr->mac_params & 0x0f; +} + +static inline int wlp_tx_hdr_ack_policy(const struct wlp_tx_hdr *hdr) +{ + return (hdr->mac_params >> 4) & 0x07; +} + +static inline int wlp_tx_hdr_rts_cts(const struct wlp_tx_hdr *hdr) +{ + return (hdr->mac_params >> 7) & 0x01; +} + +static inline void wlp_tx_hdr_set_delivery_id_type(struct wlp_tx_hdr *hdr, int id) +{ + hdr->mac_params = (hdr->mac_params & ~0x0f) | id; +} + +static inline void wlp_tx_hdr_set_ack_policy(struct wlp_tx_hdr *hdr, + enum uwb_ack_pol policy) +{ + hdr->mac_params = (hdr->mac_params & ~0x70) | (policy << 4); +} + +static inline void wlp_tx_hdr_set_rts_cts(struct wlp_tx_hdr *hdr, int rts_cts) +{ + hdr->mac_params = (hdr->mac_params & ~0x80) | (rts_cts << 7); +} + +static inline enum uwb_phy_rate wlp_tx_hdr_phy_rate(const struct wlp_tx_hdr *hdr) +{ + return hdr->phy_params & 0x0f; +} + +static inline int wlp_tx_hdr_tx_power(const struct wlp_tx_hdr *hdr) +{ + return (hdr->phy_params >> 4) & 0x0f; +} + +static inline void wlp_tx_hdr_set_phy_rate(struct wlp_tx_hdr *hdr, enum uwb_phy_rate rate) +{ + hdr->phy_params = (hdr->phy_params & ~0x0f) | rate; +} + +static inline void wlp_tx_hdr_set_tx_power(struct wlp_tx_hdr *hdr, int pwr) +{ + hdr->phy_params = (hdr->phy_params & ~0xf0) | (pwr << 4); +} + + +/** + * WLP RX header + * + * Provides UWB/WLP-specific transmission data for a received + * network packet. 
+ */ +struct wlp_rx_hdr { + /* dword 0 */ + struct uwb_dev_addr dstaddr; + struct uwb_dev_addr srcaddr; + /* dword 1 */ + u8 LQI; + s8 RSSI; + u8 reserved3; +#ifndef WLP_HDR_FMT_2 + u8 oui0; + /* dword 2 */ + __le16 oui12; + __le16 prid; +#endif +} __attribute__((packed)); + + +/** User configurable options for WLP */ +struct wlp_options { + struct mutex mutex; /* access to user configurable options*/ + struct wlp_tx_hdr def_tx_hdr; /* default tx hdr */ + u8 pca_base_priority; + u8 bw_alloc; /*index into bw_allocs[] for PCA/DRP reservations*/ +}; + + +static inline +void wlp_options_init(struct wlp_options *options) +{ + mutex_init(&options->mutex); + wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, UWB_ACK_INM); + wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, 1); + /* FIXME: default to phy caps */ + wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, UWB_PHY_RATE_480); +#ifndef WLP_HDR_FMT_2 + options->def_tx_hdr.prid = cpu_to_le16(0x0000); +#endif +} + + +/* sysfs helpers */ + +extern ssize_t uwb_pca_base_priority_store(struct wlp_options *, + const char *, size_t); +extern ssize_t uwb_pca_base_priority_show(const struct wlp_options *, char *); +extern ssize_t uwb_bw_alloc_store(struct wlp_options *, const char *, size_t); +extern ssize_t uwb_bw_alloc_show(const struct wlp_options *, char *); +extern ssize_t uwb_ack_policy_store(struct wlp_options *, + const char *, size_t); +extern ssize_t uwb_ack_policy_show(const struct wlp_options *, char *); +extern ssize_t uwb_rts_cts_store(struct wlp_options *, const char *, size_t); +extern ssize_t uwb_rts_cts_show(const struct wlp_options *, char *); +extern ssize_t uwb_phy_rate_store(struct wlp_options *, const char *, size_t); +extern ssize_t uwb_phy_rate_show(const struct wlp_options *, char *); + + +/** Simple bandwidth allocation (temporary and too simple) */ +struct wlp_bw_allocs { + const char *name; + struct { + u8 mask, stream; + } tx, rx; +}; + + +#endif /* #ifndef __i1480_wlp_h__ */ diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile new file mode 100644 index 0000000..fe6709b --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o + +i1480u-wlp-objs := \ + lc.o \ + netdev.o \ + rx.o \ + sysfs.o \ + tx.o diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h new file mode 100644 index 0000000..5f1b2951 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h @@ -0,0 +1,284 @@ +/* + * Intel 1480 Wireless UWB Link USB + * Header formats, constants, general internal interfaces + * + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * This is not an standard interface. + * + * FIXME: docs + * + * i1480u-wlp is pretty simple: two endpoints, one for tx, one for + * rx. rx is polled. 
Network packets (ethernet, whatever) are wrapped + * in i1480 TX or RX headers (for sending over the air), and these + * packets are wrapped in UNTD headers (for sending to the WLP UWB + * controller). + * + * UNTD packets (UNTD hdr + i1480 hdr + network packet) packets + * cannot be bigger than i1480u_MAX_FRG_SIZE. When this happens, the + * i1480 packet is broken in chunks/packets: + * + * UNTD-1st.hdr + i1480.hdr + payload + * UNTD-next.hdr + payload + * ... + * UNTD-last.hdr + payload + * + * so that each packet is smaller or equal than i1480u_MAX_FRG_SIZE. + * + * All HW structures and bitmaps are little endian, so we need to play + * ugly tricks when defining bitfields. Hoping for the day GCC + * implements __attribute__((endian(1234))). + * + * FIXME: ROADMAP to the whole implementation + */ + +#ifndef __i1480u_wlp_h__ +#define __i1480u_wlp_h__ + +#include +#include +#include /* struct uwb_rc, struct uwb_notifs_handler */ +#include +#include "../i1480-wlp.h" + +#undef i1480u_FLOW_CONTROL /* Enable flow control code */ + +/** + * Basic flow control + */ +enum { + i1480u_TX_INFLIGHT_MAX = 1000, + i1480u_TX_INFLIGHT_THRESHOLD = 100, +}; + +/** Maximum size of a transaction that we can tx/rx */ +enum { + /* Maximum packet size computed as follows: max UNTD header (8) + + * i1480 RX header (8) + max Ethernet header and payload (4096) + + * Padding added by skb_reserve (2) to make post Ethernet payload + * start on 16 byte boundary*/ + i1480u_MAX_RX_PKT_SIZE = 4114, + i1480u_MAX_FRG_SIZE = 512, + i1480u_RX_BUFS = 9, +}; + + +/** + * UNTD packet type + * + * We need to fragment any payload whose UNTD packet is going to be + * bigger than i1480u_MAX_FRG_SIZE. + */ +enum i1480u_pkt_type { + i1480u_PKT_FRAG_1ST = 0x1, + i1480u_PKT_FRAG_NXT = 0x0, + i1480u_PKT_FRAG_LST = 0x2, + i1480u_PKT_FRAG_CMP = 0x3 +}; +enum { + i1480u_PKT_NONE = 0x4, +}; + +/** USB Network Transfer Descriptor - common */ +struct untd_hdr { + u8 type; + __le16 len; +} __attribute__((packed)); + +static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr) +{ + return hdr->type & 0x03; +} + +static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr) +{ + return (hdr->type >> 2) & 0x01; +} + +static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type) +{ + hdr->type = (hdr->type & ~0x03) | type; +} + +static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx) +{ + hdr->type = (hdr->type & ~0x04) | (rx_tx << 2); +} + + +/** + * USB Network Transfer Descriptor - Complete Packet + * + * This is for a packet that is smaller (header + payload) than + * i1480u_MAX_FRG_SIZE. + * + * @hdr.total_len is the size of the payload; the payload doesn't + * count this header nor the padding, but includes the size of i1480 + * header. + */ +struct untd_hdr_cmp { + struct untd_hdr hdr; + u8 padding; +} __attribute__((packed)); + + +/** + * USB Network Transfer Descriptor - First fragment + * + * @hdr.len is the size of the *whole packet* (excluding UNTD + * headers); @fragment_len is the size of the payload (excluding UNTD + * headers, but including i1480 headers). + */ +struct untd_hdr_1st { + struct untd_hdr hdr; + __le16 fragment_len; + u8 padding[3]; +} __attribute__((packed)); + + +/** + * USB Network Transfer Descriptor - Next / Last [Rest] + * + * @hdr.len is the size of the payload, not including headrs. 
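(Aside, not part of the patch: a first-order sketch of the fragment arithmetic described above. Every USB transfer must fit in i1480u_MAX_FRG_SIZE; the estimate below ignores the larger first-fragment header and just uses the "rest" header, struct untd_hdr_rst, declared right after this comment. The real splitting is done in tx.c; the helper name is made up.)

/* Rough estimate of how many UNTD fragments a WLP packet of
 * 'wlp_packet_size' bytes (i1480 header + network payload) needs. */
static inline unsigned int i1480u_frg_estimate(size_t wlp_packet_size)
{
        size_t per_frg = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_rst);

        return DIV_ROUND_UP(wlp_packet_size, per_frg);
}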
+ */ +struct untd_hdr_rst { + struct untd_hdr hdr; + u8 padding; +} __attribute__((packed)); + + +/** + * Transmission context + * + * Wraps all the stuff needed to track a pending/active tx + * operation. + */ +struct i1480u_tx { + struct list_head list_node; + struct i1480u *i1480u; + struct urb *urb; + + struct sk_buff *skb; + struct wlp_tx_hdr *wlp_tx_hdr; + + void *buf; /* if NULL, no new buf was used */ + size_t buf_size; +}; + +/** + * Basic flow control + * + * We maintain a basic flow control counter. "count" how many TX URBs are + * outstanding. Only allow "max" + * TX URBs to be outstanding. If this value is reached the queue will be + * stopped. The queue will be restarted when there are + * "threshold" URBs outstanding. + * Maintain a counter of how many time the TX queue needed to be restarted + * due to the "max" being exceeded and the "threshold" reached again. The + * timestamp "restart_ts" is to keep track from when the counter was last + * queried (see sysfs handling of file wlp_tx_inflight). + */ +struct i1480u_tx_inflight { + atomic_t count; + unsigned long max; + unsigned long threshold; + unsigned long restart_ts; + atomic_t restart_count; +}; + +/** + * Instance of a i1480u WLP interface + * + * Keeps references to the USB device that wraps it, as well as it's + * interface and associated UWB host controller. As well, it also + * keeps a link to the netdevice for integration into the networking + * stack. + * We maintian separate error history for the tx and rx endpoints because + * the implementation does not rely on locking - having one shared + * structure between endpoints may cause problems. Adding locking to the + * implementation will have higher cost than adding a separate structure. + */ +struct i1480u { + struct usb_device *usb_dev; + struct usb_interface *usb_iface; + struct net_device *net_dev; + + spinlock_t lock; + struct net_device_stats stats; + + /* RX context handling */ + struct sk_buff *rx_skb; + struct uwb_dev_addr rx_srcaddr; + size_t rx_untd_pkt_size; + struct i1480u_rx_buf { + struct i1480u *i1480u; /* back pointer */ + struct urb *urb; + struct sk_buff *data; /* i1480u_MAX_RX_PKT_SIZE each */ + } rx_buf[i1480u_RX_BUFS]; /* N bufs */ + + spinlock_t tx_list_lock; /* TX context */ + struct list_head tx_list; + u8 tx_stream; + + struct stats lqe_stats, rssi_stats; /* radio statistics */ + + /* Options we can set from sysfs */ + struct wlp_options options; + struct uwb_notifs_handler uwb_notifs_handler; + struct edc tx_errors; + struct edc rx_errors; + struct wlp wlp; +#ifdef i1480u_FLOW_CONTROL + struct urb *notif_urb; + struct edc notif_edc; /* error density counter */ + u8 notif_buffer[1]; +#endif + struct i1480u_tx_inflight tx_inflight; +}; + +/* Internal interfaces */ +extern void i1480u_rx_cb(struct urb *urb); +extern int i1480u_rx_setup(struct i1480u *); +extern void i1480u_rx_release(struct i1480u *); +extern void i1480u_tx_release(struct i1480u *); +extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *, + struct uwb_dev_addr *); +extern void i1480u_stop_queue(struct wlp *); +extern void i1480u_start_queue(struct wlp *); +extern int i1480u_sysfs_setup(struct i1480u *); +extern void i1480u_sysfs_release(struct i1480u *); + +/* netdev interface */ +extern int i1480u_open(struct net_device *); +extern int i1480u_stop(struct net_device *); +extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *); +extern void i1480u_tx_timeout(struct net_device *); +extern int i1480u_set_config(struct net_device *, struct ifmap *); 
+extern struct net_device_stats *i1480u_get_stats(struct net_device *); +extern int i1480u_change_mtu(struct net_device *, int); +extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs); + +/* bandwidth allocation callback */ +extern void i1480u_bw_alloc_cb(struct uwb_rsv *); + +/* Sys FS */ +extern struct attribute_group i1480u_wlp_attr_group; + +#endif /* #ifndef __i1480u_wlp_h__ */ diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c new file mode 100644 index 0000000..737d60c --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/lc.c @@ -0,0 +1,421 @@ +/* + * WUSB Wire Adapter: WLP interface + * Driver for the Linux Network stack. + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * + * This implements a very simple network driver for the WLP USB + * device that is associated to a UWB (Ultra Wide Band) host. + * + * This is seen as an interface of a composite device. Once the UWB + * host has an association to another WLP capable device, the + * networking interface (aka WLP) can start to send packets back and + * forth. + * + * Limitations: + * + * - Hand cranked; can't ifup the interface until there is an association + * + * - BW allocation very simplistic [see i1480u_mas_set() and callees]. + * + * + * ROADMAP: + * + * ENTRY POINTS (driver model): + * + * i1480u_driver_{exit,init}(): initialization of the driver. + * + * i1480u_probe(): called by the driver code when a device + * matching 'i1480u_id_table' is connected. + * + * This allocs a netdev instance, inits with + * i1480u_add(), then registers_netdev(). + * i1480u_init() + * i1480u_add() + * + * i1480u_disconnect(): device has been disconnected/module + * is being removed. + * i1480u_rm() + */ +#include +#include +#include +#include +#include "i1480u-wlp.h" + + + +static inline +void i1480u_init(struct i1480u *i1480u) +{ + /* nothing so far... doesn't it suck? */ + spin_lock_init(&i1480u->lock); + INIT_LIST_HEAD(&i1480u->tx_list); + spin_lock_init(&i1480u->tx_list_lock); + wlp_options_init(&i1480u->options); + edc_init(&i1480u->tx_errors); + edc_init(&i1480u->rx_errors); +#ifdef i1480u_FLOW_CONTROL + edc_init(&i1480u->notif_edc); +#endif + stats_init(&i1480u->lqe_stats); + stats_init(&i1480u->rssi_stats); + wlp_init(&i1480u->wlp); +} + +/** + * Fill WLP device information structure + * + * The structure will contain a few character arrays, each ending with a + * null terminated string. Each string has to fit (excluding terminating + * character) into a specified range obtained from the WLP substack. + * + * It is still not clear exactly how this device information should be + * obtained. Until we find out we use the USB device descriptor as backup, some + * information elements have intuitive mappings, other not. 
+ */ +static +void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info) +{ + struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp); + struct usb_device *usb_dev = i1480u->usb_dev; + /* Treat device name and model name the same */ + if (usb_dev->descriptor.iProduct) { + usb_string(usb_dev, usb_dev->descriptor.iProduct, + dev_info->name, sizeof(dev_info->name)); + usb_string(usb_dev, usb_dev->descriptor.iProduct, + dev_info->model_name, sizeof(dev_info->model_name)); + } + if (usb_dev->descriptor.iManufacturer) + usb_string(usb_dev, usb_dev->descriptor.iManufacturer, + dev_info->manufacturer, + sizeof(dev_info->manufacturer)); + scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x", + __le16_to_cpu(usb_dev->descriptor.bcdDevice)); + if (usb_dev->descriptor.iSerialNumber) + usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, + dev_info->serial, sizeof(dev_info->serial)); + /* FIXME: where should we obtain category? */ + dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER); + /* FIXME: Complete OUI and OUIsubdiv attributes */ +} + +#ifdef i1480u_FLOW_CONTROL +/** + * Callback for the notification endpoint + * + * This mostly controls the xon/xoff protocol. In case of hard error, + * we stop the queue. If not, we always retry. + */ +static +void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs) +{ + struct i1480u *i1480u = urb->context; + struct usb_interface *usb_iface = i1480u->usb_iface; + struct device *dev = &usb_iface->dev; + int result; + + switch (urb->status) { + case 0: /* Got valid data, do xon/xoff */ + switch (i1480u->notif_buffer[0]) { + case 'N': + dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies); + netif_stop_queue(i1480u->net_dev); + break; + case 'A': + dev_err(dev, "XON STARTING queue at %lu\n", jiffies); + netif_start_queue(i1480u->net_dev); + break; + default: + dev_err(dev, "NEP: unknown data 0x%02hhx\n", + i1480u->notif_buffer[0]); + } + break; + case -ECONNRESET: /* Controlled situation ... */ + case -ENOENT: /* we killed the URB... */ + dev_err(dev, "NEP: URB reset/noent %d\n", urb->status); + goto error; + case -ESHUTDOWN: /* going away! 
*/ + dev_err(dev, "NEP: URB down %d\n", urb->status); + goto error; + default: /* Retry unless it gets ugly */ + if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)) { + dev_err(dev, "NEP: URB max acceptable errors " + "exceeded; resetting device\n"); + goto error_reset; + } + dev_err(dev, "NEP: URB error %d\n", urb->status); + break; + } + result = usb_submit_urb(urb, GFP_ATOMIC); + if (result < 0) { + dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n", + result); + goto error_reset; + } + return; + +error_reset: + wlp_reset_all(&i1480u->wlp); +error: + netif_stop_queue(i1480u->net_dev); + return; +} +#endif + +static +int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface) +{ + int result = -ENODEV; + struct wlp *wlp = &i1480u->wlp; + struct usb_device *usb_dev = interface_to_usbdev(iface); + struct net_device *net_dev = i1480u->net_dev; + struct uwb_rc *rc; + struct uwb_dev *uwb_dev; +#ifdef i1480u_FLOW_CONTROL + struct usb_endpoint_descriptor *epd; +#endif + + i1480u->usb_dev = usb_get_dev(usb_dev); + i1480u->usb_iface = iface; + rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev); + if (rc == NULL) { + dev_err(&iface->dev, "Cannot get associated UWB Radio " + "Controller\n"); + goto out; + } + wlp->xmit_frame = i1480u_xmit_frame; + wlp->fill_device_info = i1480u_fill_device_info; + wlp->stop_queue = i1480u_stop_queue; + wlp->start_queue = i1480u_start_queue; + result = wlp_setup(wlp, rc); + if (result < 0) { + dev_err(&iface->dev, "Cannot setup WLP\n"); + goto error_wlp_setup; + } + result = 0; + ether_setup(net_dev); /* make it an etherdevice */ + uwb_dev = &rc->uwb_dev; + /* FIXME: hookup address change notifications? */ + + memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data, + sizeof(net_dev->dev_addr)); + + net_dev->hard_header_len = sizeof(struct untd_hdr_cmp) + + sizeof(struct wlp_tx_hdr) + + WLP_DATA_HLEN + + ETH_HLEN; + net_dev->mtu = 3500; + net_dev->tx_queue_len = 20; /* FIXME: maybe use 1000? */ + +/* net_dev->flags &= ~IFF_BROADCAST; FIXME: BUG in firmware */ + /* FIXME: multicast disabled */ + net_dev->flags &= ~IFF_MULTICAST; + net_dev->features &= ~NETIF_F_SG; + net_dev->features &= ~NETIF_F_FRAGLIST; + /* All NETIF_F_*_CSUM disabled */ + net_dev->features |= NETIF_F_HIGHDMA; + net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default?
*/ + + net_dev->open = i1480u_open; + net_dev->stop = i1480u_stop; + net_dev->hard_start_xmit = i1480u_hard_start_xmit; + net_dev->tx_timeout = i1480u_tx_timeout; + net_dev->get_stats = i1480u_get_stats; + net_dev->set_config = i1480u_set_config; + net_dev->change_mtu = i1480u_change_mtu; + +#ifdef i1480u_FLOW_CONTROL + /* Notification endpoint setup (submitted when we open the device) */ + i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL); + if (i1480u->notif_urb == NULL) { + dev_err(&iface->dev, "Unable to allocate notification URB\n"); + result = -ENOMEM; + goto error_urb_alloc; + } + epd = &iface->cur_altsetting->endpoint[0].desc; + usb_fill_int_urb(i1480u->notif_urb, usb_dev, + usb_rcvintpipe(usb_dev, epd->bEndpointAddress), + i1480u->notif_buffer, sizeof(i1480u->notif_buffer), + i1480u_notif_cb, i1480u, epd->bInterval); + +#endif + + i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX; + i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD; + i1480u->tx_inflight.restart_ts = jiffies; + usb_set_intfdata(iface, i1480u); + return result; + +#ifdef i1480u_FLOW_CONTROL +error_urb_alloc: +#endif + wlp_remove(wlp); +error_wlp_setup: + uwb_rc_put(rc); +out: + usb_put_dev(i1480u->usb_dev); + return result; +} + +static void i1480u_rm(struct i1480u *i1480u) +{ + struct uwb_rc *rc = i1480u->wlp.rc; + usb_set_intfdata(i1480u->usb_iface, NULL); +#ifdef i1480u_FLOW_CONTROL + usb_kill_urb(i1480u->notif_urb); + usb_free_urb(i1480u->notif_urb); +#endif + wlp_remove(&i1480u->wlp); + uwb_rc_put(rc); + usb_put_dev(i1480u->usb_dev); +} + +/** Just setup @net_dev's i1480u private data */ +static void i1480u_netdev_setup(struct net_device *net_dev) +{ + struct i1480u *i1480u = netdev_priv(net_dev); + /* Initialize @i1480u */ + memset(i1480u, 0, sizeof(*i1480u)); + i1480u_init(i1480u); +} + +/** + * Probe an i1480u interface and register it + * + * @iface: USB interface to link to + * @id: USB class/subclass/protocol id + * @returns: 0 if ok, < 0 errno code on error. + * + * Does basic housekeeping stuff and then allocs a netdev with space + * for the i1480u data. Initializes, registers in i1480u, registers in + * netdev, ready to go. + */ +static int i1480u_probe(struct usb_interface *iface, + const struct usb_device_id *id) +{ + int result; + struct net_device *net_dev; + struct device *dev = &iface->dev; + struct i1480u *i1480u; + + /* Allocate instance [calls i1480u_netdev_setup() on it] */ + result = -ENOMEM; + net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup); + if (net_dev == NULL) { + dev_err(dev, "no memory for network device instance\n"); + goto error_alloc_netdev; + } + SET_NETDEV_DEV(net_dev, dev); + i1480u = netdev_priv(net_dev); + i1480u->net_dev = net_dev; + result = i1480u_add(i1480u, iface); /* Now setup all the wlp stuff */ + if (result < 0) { + dev_err(dev, "cannot add i1480u device: %d\n", result); + goto error_i1480u_add; + } + result = register_netdev(net_dev); /* Okey dokey, bring it up */ + if (result < 0) { + dev_err(dev, "cannot register network device: %d\n", result); + goto error_register_netdev; + } + result = i1480u_sysfs_setup(i1480u); + if (result < 0) + goto error_sysfs_init; + return 0; + +error_sysfs_init: + unregister_netdev(net_dev); +error_register_netdev: + i1480u_rm(i1480u); +error_i1480u_add: + free_netdev(net_dev); +error_alloc_netdev: + return result; +} + + +/** + * Disconnect an i1480u from the system. + * + * i1480u_stop() has been called before, so all the rx and tx contexts + * have been taken down already.
Make sure the queue is stopped, + * unregister netdev and i1480u, free and kill. + */ +static void i1480u_disconnect(struct usb_interface *iface) +{ + struct i1480u *i1480u; + struct net_device *net_dev; + + i1480u = usb_get_intfdata(iface); + net_dev = i1480u->net_dev; + netif_stop_queue(net_dev); +#ifdef i1480u_FLOW_CONTROL + usb_kill_urb(i1480u->notif_urb); +#endif + i1480u_sysfs_release(i1480u); + unregister_netdev(net_dev); + i1480u_rm(i1480u); + free_netdev(net_dev); +} + +static struct usb_device_id i1480u_id_table[] = { + { + .match_flags = USB_DEVICE_ID_MATCH_DEVICE \ + | USB_DEVICE_ID_MATCH_DEV_INFO \ + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x8086, + .idProduct = 0x0c3b, + .bDeviceClass = 0xef, + .bDeviceSubClass = 0x02, + .bDeviceProtocol = 0x02, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + }, + {}, +}; +MODULE_DEVICE_TABLE(usb, i1480u_id_table); + +static struct usb_driver i1480u_driver = { + .name = KBUILD_MODNAME, + .probe = i1480u_probe, + .disconnect = i1480u_disconnect, + .id_table = i1480u_id_table, +}; + +static int __init i1480u_driver_init(void) +{ + return usb_register(&i1480u_driver); +} +module_init(i1480u_driver_init); + + +static void __exit i1480u_driver_exit(void) +{ + usb_deregister(&i1480u_driver); +} +module_exit(i1480u_driver_exit); + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB"); +MODULE_LICENSE("GPL"); diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c new file mode 100644 index 0000000..8802ac4 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c @@ -0,0 +1,368 @@ +/* + * WUSB Wire Adapter: WLP interface + * Driver for the Linux Network stack. + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * + * Implementation of the netdevice linkage (except tx and rx related stuff). + * + * ROADMAP: + * + * ENTRY POINTS (Net device): + * + * i1480u_open(): Called when we ifconfig up the interface; + * associates to a UWB host controller, reserves + * bandwidth (MAS), sets up RX USB URB and starts + * the queue. + * + * i1480u_stop(): Called when we ifconfig down a interface; + * reverses _open(). 
+ * + * i1480u_set_config(): + */ + +#include +#include +#include +#include "i1480u-wlp.h" + +struct i1480u_cmd_set_ip_mas { + struct uwb_rccb rccb; + struct uwb_dev_addr addr; + u8 stream; + u8 owner; + u8 type; /* enum uwb_drp_type */ + u8 baMAS[32]; +} __attribute__((packed)); + + +static +int i1480u_set_ip_mas( + struct uwb_rc *rc, + const struct uwb_dev_addr *dstaddr, + u8 stream, u8 owner, u8 type, unsigned long *mas) +{ + + int result; + struct i1480u_cmd_set_ip_mas *cmd; + struct uwb_rc_evt_confirm reply; + + result = -ENOMEM; + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + goto error_kzalloc; + cmd->rccb.bCommandType = 0xfd; + cmd->rccb.wCommand = cpu_to_le16(0x000e); + cmd->addr = *dstaddr; + cmd->stream = stream; + cmd->owner = owner; + cmd->type = type; + if (mas == NULL) + memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS)); + else + memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS)); + reply.rceb.bEventType = 0xfd; + reply.rceb.wEvent = cpu_to_le16(0x000e); + result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd), + &reply.rceb, sizeof(reply)); + if (result < 0) + goto error_cmd; + if (reply.bResultCode != UWB_RC_RES_FAIL) { + dev_err(&rc->uwb_dev.dev, + "SET-IP-MAS: command execution failed: %d\n", + reply.bResultCode); + result = -EIO; + } +error_cmd: + kfree(cmd); +error_kzalloc: + return result; +} + +/* + * Inform a WLP interface of a MAS reservation + * + * @rc is assumed refcnted. + */ +/* FIXME: detect if remote device is WLP capable? */ +static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc, + u8 stream, u8 owner, u8 type, unsigned long *mas) +{ + int result = 0; + struct device *dev = &rc->uwb_dev.dev; + + result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner, + type, mas); + if (result < 0) { + char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE]; + uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf), + &rc->uwb_dev.dev_addr); + uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf), + &uwb_dev->dev_addr); + dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n", + rcaddrbuf, devaddrbuf, result); + } + return result; +} + +/** + * Called by bandwidth allocator when change occurs in reservation. + * + * @rsv: The reservation that is being established, modified, or + * terminated. + * + * When a reservation is established, modified, or terminated the upper layer + * (WLP here) needs set/update the currently available Media Access Slots + * that can be use for IP traffic. + * + * Our action taken during failure depends on how the reservation is being + * changed: + * - if reservation is being established we do nothing if we cannot set the + * new MAS to be used + * - if reservation is being terminated we revert back to PCA whether the + * SET IP MAS command succeeds or not. 
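(Aside, not part of the patch: the SET-IP-MAS command above carries the allocation as a 32-byte bitmap, baMAS[32], i.e. one bit per Medium Access Slot. For illustration, such a bitmap could be built from a list of slot indexes as below -- plain C, LSB-first bit order within each byte assumed, helper name made up.)

#include <stdint.h>
#include <string.h>

/* Set one bit per MAS index (0..255) in a 32-byte bitmap. */
static void mas_list_to_bitmap(uint8_t bitmap[32],
                               const unsigned int *mas, size_t count)
{
        size_t i;

        memset(bitmap, 0, 32);
        for (i = 0; i < count; i++)
                bitmap[mas[i] / 8] |= 1u << (mas[i] % 8);
}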
+ */ +void i1480u_bw_alloc_cb(struct uwb_rsv *rsv) +{ + int result = 0; + struct i1480u *i1480u = rsv->pal_priv; + struct device *dev = &i1480u->usb_iface->dev; + struct uwb_dev *target_dev = rsv->target.dev; + struct uwb_rc *rc = i1480u->wlp.rc; + u8 stream = rsv->stream; + int type = rsv->type; + int is_owner = rsv->owner == &rc->uwb_dev; + unsigned long *bmp = rsv->mas.bm; + + dev_err(dev, "WLP callback called - sending set ip mas\n"); + /*user cannot change options while setting configuration*/ + mutex_lock(&i1480u->options.mutex); + switch (rsv->state) { + case UWB_RSV_STATE_T_ACCEPTED: + case UWB_RSV_STATE_O_ESTABLISHED: + result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner, + type, bmp); + if (result < 0) { + dev_err(dev, "MAS reservation failed: %d\n", result); + goto out; + } + if (is_owner) { + wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr, + WLP_DRP | stream); + wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0); + } + break; + case UWB_RSV_STATE_NONE: + /* revert back to PCA */ + result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner, + type, bmp); + if (result < 0) + dev_err(dev, "MAS reservation failed: %d\n", result); + /* Revert to PCA even though SET IP MAS failed. */ + wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr, + i1480u->options.pca_base_priority); + wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1); + break; + default: + dev_err(dev, "unexpected WLP reservation state: %s (%d).\n", + uwb_rsv_state_str(rsv->state), rsv->state); + break; + } +out: + mutex_unlock(&i1480u->options.mutex); + return; +} + +/** + * + * Called on 'ifconfig up' + */ +int i1480u_open(struct net_device *net_dev) +{ + int result; + struct i1480u *i1480u = netdev_priv(net_dev); + struct wlp *wlp = &i1480u->wlp; + struct uwb_rc *rc; + struct device *dev = &i1480u->usb_iface->dev; + + rc = wlp->rc; + result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ + if (result < 0) + goto error_rx_setup; + netif_wake_queue(net_dev); +#ifdef i1480u_FLOW_CONTROL + result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; + if (result < 0) { + dev_err(dev, "Can't submit notification URB: %d\n", result); + goto error_notif_urb_submit; + } +#endif + i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb; + i1480u->uwb_notifs_handler.data = i1480u; + if (uwb_bg_joined(rc)) + netif_carrier_on(net_dev); + else + netif_carrier_off(net_dev); + uwb_notifs_register(rc, &i1480u->uwb_notifs_handler); + /* Interface is up with an address, now we can create WSS */ + result = wlp_wss_setup(net_dev, &wlp->wss); + if (result < 0) { + dev_err(dev, "Can't create WSS: %d. 
\n", result); + goto error_notif_deregister; + } + return 0; +error_notif_deregister: + uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); +#ifdef i1480u_FLOW_CONTROL +error_notif_urb_submit: +#endif + netif_stop_queue(net_dev); + i1480u_rx_release(i1480u); +error_rx_setup: + return result; +} + + +/** + * Called on 'ifconfig down' + */ +int i1480u_stop(struct net_device *net_dev) +{ + struct i1480u *i1480u = netdev_priv(net_dev); + struct wlp *wlp = &i1480u->wlp; + struct uwb_rc *rc = wlp->rc; + + BUG_ON(wlp->rc == NULL); + wlp_wss_remove(&wlp->wss); + uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); + netif_carrier_off(net_dev); +#ifdef i1480u_FLOW_CONTROL + usb_kill_urb(i1480u->notif_urb); +#endif + netif_stop_queue(net_dev); + i1480u_rx_release(i1480u); + i1480u_tx_release(i1480u); + return 0; +} + + +/** Report statistics */ +struct net_device_stats *i1480u_get_stats(struct net_device *net_dev) +{ + struct i1480u *i1480u = netdev_priv(net_dev); + return &i1480u->stats; +} + + +/** + * + * Change the interface config--we probably don't have to do anything. + */ +int i1480u_set_config(struct net_device *net_dev, struct ifmap *map) +{ + int result; + struct i1480u *i1480u = netdev_priv(net_dev); + BUG_ON(i1480u->wlp.rc == NULL); + result = 0; + return result; +} + +/** + * Change the MTU of the interface + */ +int i1480u_change_mtu(struct net_device *net_dev, int mtu) +{ + static union { + struct wlp_tx_hdr tx; + struct wlp_rx_hdr rx; + } i1480u_all_hdrs; + + if (mtu < ETH_HLEN) /* We encap eth frames */ + return -ERANGE; + if (mtu > 4000 - sizeof(i1480u_all_hdrs)) + return -ERANGE; + net_dev->mtu = mtu; + return 0; +} + + +/** + * Callback function to handle events from UWB + * When we see other devices we know the carrier is ok, + * if we are the only device in the beacon group we set the carrier + * state to off. + * */ +void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev, + enum uwb_notifs event) +{ + struct i1480u *i1480u = data; + struct net_device *net_dev = i1480u->net_dev; + struct device *dev = &i1480u->usb_iface->dev; + switch (event) { + case UWB_NOTIF_BG_JOIN: + netif_carrier_on(net_dev); + dev_info(dev, "Link is up\n"); + break; + case UWB_NOTIF_BG_LEAVE: + netif_carrier_off(net_dev); + dev_info(dev, "Link is down\n"); + break; + default: + dev_err(dev, "don't know how to handle event %d from uwb\n", + event); + } +} + +/** + * Stop the network queue + * + * Enable WLP substack to stop network queue. We also set the flow control + * threshold at this time to prevent the flow control from restarting the + * queue. + * + * we are loosing the current threshold value here ... FIXME? + */ +void i1480u_stop_queue(struct wlp *wlp) +{ + struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp); + struct net_device *net_dev = i1480u->net_dev; + i1480u->tx_inflight.threshold = 0; + netif_stop_queue(net_dev); +} + +/** + * Start the network queue + * + * Enable WLP substack to start network queue. Also re-enable the flow + * control to manage the queue again. + * + * We re-enable the flow control by storing the default threshold in the + * flow control threshold. This means that if the user modified the + * threshold before the queue was stopped and restarted that information + * will be lost. FIXME? 
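(Aside, not part of the patch: the accounting implied by struct i1480u_tx_inflight and the stop/start helpers above boils down to "stop the queue once max TX URBs are outstanding, wake it when completions bring the count back under threshold". A hedged sketch, with made-up function names, assuming the fields from i1480u-wlp.h; the real logic lives in tx.c.)

static void i1480u_tx_inflight_inc_sketch(struct i1480u *i1480u)
{
        /* one more TX URB submitted */
        if (atomic_inc_return(&i1480u->tx_inflight.count)
            >= i1480u->tx_inflight.max)
                netif_stop_queue(i1480u->net_dev);
}

static void i1480u_tx_inflight_dec_sketch(struct i1480u *i1480u)
{
        /* one TX URB completed */
        if (atomic_dec_return(&i1480u->tx_inflight.count)
            <= i1480u->tx_inflight.threshold
            && netif_queue_stopped(i1480u->net_dev)) {
                atomic_inc(&i1480u->tx_inflight.restart_count);
                netif_wake_queue(i1480u->net_dev);
        }
}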
+ */ +void i1480u_start_queue(struct wlp *wlp) +{ + struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp); + struct net_device *net_dev = i1480u->net_dev; + i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD; + netif_start_queue(net_dev); +} diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c new file mode 100644 index 0000000..9fc0353 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/rx.c @@ -0,0 +1,486 @@ +/* + * WUSB Wire Adapter: WLP interface + * Driver for the Linux Network stack. + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * i1480u's RX handling is simple. i1480u will send the received + * network packets broken up in fragments; 1 to N fragments make a + * packet, we assemble them together and deliver the packet with netif_rx(). + * + * Because each USB transfer is a *single* fragment (except when the + * transfer contains a first fragment), each URB thus called + * back contains one or two fragments. So we queue N URBs, each with its own + * fragment buffer. When a URB is done, we process it (adding to the + * current skb from the fragment buffer until complete). Once + * processed, we requeue the URB. There is always a bunch of URBs + * ready to take data, so the gap between URBs should be minimal. + * + * An URB's transfer buffer is the data field of a socket buffer. This + * reduces copying as data can be passed directly to the network layer. If a + * complete packet or 1st fragment is received the URB's transfer buffer is + * taken away from it and used to send data to the network layer. In this + * case a new transfer buffer is allocated to the URB before being requeued. + * If a "NEXT" or "LAST" fragment is received, the fragment contents are + * appended to the RX packet under construction and the transfer buffer + * is reused. To be able to use this buffer to assemble complete packets + * we set each buffer's size to that of the MAX ethernet packet that can + * be received. There is thus room for improvement in memory usage. + * + * When the max tx fragment size increases, we should be able to read + * data into the skbs directly with very simple code. + * + * ROADMAP: + * + * ENTRY POINTS: + * + * i1480u_rx_setup(): setup RX context [from i1480u_open()] + * + * i1480u_rx_release(): release RX context [from i1480u_stop()] + * + * i1480u_rx_cb(): called when the RX USB URB receives a + * packet. It removes the header and pushes it up + * the Linux netdev stack with netif_rx(). + * + * i1480u_rx_buffer() + * i1480u_drop() and i1480u_fix() + * i1480u_skb_deliver() + * + */ + +#include +#include +#include "i1480u-wlp.h" + +#define D_LOCAL 0 +#include + + +/** + * Setup the RX context + * + * Each URB is provided with a transfer_buffer that is the data field + * of a new socket buffer. 
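+ * + * Roughly, each of the i1480u_RX_BUFS slots set up below ends up wired + * like this (a sketch only; the real code follows): + * + *	skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE); + *	skb_reserve(skb, 2);			(keep 2 bytes of headroom) + *	urb->transfer_buffer = skb->data;	(filled with up to size - 2 bytes) + * + * so a completed URB can hand its buffer straight to the network stack.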
+ */ +int i1480u_rx_setup(struct i1480u *i1480u) +{ + int result, cnt; + struct device *dev = &i1480u->usb_iface->dev; + struct net_device *net_dev = i1480u->net_dev; + struct usb_endpoint_descriptor *epd; + struct sk_buff *skb; + + /* Alloc RX stuff */ + i1480u->rx_skb = NULL; /* not in process of receiving packet */ + result = -ENOMEM; + epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc; + for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) { + struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt]; + rx_buf->i1480u = i1480u; + skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE); + if (!skb) { + dev_err(dev, + "RX: cannot allocate RX buffer %d\n", cnt); + result = -ENOMEM; + goto error; + } + skb->dev = net_dev; + skb->ip_summed = CHECKSUM_NONE; + skb_reserve(skb, 2); + rx_buf->data = skb; + rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL); + if (unlikely(rx_buf->urb == NULL)) { + dev_err(dev, "RX: cannot allocate URB %d\n", cnt); + result = -ENOMEM; + goto error; + } + usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev, + usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress), + rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2, + i1480u_rx_cb, rx_buf); + result = usb_submit_urb(rx_buf->urb, GFP_NOIO); + if (unlikely(result < 0)) { + dev_err(dev, "RX: cannot submit URB %d: %d\n", + cnt, result); + goto error; + } + } + return 0; + +error: + i1480u_rx_release(i1480u); + return result; +} + + +/** Release resources associated to the rx context */ +void i1480u_rx_release(struct i1480u *i1480u) +{ + int cnt; + for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) { + if (i1480u->rx_buf[cnt].data) + dev_kfree_skb(i1480u->rx_buf[cnt].data); + if (i1480u->rx_buf[cnt].urb) { + usb_kill_urb(i1480u->rx_buf[cnt].urb); + usb_free_urb(i1480u->rx_buf[cnt].urb); + } + } + if (i1480u->rx_skb != NULL) + dev_kfree_skb(i1480u->rx_skb); +} + +static +void i1480u_rx_unlink_urbs(struct i1480u *i1480u) +{ + int cnt; + for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) { + if (i1480u->rx_buf[cnt].urb) + usb_unlink_urb(i1480u->rx_buf[cnt].urb); + } +} + +/** Fix an out-of-sequence packet */ +#define i1480u_fix(i1480u, msg...) \ +do { \ + if (printk_ratelimit()) \ + dev_err(&i1480u->usb_iface->dev, msg); \ + dev_kfree_skb_irq(i1480u->rx_skb); \ + i1480u->rx_skb = NULL; \ + i1480u->rx_untd_pkt_size = 0; \ +} while (0) + + +/** Drop an out-of-sequence packet */ +#define i1480u_drop(i1480u, msg...) \ +do { \ + if (printk_ratelimit()) \ + dev_err(&i1480u->usb_iface->dev, msg); \ + i1480u->stats.rx_dropped++; \ +} while (0) + + + + +/** Finalizes setting up the SKB and delivers it + * + * We first pass the incoming frame to WLP substack for verification. It + * may also be a WLP association frame in which case WLP will take over the + * processing. If WLP does not take it over it will still verify it, if the + * frame is invalid the skb will be freed by WLP and we will not continue + * parsing. 
+ * */ +static +void i1480u_skb_deliver(struct i1480u *i1480u) +{ + int should_parse; + struct net_device *net_dev = i1480u->net_dev; + struct device *dev = &i1480u->usb_iface->dev; + + d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n", + i1480u->rx_skb, i1480u->rx_skb->len); + d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len); + should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, + &i1480u->rx_srcaddr); + if (!should_parse) + goto out; + i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); + d_printf(5, dev, "RX delivered skb(%p), %u bytes\n", + i1480u->rx_skb, i1480u->rx_skb->len); + d_dump(7, dev, i1480u->rx_skb->data, + i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len); + i1480u->stats.rx_packets++; + i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; + net_dev->last_rx = jiffies; + /* FIXME: flow control: check netif_rx() retval */ + + netif_rx(i1480u->rx_skb); /* deliver */ +out: + i1480u->rx_skb = NULL; + i1480u->rx_untd_pkt_size = 0; +} + + +/** + * Process a buffer of data received from the USB RX endpoint + * + * First fragment arrives with next or last fragment. All other fragments + * arrive alone. + * + * /me hates long functions. + */ +static +void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf) +{ + unsigned pkt_completed = 0; /* !0 when we got all pkt fragments */ + size_t untd_hdr_size, untd_frg_size; + size_t i1480u_hdr_size; + struct wlp_rx_hdr *i1480u_hdr = NULL; + + struct i1480u *i1480u = rx_buf->i1480u; + struct sk_buff *skb = rx_buf->data; + int size_left = rx_buf->urb->actual_length; + void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */ + struct untd_hdr *untd_hdr; + + struct net_device *net_dev = i1480u->net_dev; + struct device *dev = &i1480u->usb_iface->dev; + struct sk_buff *new_skb; + +#if 0 + dev_fnstart(dev, + "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left); + dev_err(dev, "RX packet, %zu bytes\n", size_left); + dump_bytes(dev, ptr, size_left); +#endif + i1480u_hdr_size = sizeof(struct wlp_rx_hdr); + + while (size_left > 0) { + if (pkt_completed) { + i1480u_drop(i1480u, "RX: fragment follows completed" + "packet in same buffer. Dropping\n"); + break; + } + untd_hdr = ptr; + if (size_left < sizeof(*untd_hdr)) { /* Check the UNTD header */ + i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n"); + goto out; + } + if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) { /* Paranoia: TX set? */ + i1480u_drop(i1480u, "RX: TX bit set! Dropping\n"); + goto out; + } + switch (untd_hdr_type(untd_hdr)) { /* Check the UNTD header type */ + case i1480u_PKT_FRAG_1ST: { + struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr; + dev_dbg(dev, "1st fragment\n"); + untd_hdr_size = sizeof(struct untd_hdr_1st); + if (i1480u->rx_skb != NULL) + i1480u_fix(i1480u, "RX: 1st fragment out of " + "sequence! Fixing\n"); + if (size_left < untd_hdr_size + i1480u_hdr_size) { + i1480u_drop(i1480u, "RX: short 1st fragment! " + "Dropping\n"); + goto out; + } + i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len) + - i1480u_hdr_size; + untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len); + if (size_left < untd_hdr_size + untd_frg_size) { + i1480u_drop(i1480u, + "RX: short payload! 
Dropping\n"); + goto out; + } + i1480u->rx_skb = skb; + i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size; + i1480u->rx_srcaddr = i1480u_hdr->srcaddr; + skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size); + skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size); + stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7); + stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18); + rx_buf->data = NULL; /* need to create new buffer */ + break; + } + case i1480u_PKT_FRAG_NXT: { + dev_dbg(dev, "nxt fragment\n"); + untd_hdr_size = sizeof(struct untd_hdr_rst); + if (i1480u->rx_skb == NULL) { + i1480u_drop(i1480u, "RX: next fragment out of " + "sequence! Dropping\n"); + goto out; + } + if (size_left < untd_hdr_size) { + i1480u_drop(i1480u, "RX: short NXT fragment! " + "Dropping\n"); + goto out; + } + untd_frg_size = le16_to_cpu(untd_hdr->len); + if (size_left < untd_hdr_size + untd_frg_size) { + i1480u_drop(i1480u, + "RX: short payload! Dropping\n"); + goto out; + } + memmove(skb_put(i1480u->rx_skb, untd_frg_size), + ptr + untd_hdr_size, untd_frg_size); + break; + } + case i1480u_PKT_FRAG_LST: { + dev_dbg(dev, "Lst fragment\n"); + untd_hdr_size = sizeof(struct untd_hdr_rst); + if (i1480u->rx_skb == NULL) { + i1480u_drop(i1480u, "RX: last fragment out of " + "sequence! Dropping\n"); + goto out; + } + if (size_left < untd_hdr_size) { + i1480u_drop(i1480u, "RX: short LST fragment! " + "Dropping\n"); + goto out; + } + untd_frg_size = le16_to_cpu(untd_hdr->len); + if (size_left < untd_frg_size + untd_hdr_size) { + i1480u_drop(i1480u, + "RX: short payload! Dropping\n"); + goto out; + } + memmove(skb_put(i1480u->rx_skb, untd_frg_size), + ptr + untd_hdr_size, untd_frg_size); + pkt_completed = 1; + break; + } + case i1480u_PKT_FRAG_CMP: { + dev_dbg(dev, "cmp fragment\n"); + untd_hdr_size = sizeof(struct untd_hdr_cmp); + if (i1480u->rx_skb != NULL) + i1480u_fix(i1480u, "RX: fix out-of-sequence CMP" + " fragment!\n"); + if (size_left < untd_hdr_size + i1480u_hdr_size) { + i1480u_drop(i1480u, "RX: short CMP fragment! " + "Dropping\n"); + goto out; + } + i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len); + untd_frg_size = i1480u->rx_untd_pkt_size; + if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) { + i1480u_drop(i1480u, + "RX: short payload! Dropping\n"); + goto out; + } + i1480u->rx_skb = skb; + i1480u_hdr = (void *) untd_hdr + untd_hdr_size; + i1480u->rx_srcaddr = i1480u_hdr->srcaddr; + stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7); + stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18); + skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size); + skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size); + rx_buf->data = NULL; /* for hand off skb to network stack */ + pkt_completed = 1; + i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */ + break; + } + default: + i1480u_drop(i1480u, "RX: unknown packet type %u! 
" + "Dropping\n", untd_hdr_type(untd_hdr)); + goto out; + } + size_left -= untd_hdr_size + untd_frg_size; + if (size_left > 0) + ptr += untd_hdr_size + untd_frg_size; + } + if (pkt_completed) + i1480u_skb_deliver(i1480u); +out: + /* recreate needed RX buffers*/ + if (rx_buf->data == NULL) { + /* buffer is being used to receive packet, create new */ + new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE); + if (!new_skb) { + if (printk_ratelimit()) + dev_err(dev, + "RX: cannot allocate RX buffer\n"); + } else { + new_skb->dev = net_dev; + new_skb->ip_summed = CHECKSUM_NONE; + skb_reserve(new_skb, 2); + rx_buf->data = new_skb; + } + } + return; +} + + +/** + * Called when an RX URB has finished receiving or has found some kind + * of error condition. + * + * LIMITATIONS: + * + * - We read USB-transfers, each transfer contains a SINGLE fragment + * (can contain a complete packet, or a 1st, next, or last fragment + * of a packet). + * Looks like a transfer can contain more than one fragment (07/18/06) + * + * - Each transfer buffer is the size of the maximum packet size (minus + * headroom), i1480u_MAX_PKT_SIZE - 2 + * + * - We always read the full USB-transfer, no partials. + * + * - Each transfer is read directly into a skb. This skb will be used to + * send data to the upper layers if it is the first fragment or a complete + * packet. In the other cases the data will be copied from the skb to + * another skb that is being prepared for the upper layers from a prev + * first fragment. + * + * It is simply too much of a pain. Gosh, there should be a unified + * SG infrastructure for *everything* [so that I could declare a SG + * buffer, pass it to USB for receiving, append some space to it if + * I wish, receive more until I have the whole chunk, adapt + * pointers on each fragment to remove hardware headers and then + * attach that to an skbuff and netif_rx()]. + */ +void i1480u_rx_cb(struct urb *urb) +{ + int result; + int do_parse_buffer = 1; + struct i1480u_rx_buf *rx_buf = urb->context; + struct i1480u *i1480u = rx_buf->i1480u; + struct device *dev = &i1480u->usb_iface->dev; + unsigned long flags; + u8 rx_buf_idx = rx_buf - i1480u->rx_buf; + + switch (urb->status) { + case 0: + break; + case -ECONNRESET: /* Not an error, but a controlled situation; */ + case -ENOENT: /* (we killed the URB)...so, no broadcast */ + case -ESHUTDOWN: /* going away! 
*/ + dev_err(dev, "RX URB[%u]: goind down %d\n", + rx_buf_idx, urb->status); + goto error; + default: + dev_err(dev, "RX URB[%u]: unknown status %d\n", + rx_buf_idx, urb->status); + if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)) { + dev_err(dev, "RX: max acceptable errors exceeded," + " resetting device.\n"); + i1480u_rx_unlink_urbs(i1480u); + wlp_reset_all(&i1480u->wlp); + goto error; + } + do_parse_buffer = 0; + break; + } + spin_lock_irqsave(&i1480u->lock, flags); + /* chew the data fragments, extract network packets */ + if (do_parse_buffer) { + i1480u_rx_buffer(rx_buf); + if (rx_buf->data) { + rx_buf->urb->transfer_buffer = rx_buf->data->data; + result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC); + if (result < 0) { + dev_err(dev, "RX URB[%u]: cannot submit %d\n", + rx_buf_idx, result); + } + } + } + spin_unlock_irqrestore(&i1480u->lock, flags); +error: + return; +} + diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c new file mode 100644 index 0000000..a1d8ca6 --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c @@ -0,0 +1,408 @@ +/* + * WUSB Wire Adapter: WLP interface + * Sysfs interfaces + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ + +#include +#include +#include +#include +#include "i1480u-wlp.h" + + +/** + * + * @dev: Class device from the net_device; assumed refcnted. + * + * Yes, I don't lock--we assume it is refcounted and I am getting a + * single byte value that is kind of atomic to read. + */ +ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf) +{ + return sprintf(buf, "%u\n", + wlp_tx_hdr_phy_rate(&options->def_tx_hdr)); +} +EXPORT_SYMBOL_GPL(uwb_phy_rate_show); + + +ssize_t uwb_phy_rate_store(struct wlp_options *options, + const char *buf, size_t size) +{ + ssize_t result; + unsigned rate; + + result = sscanf(buf, "%u\n", &rate); + if (result != 1) { + result = -EINVAL; + goto out; + } + result = -EINVAL; + if (rate >= UWB_PHY_RATE_INVALID) + goto out; + wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate); + result = 0; +out: + return result < 0 ? result : size; +} +EXPORT_SYMBOL_GPL(uwb_phy_rate_store); + + +ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf) +{ + return sprintf(buf, "%u\n", + wlp_tx_hdr_rts_cts(&options->def_tx_hdr)); +} +EXPORT_SYMBOL_GPL(uwb_rts_cts_show); + + +ssize_t uwb_rts_cts_store(struct wlp_options *options, + const char *buf, size_t size) +{ + ssize_t result; + unsigned value; + + result = sscanf(buf, "%u\n", &value); + if (result != 1) { + result = -EINVAL; + goto out; + } + result = -EINVAL; + wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value); + result = 0; +out: + return result < 0 ? 
result : size; +} +EXPORT_SYMBOL_GPL(uwb_rts_cts_store); + + +ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf) +{ + return sprintf(buf, "%u\n", + wlp_tx_hdr_ack_policy(&options->def_tx_hdr)); +} +EXPORT_SYMBOL_GPL(uwb_ack_policy_show); + + +ssize_t uwb_ack_policy_store(struct wlp_options *options, + const char *buf, size_t size) +{ + ssize_t result; + unsigned value; + + result = sscanf(buf, "%u\n", &value); + if (result != 1 || value > UWB_ACK_B_REQ) { + result = -EINVAL; + goto out; + } + wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value); + result = 0; +out: + return result < 0 ? result : size; +} +EXPORT_SYMBOL_GPL(uwb_ack_policy_store); + + +/** + * Show the PCA base priority. + * + * We can access without locking, as the value is (for now) orthogonal + * to other values. + */ +ssize_t uwb_pca_base_priority_show(const struct wlp_options *options, + char *buf) +{ + return sprintf(buf, "%u\n", + options->pca_base_priority); +} +EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show); + + +/** + * Set the PCA base priority. + * + * We can access without locking, as the value is (for now) orthogonal + * to other values. + */ +ssize_t uwb_pca_base_priority_store(struct wlp_options *options, + const char *buf, size_t size) +{ + ssize_t result = -EINVAL; + u8 pca_base_priority; + + result = sscanf(buf, "%hhu\n", &pca_base_priority); + if (result != 1) { + result = -EINVAL; + goto out; + } + result = -EINVAL; + if (pca_base_priority >= 8) + goto out; + options->pca_base_priority = pca_base_priority; + /* Update TX header if we are currently using PCA. */ + if ((wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0) + wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority); + result = 0; +out: + return result < 0 ? result : size; +} +EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store); + +/** + * Show current inflight values + * + * Will print the current MAX and THRESHOLD values for the basic flow + * control. In addition it will report how many times the TX queue needed + * to be restarted since the last time this query was made. + */ +static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight, + char *buf) +{ + ssize_t result; + unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ; + unsigned long restart_count = atomic_read(&inflight->restart_count); + + result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n" + "#read: threshold max inflight_count restarts " + "seconds restarts/sec\n" + "#write: threshold max\n", + inflight->threshold, inflight->max, + atomic_read(&inflight->count), + restart_count, sec_elapsed, + sec_elapsed == 0 ? 0 : restart_count/sec_elapsed); + inflight->restart_ts = jiffies; + atomic_set(&inflight->restart_count, 0); + return result; +} + +static +ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight, + const char *buf, size_t size) +{ + unsigned long in_threshold, in_max; + ssize_t result; + result = sscanf(buf, "%lu %lu", &in_threshold, &in_max); + if (result != 2) + return -EINVAL; + if (in_max <= in_threshold) + return -EINVAL; + inflight->max = in_max; + inflight->threshold = in_threshold; + return size; +} +/* + * Glue (or function adaptors) for accessing info on sysfs + * + * [we need this indirection because the PCI driver does almost the + * same] + * + * Linux 2.6.21 changed how 'struct netdevice' does attributes (from + * having a 'struct class_dev' to having a 'struct device'). That is + * quite a pain. + * + * So we try to abstract that here. 
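+ * + * As a rough illustration of the glue being generated (an illustrative + * expansion only; the real macros are right below), + * i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options) boils down to: + * + *	static ssize_t i1480u_show_uwb_phy_rate(struct device *dev, + *			struct device_attribute *attr, char *buf) + *	{ + *		struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); + *		return uwb_phy_rate_show(&i1480u->options, buf); + *	} + * + * and i1480u_ATTR() then wraps the show/store pair in a DEVICE_ATTR() so + * it can be listed in the attribute group at the end of this file. + *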
i1480u_SHOW() and i1480u_STORE() + * create adaptors for extracting the 'struct i1480u' from a 'struct + * dev' and calling a function for doing a sysfs operation (as we have + * them factorized already). i1480u_ATTR creates the attribute file + * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a + * class_device_attr_NAME or device_attr_NAME (for group registration). + */ +#include + +#define i1480u_SHOW(name, fn, param) \ +static ssize_t i1480u_show_##name(struct device *dev, \ + struct device_attribute *attr,\ + char *buf) \ +{ \ + struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \ + return fn(&i1480u->param, buf); \ +} + +#define i1480u_STORE(name, fn, param) \ +static ssize_t i1480u_store_##name(struct device *dev, \ + struct device_attribute *attr,\ + const char *buf, size_t size)\ +{ \ + struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \ + return fn(&i1480u->param, buf, size); \ +} + +#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm, \ + i1480u_show_##name,\ + i1480u_store_##name) + +#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name, \ + S_IRUGO, \ + i1480u_show_##name, NULL) + +#define i1480u_ATTR_NAME(a) (dev_attr_##a) + + +/* + * Sysfs adaptors + */ +i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options); +i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options); +i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR); + +i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options); +i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options); +i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR); + +i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options); +i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options); +i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR); + +i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options); +i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options); +i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_eda, wlp_eda_show, wlp); +i1480u_STORE(wlp_eda, wlp_eda_store, wlp); +i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp); +i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp); +i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp); +i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp); +i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp); +i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp); +i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp); +i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp); +i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp); +i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp); +i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp); +i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp); +i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp); +i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp); +i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp); +i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp); +i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp); +i1480u_STORE(wlp_dev_prim_OUI_sub, 
wlp_dev_prim_OUI_sub_store, wlp); +i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp); +i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp); +i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR); + +i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp); +i1480u_ATTR_SHOW(wlp_neighborhood); + +i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss); +i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss); +i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR); + +/* + * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) over + * the last 256 received WLP frames (ECMA-368 13.3). + * + * [the -7dB that have to be subtracted from the LQI to make the LQE + * are already taken into account]. + */ +i1480u_SHOW(wlp_lqe, stats_show, lqe_stats); +i1480u_STORE(wlp_lqe, stats_store, lqe_stats); +i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR); + +/* + * Show the Receive Signal Strength Indicator averaged over all the + * received WLP frames (ECMA-368 13.3). It is still not clear what + * this value is, but it is kind of a percentage of the signal strength + * at the antenna. + */ +i1480u_SHOW(wlp_rssi, stats_show, rssi_stats); +i1480u_STORE(wlp_rssi, stats_store, rssi_stats); +i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR); + +/** + * We maintain a basic flow control counter: "count" tracks how many TX URBs + * are outstanding. Only allow "max" + * TX URBs to be outstanding. If this value is reached the queue will be + * stopped. The queue will be restarted when there are + * "threshold" URBs outstanding. + */ +i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight); +i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight); +i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR); + +static struct attribute *i1480u_attrs[] = { + &i1480u_ATTR_NAME(uwb_phy_rate).attr, + &i1480u_ATTR_NAME(uwb_rts_cts).attr, + &i1480u_ATTR_NAME(uwb_ack_policy).attr, + &i1480u_ATTR_NAME(uwb_pca_base_priority).attr, + &i1480u_ATTR_NAME(wlp_lqe).attr, + &i1480u_ATTR_NAME(wlp_rssi).attr, + &i1480u_ATTR_NAME(wlp_eda).attr, + &i1480u_ATTR_NAME(wlp_uuid).attr, + &i1480u_ATTR_NAME(wlp_dev_name).attr, + &i1480u_ATTR_NAME(wlp_dev_manufacturer).attr, + &i1480u_ATTR_NAME(wlp_dev_model_name).attr, + &i1480u_ATTR_NAME(wlp_dev_model_nr).attr, + &i1480u_ATTR_NAME(wlp_dev_serial).attr, + &i1480u_ATTR_NAME(wlp_dev_prim_category).attr, + &i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr, + &i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr, + &i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr, + &i1480u_ATTR_NAME(wlp_neighborhood).attr, + &i1480u_ATTR_NAME(wss_activate).attr, + &i1480u_ATTR_NAME(wlp_tx_inflight).attr, + NULL, +}; + +static struct attribute_group i1480u_attr_group = { + .name = NULL, /* we want them in the same directory */ + .attrs = i1480u_attrs, +}; + +int i1480u_sysfs_setup(struct i1480u *i1480u) +{ + int result; + struct device *dev = &i1480u->usb_iface->dev; + result = sysfs_create_group(&i1480u->net_dev->dev.kobj, + &i1480u_attr_group); + if (result < 0) + dev_err(dev, "cannot initialize sysfs attributes: %d\n", + result); + return result; +} + + +void i1480u_sysfs_release(struct i1480u *i1480u) +{ + sysfs_remove_group(&i1480u->net_dev->dev.kobj, + &i1480u_attr_group); +} diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c new file mode 100644 index 0000000..3426bfb --- /dev/null +++ b/drivers/uwb/i1480/i1480u-wlp/tx.c @@ -0,0 +1,632 @@ +/* + * WUSB Wire Adapter: WLP interface + * Deal with TX (massaging data to transmit, 
handling it) + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Transmission engine. Get an skb, create from that a WLP transmit + * context, add a WLP TX header (which we keep prefilled in the + * device's instance), fill out the target-specific fields and + * fire it. + * + * ROADMAP: + * + * Entry points: + * + * i1480u_tx_release(): called by i1480u_disconnect() to release + * pending tx contexts. + * + * i1480u_tx_cb(): callback for TX contexts (USB URBs) + * i1480u_tx_destroy(): + * + * i1480u_tx_timeout(): called for timeout handling from the + * network stack. + * + * i1480u_hard_start_xmit(): called for transmitting an skb from + * the network stack. Will interact with WLP + * substack to verify and prepare frame. + * i1480u_xmit_frame(): actual transmission on hardware + * + * i1480u_tx_create() Creates TX context + * i1480u_tx_create_1() For packets in 1 fragment + * i1480u_tx_create_n() For packets in >1 fragments + * + * TODO: + * + * - FIXME: rewrite using usb_sg_*(), add asynch support to + * usb_sg_*(). It might not make too much sense as most of + * the times the MTU will be smaller than one page... + */ + +#include "i1480u-wlp.h" +#define D_LOCAL 5 +#include + +enum { + /* This is only for Next and Last TX packets */ + i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE + - sizeof(struct untd_hdr_rst), +}; + +/** Free resources allocated to a i1480u tx context. */ +static +void i1480u_tx_free(struct i1480u_tx *wtx) +{ + kfree(wtx->buf); + if (wtx->skb) + dev_kfree_skb_irq(wtx->skb); + usb_free_urb(wtx->urb); + kfree(wtx); +} + +static +void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx) +{ + unsigned long flags; + spin_lock_irqsave(&i1480u->tx_list_lock, flags); /* not active any more */ + list_del(&wtx->list_node); + i1480u_tx_free(wtx); + spin_unlock_irqrestore(&i1480u->tx_list_lock, flags); +} + +static +void i1480u_tx_unlink_urbs(struct i1480u *i1480u) +{ + unsigned long flags; + struct i1480u_tx *wtx, *next; + + spin_lock_irqsave(&i1480u->tx_list_lock, flags); + list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) { + usb_unlink_urb(wtx->urb); + } + spin_unlock_irqrestore(&i1480u->tx_list_lock, flags); +} + + +/** + * Callback for a completed tx USB URB. 
+ * + * TODO: + * + * - FIXME: recover errors more gracefully + * - FIXME: handle NAKs (I don't think they come here) for flow ctl + */ +static +void i1480u_tx_cb(struct urb *urb) +{ + struct i1480u_tx *wtx = urb->context; + struct i1480u *i1480u = wtx->i1480u; + struct net_device *net_dev = i1480u->net_dev; + struct device *dev = &i1480u->usb_iface->dev; + unsigned long flags; + + switch (urb->status) { + case 0: + spin_lock_irqsave(&i1480u->lock, flags); + i1480u->stats.tx_packets++; + i1480u->stats.tx_bytes += urb->actual_length; + spin_unlock_irqrestore(&i1480u->lock, flags); + break; + case -ECONNRESET: /* Not an error, but a controlled situation; */ + case -ENOENT: /* (we killed the URB)...so, no broadcast */ + dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status); + netif_stop_queue(net_dev); + break; + case -ESHUTDOWN: /* going away! */ + dev_dbg(dev, "notif endp: down %d\n", urb->status); + netif_stop_queue(net_dev); + break; + default: + dev_err(dev, "TX: unknown URB status %d\n", urb->status); + if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)) { + dev_err(dev, "TX: max acceptable errors exceeded. " + "Reset device.\n"); + netif_stop_queue(net_dev); + i1480u_tx_unlink_urbs(i1480u); + wlp_reset_all(&i1480u->wlp); + } + break; + } + i1480u_tx_destroy(i1480u, wtx); + if (atomic_dec_return(&i1480u->tx_inflight.count) + <= i1480u->tx_inflight.threshold + && netif_queue_stopped(net_dev) + && i1480u->tx_inflight.threshold != 0) { + if (d_test(2) && printk_ratelimit()) + d_printf(2, dev, "Restart queue. \n"); + netif_start_queue(net_dev); + atomic_inc(&i1480u->tx_inflight.restart_count); + } + return; +} + + +/** + * Given a buffer that doesn't fit in a single fragment, create a + * scatter/gather structure for delivery to the USB pipe. + * + * Implements functionality of i1480u_tx_create(). + * + * @wtx: tx descriptor + * @skb: skb to send + * @gfp_mask: gfp allocation mask + * @returns: 0 if ok, < 0 on error. + * + * Sorry, TOO LONG a function, but breaking it up is kind of hard + * + * This will break the buffer into chunks smaller than + * i1480u_MAX_FRG_SIZE (including the header) and add proper headers + * to each: + * + * 1st header \ + * i1480 tx header | fragment 1 + * fragment data / + * nxt header \ fragment 2 + * fragment data / + * .. + * .. + * last header \ fragment 3 + * last fragment data / + * + * This does not fill the i1480 TX header, it is left up to the + * caller to do that; you can get it from @wtx->wlp_tx_hdr. + * + * This function consumes the skb unless there is an error. + */ +static +int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, + gfp_t gfp_mask) +{ + int result; + void *pl; + size_t pl_size; + + void *pl_itr, *buf_itr; + size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0; + struct untd_hdr_1st *untd_hdr_1st; + struct wlp_tx_hdr *wlp_tx_hdr; + struct untd_hdr_rst *untd_hdr_rst; + + wtx->skb = NULL; + pl = skb->data; + pl_itr = pl; + pl_size = skb->len; + pl_size_left = pl_size; /* payload size */ + /* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus + * the headers */ + pl_size_1st = i1480u_MAX_FRG_SIZE + - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr); + BUG_ON(pl_size_1st > pl_size); + pl_size_left -= pl_size_1st; + /* The rest have a smaller header (no i1480 TX header). We + * need to break up the payload into blocks smaller than + * i1480u_MAX_PL_SIZE (payload excluding header). 
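 * For example (illustrative numbers only; the real constant comes from + * i1480u-wlp.h): if i1480u_MAX_PL_SIZE were 512 bytes, a remaining + * payload of 1200 bytes would need (1200 + 512 - 1) / 512 = 3 more + * fragments; the expression below is just that ceiling division.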
*/ + frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE; + /* Allocate space for the new buffer. In this new buffer we'll + * place the headers followed by the data fragment, headers, + * data fragments, etc. + */ + result = -ENOMEM; + wtx->buf_size = sizeof(*untd_hdr_1st) + + sizeof(*wlp_tx_hdr) + + frgs * sizeof(*untd_hdr_rst) + + pl_size; + wtx->buf = kmalloc(wtx->buf_size, gfp_mask); + if (wtx->buf == NULL) + goto error_buf_alloc; + + buf_itr = wtx->buf; /* We got the space, let's fill it up */ + /* Fill 1st fragment */ + untd_hdr_1st = buf_itr; + buf_itr += sizeof(*untd_hdr_1st); + untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST); + untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0); + untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr)); + untd_hdr_1st->fragment_len = + cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr)); + memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding)); + /* Set up i1480 header info */ + wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr; + buf_itr += sizeof(*wlp_tx_hdr); + /* Copy the first fragment */ + memcpy(buf_itr, pl_itr, pl_size_1st); + pl_itr += pl_size_1st; + buf_itr += pl_size_1st; + + /* Now do each remaining fragment */ + result = -EINVAL; + while (pl_size_left > 0) { + d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n", + pl_size_left, buf_itr - wtx->buf); + if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf + > wtx->buf_size) { + printk(KERN_ERR "BUG: no space for header\n"); + goto error_bug; + } + d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n", + pl_size_left, buf_itr - wtx->buf); + untd_hdr_rst = buf_itr; + buf_itr += sizeof(*untd_hdr_rst); + if (pl_size_left > i1480u_MAX_PL_SIZE) { + frg_pl_size = i1480u_MAX_PL_SIZE; + untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT); + } else { + frg_pl_size = pl_size_left; + untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); + } + d_printf(5, NULL, + "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", + pl_size_left, buf_itr - wtx->buf, frg_pl_size); + untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); + untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); + untd_hdr_rst->padding = 0; + if (buf_itr + frg_pl_size - wtx->buf + > wtx->buf_size) { + printk(KERN_ERR "BUG: no space for payload\n"); + goto error_bug; + } + memcpy(buf_itr, pl_itr, frg_pl_size); + buf_itr += frg_pl_size; + pl_itr += frg_pl_size; + pl_size_left -= frg_pl_size; + d_printf(5, NULL, + "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", + pl_size_left, buf_itr - wtx->buf, frg_pl_size); + } + dev_kfree_skb_irq(skb); + return 0; + +error_bug: + printk(KERN_ERR + "BUG: skb %u bytes\n" + "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n" + "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n", + skb->len, + frg_pl_size, i1480u_MAX_FRG_SIZE, + buf_itr - wtx->buf, wtx->buf_size, pl_size_left); + + kfree(wtx->buf); +error_buf_alloc: + return result; +} + + +/** + * Given a buffer that fits in a single fragment, fill out a @wtx + * struct for transmitting it down the USB pipe. + * + * Uses the fact that we have space reserved in front of the skbuff + * for hardware headers :] + * + * This does not fill the i1480 TX header, it is left up to the + * caller to do that; you can get it from @wtx->wlp_tx_hdr. + * + * @wtx: tx descriptor + * @skb: skb to send + * @gfp_mask: gfp allocation mask + * + * This function does not consume the @skb. 
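+ * + * A rough picture of the single-fragment case built here (both headers + * are pushed into the skb's existing headroom, so no data is copied): + * + *	| untd_hdr_cmp | wlp_tx_hdr | original frame data ... | + *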
+ */ +static +int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb, + gfp_t gfp_mask) +{ + struct untd_hdr_cmp *untd_hdr_cmp; + struct wlp_tx_hdr *wlp_tx_hdr; + + wtx->buf = NULL; + wtx->skb = skb; + BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr)); + wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr)); + wtx->wlp_tx_hdr = wlp_tx_hdr; + BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp)); + untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp)); + + untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP); + untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0); + untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp)); + untd_hdr_cmp->padding = 0; + return 0; +} + + +/** + * Given an skb to transmit, massage it to become palatable for the TX pipe + * + * This will break the buffer into chunks smaller than + * i1480u_MAX_FRG_SIZE and add proper headers to each. + * + * 1st header \ + * i1480 tx header | fragment 1 + * fragment data / + * nxt header \ fragment 2 + * fragment data / + * .. + * .. + * last header \ fragment 3 + * last fragment data / + * + * Each fragment will be always smaller or equal to i1480u_MAX_FRG_SIZE. + * + * If the whole packet fits in a single fragment, then the + * following is composed instead: + * + * complete header \ + * i1480 tx header | single fragment + * packet data / + * + * We were going to use s/g support, but because the interface is + * synchronous and at the end there is plenty of overhead to do it, it + * didn't seem worth it for data that is going to be smaller than + * one page. + */ +static +struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u, + struct sk_buff *skb, gfp_t gfp_mask) +{ + int result; + struct usb_endpoint_descriptor *epd; + int usb_pipe; + unsigned long flags; + + struct i1480u_tx *wtx; + const size_t pl_max_size = + i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp) + - sizeof(struct wlp_tx_hdr); + + wtx = kmalloc(sizeof(*wtx), gfp_mask); + if (wtx == NULL) + goto error_wtx_alloc; + wtx->urb = usb_alloc_urb(0, gfp_mask); + if (wtx->urb == NULL) + goto error_urb_alloc; + epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc; + usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress); + /* Fits in a single complete packet or need to split? */ + if (skb->len > pl_max_size) { + result = i1480u_tx_create_n(wtx, skb, gfp_mask); + if (result < 0) + goto error_create; + usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe, + wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx); + } else { + result = i1480u_tx_create_1(wtx, skb, gfp_mask); + if (result < 0) + goto error_create; + usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe, + skb->data, skb->len, i1480u_tx_cb, wtx); + } + spin_lock_irqsave(&i1480u->tx_list_lock, flags); + list_add(&wtx->list_node, &i1480u->tx_list); + spin_unlock_irqrestore(&i1480u->tx_list_lock, flags); + return wtx; + +error_create: + usb_free_urb(wtx->urb); +error_urb_alloc: + kfree(wtx); +error_wtx_alloc: + return NULL; +} + +/** + * Actual fragmentation and transmission of frame + * + * @wlp: WLP substack data structure + * @skb: To be transmitted + * @dst: Device address of destination + * @returns: 0 on success, <0 on failure + * + * This function can also be called directly (not just from + * hard_start_xmit), so we also check here if the interface is up before + * sending anything. 
+ */ +int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, + struct uwb_dev_addr *dst) +{ + int result = -ENXIO; + struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp); + struct device *dev = &i1480u->usb_iface->dev; + struct net_device *net_dev = i1480u->net_dev; + struct i1480u_tx *wtx; + struct wlp_tx_hdr *wlp_tx_hdr; + static unsigned char dev_bcast[2] = { 0xff, 0xff }; +#if 0 + int lockup = 50; +#endif + + d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, + net_dev); + BUG_ON(i1480u->wlp.rc == NULL); + if ((net_dev->flags & IFF_UP) == 0) + goto out; + result = -EBUSY; + if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { + if (d_test(2) && printk_ratelimit()) + d_printf(2, dev, "Max frames in flight, " + "stopping queue.\n"); + netif_stop_queue(net_dev); + goto error_max_inflight; + } + result = -ENOMEM; + wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC); + if (unlikely(wtx == NULL)) { + if (printk_ratelimit()) + dev_err(dev, "TX: no memory for WLP TX URB, " + "dropping packet (in flight %d)\n", + atomic_read(&i1480u->tx_inflight.count)); + netif_stop_queue(net_dev); + goto error_wtx_alloc; + } + wtx->i1480u = i1480u; + /* Fill out the i1480 header; @i1480u->def_tx_hdr read without + * locking. We do so because they are kind of orthogonal to + * each other (and thus not changed in an atomic batch). + * The ETH header is right after the WLP TX header. */ + wlp_tx_hdr = wtx->wlp_tx_hdr; + *wlp_tx_hdr = i1480u->options.def_tx_hdr; + wlp_tx_hdr->dstaddr = *dst; + if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast)) + && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) { + /* Broadcast message directed to DRP host. Send as best effort + * on PCA. */ + wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); + } + +#if 0 + dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len); + dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len); +#endif +#if 0 + /* simulates a device lockup after every lockup# packets */ + if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) { + /* Simulate a dropped transmit interrupt */ + net_dev->trans_start = jiffies; + netif_stop_queue(net_dev); + dev_err(dev, "Simulate lockup at %ld\n", jiffies); + return result; + } +#endif + + result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ + if (result < 0) { + dev_err(dev, "TX: cannot submit URB: %d\n", result); + /* We leave the freeing of skb to calling function */ + wtx->skb = NULL; + goto error_tx_urb_submit; + } + atomic_inc(&i1480u->tx_inflight.count); + net_dev->trans_start = jiffies; + d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, + net_dev, result); + return result; + +error_tx_urb_submit: + i1480u_tx_destroy(i1480u, wtx); +error_wtx_alloc: +error_max_inflight: +out: + d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, + net_dev, result); + return result; +} + + +/** + * Transmit an skb. Called when an skbuff has to be transmitted + * + * The skb is first passed to the WLP substack to ensure this is a valid + * frame. If valid, the device address of the destination will be filled in and + * the WLP header prepended to the skb. If this step fails we fake sending + * the frame: if we returned an error the network stack would just keep trying. + * + * Broadcast frames inside a WSS need to be treated specially as multicast is + * not supported. A broadcast frame is sent as unicast to each member of the + * WSS - this is done by the WLP substack when it finds a broadcast frame. 
+ * So, we test if the WLP substack took over the skb and only transmit it + * if it has not (been taken over). + * + * @net_dev->xmit_lock is held + */ +int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +{ + int result; + struct i1480u *i1480u = netdev_priv(net_dev); + struct device *dev = &i1480u->usb_iface->dev; + struct uwb_dev_addr dst; + + d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, + net_dev); + BUG_ON(i1480u->wlp.rc == NULL); + if ((net_dev->flags & IFF_UP) == 0) + goto error; + result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); + if (result < 0) { + dev_err(dev, "WLP verification of TX frame failed (%d). " + "Dropping packet.\n", result); + goto error; + } else if (result == 1) { + d_printf(6, dev, "WLP will transmit frame. \n"); + /* trans_start time will be set when WLP actually transmits + * the frame */ + goto out; + } + d_printf(6, dev, "Transmitting frame. \n"); + result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); + if (result < 0) { + dev_err(dev, "Frame TX failed (%d).\n", result); + goto error; + } + d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, + net_dev, result); + return NETDEV_TX_OK; +error: + dev_kfree_skb_any(skb); + i1480u->stats.tx_dropped++; +out: + d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, + net_dev, result); + return NETDEV_TX_OK; +} + + +/** + * Called when a pkt transmission doesn't complete in a reasonable period. + * Device reset may sleep - do it outside of interrupt context (delayed). + */ +void i1480u_tx_timeout(struct net_device *net_dev) +{ + struct i1480u *i1480u = netdev_priv(net_dev); + + wlp_reset_all(&i1480u->wlp); +} + + +void i1480u_tx_release(struct i1480u *i1480u) +{ + unsigned long flags; + struct i1480u_tx *wtx, *next; + int count = 0, empty; + + spin_lock_irqsave(&i1480u->tx_list_lock, flags); + list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) { + count++; + usb_unlink_urb(wtx->urb); + } + spin_unlock_irqrestore(&i1480u->tx_list_lock, flags); + count = count*10; /* wait 200ms per unlinked urb (intervals of 20ms) */ + /* + * We don't like this solution too much (dirty as it is), but + * it is cheaper than putting a refcount on each i1480u_tx and + * waiting for all of them to go away... + * + * Called when no more packets can be added to tx_list + * so we can wait for it to be empty. + */ + while (1) { + spin_lock_irqsave(&i1480u->tx_list_lock, flags); + empty = list_empty(&i1480u->tx_list); + spin_unlock_irqrestore(&i1480u->tx_list_lock, flags); + if (empty) + break; + count--; + BUG_ON(count == 0); + msleep(20); + } +} -- cgit v0.10.2 From c7f736484f8ecde4dc1bc8459179c4d65f2ccbe4 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:22 +0100 Subject: wusb: add the Wireless USB include files. Common header files derived from the WUSB 1.0 specification. Signed-off-by: David Vrabel diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h new file mode 100644 index 0000000..a102561 --- /dev/null +++ b/include/linux/usb/wusb-wa.h @@ -0,0 +1,271 @@ +/* + * Wireless USB Wire Adapter constants and structures. + * + * Copyright (C) 2005-2006 Intel Corporation. + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * FIXME: organize properly, group logically + * + * All the event structures are defined in uwb/spec.h, as they are + * common to the WHCI and WUSB radio control interfaces. + * + * References: + * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8 + */ +#ifndef __LINUX_USB_WUSB_WA_H +#define __LINUX_USB_WUSB_WA_H + +/** + * Radio Command Request for the Radio Control Interface + * + * Radio Control Interface command and event codes are the same as + * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_* + */ +enum { + WA_EXEC_RC_CMD = 40, /* Radio Control command Request */ +}; + +/* Wireless Adapter Requests ([WUSB] table 8-51) */ +enum { + WUSB_REQ_ADD_MMC_IE = 20, + WUSB_REQ_REMOVE_MMC_IE = 21, + WUSB_REQ_SET_NUM_DNTS = 22, + WUSB_REQ_SET_CLUSTER_ID = 23, + WUSB_REQ_SET_DEV_INFO = 24, + WUSB_REQ_GET_TIME = 25, + WUSB_REQ_SET_STREAM_IDX = 26, + WUSB_REQ_SET_WUSB_MAS = 27, +}; + + +/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */ +enum { + WUSB_TIME_ADJ = 0, + WUSB_TIME_BPST = 1, + WUSB_TIME_WUSB = 2, +}; + +enum { + WA_ENABLE = 0x01, + WA_RESET = 0x02, + RPIPE_PAUSE = 0x1, +}; + +/* Responses from Get Status request ([WUSB] section 8.3.1.6) */ +enum { + WA_STATUS_ENABLED = 0x01, + WA_STATUS_RESETTING = 0x02 +}; + +enum rpipe_crs { + RPIPE_CRS_CTL = 0x01, + RPIPE_CRS_ISO = 0x02, + RPIPE_CRS_BULK = 0x04, + RPIPE_CRS_INTR = 0x08 +}; + +/** + * RPipe descriptor ([WUSB] section 8.5.2.11) + * + * FIXME: explain rpipes + */ +struct usb_rpipe_descriptor { + u8 bLength; + u8 bDescriptorType; + __le16 wRPipeIndex; + __le16 wRequests; + __le16 wBlocks; /* rw if 0 */ + __le16 wMaxPacketSize; /* rw? */ + u8 bHSHubAddress; /* reserved: 0 */ + u8 bHSHubPort; /* ??? FIXME ??? */ + u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */ + u8 bDeviceAddress; /* rw: Target device address */ + u8 bEndpointAddress; /* rw: Target EP address */ + u8 bDataSequence; /* ro: Current Data sequence */ + __le32 dwCurrentWindow; /* ro */ + u8 bMaxDataSequence; /* ro?: max supported seq */ + u8 bInterval; /* rw: */ + u8 bOverTheAirInterval; /* rw: */ + u8 bmAttribute; /* ro? */ + u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */ + u8 bmRetryOptions; /* rw? */ + __le16 wNumTransactionErrors; /* rw */ +} __attribute__ ((packed)); + +/** + * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4) + * + * These are the notifications coming on the notification endpoint of + * an HWA and a DWA. + */ +enum wa_notif_type { + DWA_NOTIF_RWAKE = 0x91, + DWA_NOTIF_PORTSTATUS = 0x92, + WA_NOTIF_TRANSFER = 0x93, + HWA_NOTIF_BPST_ADJ = 0x94, + HWA_NOTIF_DN = 0x95, +}; + +/** + * Wire Adapter notification header + * + * Notifications coming from a wire adapter use a common header + * defined in [WUSB] sections 8.4.5 & 8.5.4. + */ +struct wa_notif_hdr { + u8 bLength; + u8 bNotifyType; /* enum wa_notif_type */ +} __attribute__((packed)); + +/** + * HWA DN Received notification [(WUSB] section 8.5.4.2) + * + * The DNData is specified in WUSB1.0[7.6]. 
For each device + * notification we received, we just need to dispatch it. + * + * @dndata: this is really an array of notifications, but all start + * with the same header. + */ +struct hwa_notif_dn { + struct wa_notif_hdr hdr; + u8 bSourceDeviceAddr; /* from errata 2005/07 */ + u8 bmAttributes; + struct wusb_dn_hdr dndata[]; +} __attribute__((packed)); + +/* [WUSB] section 8.3.3 */ +enum wa_xfer_type { + WA_XFER_TYPE_CTL = 0x80, + WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */ + WA_XFER_TYPE_ISO = 0x82, + WA_XFER_RESULT = 0x83, + WA_XFER_ABORT = 0x84, +}; + +/* [WUSB] section 8.3.3 */ +struct wa_xfer_hdr { + u8 bLength; /* 0x18 */ + u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */ + __le16 wRPipe; /* RPipe index */ + __le32 dwTransferID; /* Host-assigned ID */ + __le32 dwTransferLength; /* Length of data to xfer */ + u8 bTransferSegment; +} __attribute__((packed)); + +struct wa_xfer_ctl { + struct wa_xfer_hdr hdr; + u8 bmAttribute; + __le16 wReserved; + struct usb_ctrlrequest baSetupData; +} __attribute__((packed)); + +struct wa_xfer_bi { + struct wa_xfer_hdr hdr; + u8 bReserved; + __le16 wReserved; +} __attribute__((packed)); + +struct wa_xfer_hwaiso { + struct wa_xfer_hdr hdr; + u8 bReserved; + __le16 wPresentationTime; + __le32 dwNumOfPackets; + /* FIXME: u8 pktdata[]? */ +} __attribute__((packed)); + +/* [WUSB] section 8.3.3.5 */ +struct wa_xfer_abort { + u8 bLength; + u8 bRequestType; + __le16 wRPipe; /* RPipe index */ + __le32 dwTransferID; /* Host-assigned ID */ +} __attribute__((packed)); + +/** + * WA Transfer Complete notification ([WUSB] section 8.3.3.3) + * + */ +struct wa_notif_xfer { + struct wa_notif_hdr hdr; + u8 bEndpoint; + u8 Reserved; +} __attribute__((packed)); + +/** Transfer result basic codes [WUSB] table 8-15 */ +enum { + WA_XFER_STATUS_SUCCESS, + WA_XFER_STATUS_HALTED, + WA_XFER_STATUS_DATA_BUFFER_ERROR, + WA_XFER_STATUS_BABBLE, + WA_XFER_RESERVED, + WA_XFER_STATUS_NOT_FOUND, + WA_XFER_STATUS_INSUFFICIENT_RESOURCE, + WA_XFER_STATUS_TRANSACTION_ERROR, + WA_XFER_STATUS_ABORTED, + WA_XFER_STATUS_RPIPE_NOT_READY, + WA_XFER_INVALID_FORMAT, + WA_XFER_UNEXPECTED_SEGMENT_NUMBER, + WA_XFER_STATUS_RPIPE_TYPE_MISMATCH, +}; + +/** [WUSB] section 8.3.3.4 */ +struct wa_xfer_result { + struct wa_notif_hdr hdr; + __le32 dwTransferID; + __le32 dwTransferLength; + u8 bTransferSegment; + u8 bTransferStatus; + __le32 dwNumOfPackets; +} __attribute__((packed)); + +/** + * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). + * + * NOTE: u16 fields are read Little Endian from the hardware. + * + * @bNumPorts is the original max number of devices that the host can + * connect; we might chop this so the stack can handle + * it. In case you need to access it, use wusbhc->ports_max + * if it is a Wireless USB WA. + */ +struct usb_wa_descriptor { + u8 bLength; + u8 bDescriptorType; + u16 bcdWAVersion; + u8 bNumPorts; /* don't use!! 
*/ + u8 bmAttributes; /* Reserved == 0 */ + u16 wNumRPipes; + u16 wRPipeMaxBlock; + u8 bRPipeBlockSize; + u8 bPwrOn2PwrGood; + u8 bNumMMCIEs; + u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ +} __attribute__((packed)); + +/** + * HWA Device Information Buffer (WUSB1.0[T8.54]) + */ +struct hwa_dev_info { + u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */ + u8 bDeviceAddress; + __le16 wPHYRates; + u8 bmDeviceAttribute; +} __attribute__((packed)); + +#endif /* #ifndef __LINUX_USB_WUSB_WA_H */ diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h new file mode 100644 index 0000000..5f401b6 --- /dev/null +++ b/include/linux/usb/wusb.h @@ -0,0 +1,376 @@ +/* + * Wireless USB Standard Definitions + * Event Size Tables + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * FIXME: organize properly, group logically + * + * All the event structures are defined in uwb/spec.h, as they are + * common to the WHCI and WUSB radio control interfaces. + */ + +#ifndef __WUSB_H__ +#define __WUSB_H__ + +#include +#include +#include +#include +#include + +/** + * WUSB Information Element header + * + * I don't know why they decided to make it different from the MBOA MAC + * IE Header; beats me. + */ +struct wuie_hdr { + u8 bLength; + u8 bIEIdentifier; +} __attribute__((packed)); + +enum { + WUIE_ID_WCTA = 0x80, + WUIE_ID_CONNECTACK, + WUIE_ID_HOST_INFO, + WUIE_ID_CHANGE_ANNOUNCE, + WUIE_ID_DEVICE_DISCONNECT, + WUIE_ID_HOST_DISCONNECT, + WUIE_ID_KEEP_ALIVE = 0x89, + WUIE_ID_ISOCH_DISCARD, + WUIE_ID_RESET_DEVICE, +}; + +/** + * Maximum number of array elements in a WUSB IE. + * + * WUSB1.0[7.5 before table 7-38] says that WUSB IEs that + * are "arrays" have to be limited to 4 elements. So we define it + * like that to ease up and submit only the needed size. + */ +#define WUIE_ELT_MAX 4 + +/** + * Wrapper for the data that defines a CHID, a CDID or a CK + * + * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of + * data. In order to avoid confusion and enforce types, we wrap it. + * + * Make it packed, as we use it in some hw definitions. + */ +struct wusb_ckhdid { + u8 data[16]; +} __attribute__((packed)); + +static const +struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; + +#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) + +/** + * WUSB IE: Host Information (WUSB1.0[7.5.2]) + * + * Used to provide information about the host to the Wireless USB + * devices in range (CHID can be used as an ASCII string). + */ +struct wuie_host_info { + struct wuie_hdr hdr; + __le16 attributes; + struct wusb_ckhdid CHID; +} __attribute__((packed)); + +/** + * WUSB IE: Connect Ack (WUSB1.0[7.5.1]) + * + * Used to acknowledge device connect requests. See note for + * WUIE_ELT_MAX. 
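+ * + * A rough usage sketch (the CDID value and device address here are made + * up; a slot whose bDeviceAddress is 0 is simply unused): + * + *	struct wuie_connect_ack ack = { + *		.hdr.bLength = sizeof(ack), + *		.hdr.bIEIdentifier = WUIE_ID_CONNECTACK, + *		.blk[0] = { .CDID = some_cdid, .bDeviceAddress = 0x27 }, + *	};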
+ */ +struct wuie_connect_ack { + struct wuie_hdr hdr; + struct { + struct wusb_ckhdid CDID; + u8 bDeviceAddress; /* 0 means unused */ + u8 bReserved; + } blk[WUIE_ELT_MAX]; +} __attribute__((packed)); + +/** + * WUSB IE Host Information Element, Connect Availability + * + * WUSB1.0[7.5.2], bmAttributes description + */ +enum { + WUIE_HI_CAP_RECONNECT = 0, + WUIE_HI_CAP_LIMITED, + WUIE_HI_CAP_RESERVED, + WUIE_HI_CAP_ALL, +}; + +/** + * WUSB IE: Channel Stop (WUSB1.0[7.5.8]) + * + * Tells devices the host is going to stop sending MMCs and will dissapear. + */ +struct wuie_channel_stop { + struct wuie_hdr hdr; + u8 attributes; + u8 timestamp[3]; +} __attribute__((packed)); + +/** + * WUSB IE: Keepalive (WUSB1.0[7.5.9]) + * + * Ask device(s) to send keepalives. + */ +struct wuie_keep_alive { + struct wuie_hdr hdr; + u8 bDeviceAddress[WUIE_ELT_MAX]; +} __attribute__((packed)); + +/** + * WUSB IE: Reset device (WUSB1.0[7.5.11]) + * + * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only + * use it for one at the time... + * + * In any case, this request is a wee bit silly: why don't they target + * by address?? + */ +struct wuie_reset { + struct wuie_hdr hdr; + struct wusb_ckhdid CDID; +} __attribute__((packed)); + +/** + * WUSB IE: Disconnect device (WUSB1.0[7.5.11]) + * + * Tell device to disconnect; we can fit 4 addresses, but we only use + * it for one at the time... + */ +struct wuie_disconnect { + struct wuie_hdr hdr; + u8 bDeviceAddress; + u8 padding; +} __attribute__((packed)); + +/** + * WUSB IE: Host disconnect ([WUSB] section 7.5.5) + * + * Tells all connected devices to disconnect. + */ +struct wuie_host_disconnect { + struct wuie_hdr hdr; +} __attribute__((packed)); + +/** + * WUSB Device Notification header (WUSB1.0[7.6]) + */ +struct wusb_dn_hdr { + u8 bType; + u8 notifdata[]; +} __attribute__((packed)); + +/** Device Notification codes (WUSB1.0[Table 7-54]) */ +enum WUSB_DN { + WUSB_DN_CONNECT = 0x01, + WUSB_DN_DISCONNECT = 0x02, + WUSB_DN_EPRDY = 0x03, + WUSB_DN_MASAVAILCHANGED = 0x04, + WUSB_DN_RWAKE = 0x05, + WUSB_DN_SLEEP = 0x06, + WUSB_DN_ALIVE = 0x07, +}; + +/** WUSB Device Notification Connect */ +struct wusb_dn_connect { + struct wusb_dn_hdr hdr; + __le16 attributes; + struct wusb_ckhdid CDID; +} __attribute__((packed)); + +static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn) +{ + return le16_to_cpu(dn->attributes) & 0xff; +} + +static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn) +{ + return (le16_to_cpu(dn->attributes) >> 8) & 0x1; +} + +static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn) +{ + return (le16_to_cpu(dn->attributes) >> 9) & 0x03; +} + +/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */ +struct wusb_dn_alive { + struct wusb_dn_hdr hdr; +} __attribute__((packed)); + +/** Device is disconnecting (WUSB1.0[7.6.2]) */ +struct wusb_dn_disconnect { + struct wusb_dn_hdr hdr; +} __attribute__((packed)); + +/* General constants */ +enum { + WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ +}; + +static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size, + const struct wusb_ckhdid *ckhdid) +{ + return scnprintf(pr_ckhdid, size, + "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx " + "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx", + ckhdid->data[0], ckhdid->data[1], + ckhdid->data[2], ckhdid->data[3], + ckhdid->data[4], ckhdid->data[5], + ckhdid->data[6], ckhdid->data[7], + ckhdid->data[8], ckhdid->data[9], + ckhdid->data[10], ckhdid->data[11], + 
ckhdid->data[12], ckhdid->data[13], + ckhdid->data[14], ckhdid->data[15]); +} + +/* + * WUSB Crypto stuff (WUSB1.0[6]) + */ + +extern const char *wusb_et_name(u8); + +/** + * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for + * the host or the device. + */ +static inline u8 wusb_key_index(int index, int type, int originator) +{ + return (originator << 6) | (type << 4) | index; +} + +#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */ +#define WUSB_KEY_INDEX_TYPE_ASSOC 1 +#define WUSB_KEY_INDEX_TYPE_GTK 2 +#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 +#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 + +/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ +struct aes_ccm_nonce { + u8 sfn[6]; /* Little Endian */ + u8 tkid[3]; /* LE */ + struct uwb_dev_addr dest_addr; + struct uwb_dev_addr src_addr; +} __attribute__((packed)); + +/* A CCM operation label, defined on WUSB1.0[6.5.x] */ +struct aes_ccm_label { + u8 data[14]; +} __attribute__((packed)); + +/* + * Input to the key derivation sequence defined in + * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the + * PRF function. + */ +struct wusb_keydvt_in { + u8 hnonce[16]; + u8 dnonce[16]; +} __attribute__((packed)); + +/* + * Output from the key derivation sequence defined in + * WUSB1.0[6.5.1]. + */ +struct wusb_keydvt_out { + u8 kck[16]; + u8 ptk[16]; +} __attribute__((packed)); + +/* Pseudo Random Function WUSB1.0[6.5] */ +extern int wusb_crypto_init(void); +extern void wusb_crypto_exit(void); +extern ssize_t wusb_prf(void *out, size_t out_size, + const u8 key[16], const struct aes_ccm_nonce *_n, + const struct aes_ccm_label *a, + const void *b, size_t blen, size_t len); + +static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 64); +} + +static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 128); +} + +static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16], + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, + const void *b, size_t blen) +{ + return wusb_prf(out, out_size, key, n, a, b, blen, 256); +} + +/* Key derivation WUSB1.0[6.5.1] */ +static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out, + const u8 key[16], + const struct aes_ccm_nonce *n, + const struct wusb_keydvt_in *keydvt_in) +{ + const struct aes_ccm_label a = { .data = "Pair-wise keys" }; + return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, + keydvt_in, sizeof(*keydvt_in)); +} + +/* + * Out-of-band MIC Generation WUSB1.0[6.5.2] + * + * Compute the MIC over @key, @n and @hs and place it in @mic_out. + * + * @mic_out: Where to place the 8 byte MIC tag + * @key: KCK from the derivation process + * @n: CCM nonce, n->sfn == 0, TKID as established in the + * process. + * @hs: Handshake struct for phase 2 of the 4-way. + * hs->bStatus and hs->bReserved are zero. + * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2] + * hs->dest_addr is the device's USB address padded with 0 + * hs->src_addr is the hosts's UWB device address + * hs->mic is ignored (as we compute that value). 
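+ *
+ * A minimal usage sketch; 'kck', 'ccm_n' and 'hs' are hypothetical
+ * locals assumed to have been filled in by the 4-way handshake code
+ * as described above:
+ *
+ *	u8 mic[8];
+ *	int rc = wusb_oob_mic(mic, kck, &ccm_n, &hs);
+ *	if (rc < 0)
+ *		return rc;		/* PRF computation failed */
+ *	/* mic[] now holds the 8-byte tag to place in hs.MIC */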
+ */ +static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16], + const struct aes_ccm_nonce *n, + const struct usb_handshake *hs) +{ + const struct aes_ccm_label a = { .data = "out-of-bandMIC" }; + return wusb_prf_64(mic_out, 8, key, n, &a, + hs, sizeof(*hs) - sizeof(hs->MIC)); +} + +#endif /* #ifndef __WUSB_H__ */ -- cgit v0.10.2 From 90ff96f22426a9d1a06df97dead0a9098facb567 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:23 +0100 Subject: wusb: add the Wireless USB core Add support for Ceritified Wireless USB 1.0 to the USB stack. This has been split into several patches for easier review. core (this patch): - host controller infrastructure - cluster reservation - UWB PAL registration - fake root hub protocol: - MMC management (start/stop, managing IEs) - device connection security: - device authentication and authorization build-system: - Kconfig and Kbuild files Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c new file mode 100644 index 0000000..7897a19 --- /dev/null +++ b/drivers/usb/wusbcore/dev-sysfs.c @@ -0,0 +1,143 @@ +/* + * WUSB devices + * sysfs bindings + * + * Copyright (C) 2007 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Get them out of the way... 
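+ *
+ * Three per-device attributes are created below:
+ *
+ * - wusb_disconnect (write only): writing a non-zero value forcibly
+ *   disables the device sitting on that fake port.
+ *
+ * - wusb_cdid (read only): prints the device's CDID as sixteen
+ *   space-separated hex bytes.
+ *
+ * - wusb_ck (write only): takes a connection key as sixteen
+ *   space-separated hex bytes and kicks off the 4-way handshake
+ *   (wusb_dev_4way_handshake()) with that key.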
+ */ + +#include +#include +#include +#include "wusbhc.h" + +#undef D_LOCAL +#define D_LOCAL 4 +#include + +static ssize_t wusb_disconnect_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct usb_device *usb_dev; + struct wusbhc *wusbhc; + unsigned command; + u8 port_idx; + + if (sscanf(buf, "%u", &command) != 1) + return -EINVAL; + if (command == 0) + return size; + usb_dev = to_usb_device(dev); + wusbhc = wusbhc_get_by_usb_dev(usb_dev); + if (wusbhc == NULL) + return -ENODEV; + + mutex_lock(&wusbhc->mutex); + port_idx = wusb_port_no_to_idx(usb_dev->portnum); + __wusbhc_dev_disable(wusbhc, port_idx); + mutex_unlock(&wusbhc->mutex); + wusbhc_put(wusbhc); + return size; +} +static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store); + +static ssize_t wusb_cdid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t result; + struct wusb_dev *wusb_dev; + + wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev)); + if (wusb_dev == NULL) + return -ENODEV; + result = ckhdid_printf(buf, PAGE_SIZE, &wusb_dev->cdid); + strcat(buf, "\n"); + wusb_dev_put(wusb_dev); + return result + 1; +} +static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL); + +static ssize_t wusb_ck_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int result; + struct usb_device *usb_dev; + struct wusbhc *wusbhc; + struct wusb_ckhdid ck; + + result = sscanf(buf, + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx\n", + &ck.data[0] , &ck.data[1], + &ck.data[2] , &ck.data[3], + &ck.data[4] , &ck.data[5], + &ck.data[6] , &ck.data[7], + &ck.data[8] , &ck.data[9], + &ck.data[10], &ck.data[11], + &ck.data[12], &ck.data[13], + &ck.data[14], &ck.data[15]); + if (result != 16) + return -EINVAL; + + usb_dev = to_usb_device(dev); + wusbhc = wusbhc_get_by_usb_dev(usb_dev); + if (wusbhc == NULL) + return -ENODEV; + result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck); + memset(&ck, 0, sizeof(ck)); + wusbhc_put(wusbhc); + return result < 0 ? result : size; +} +static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store); + +static struct attribute *wusb_dev_attrs[] = { + &dev_attr_wusb_disconnect.attr, + &dev_attr_wusb_cdid.attr, + &dev_attr_wusb_ck.attr, + NULL, +}; + +static struct attribute_group wusb_dev_attr_group = { + .name = NULL, /* we want them in the same directory */ + .attrs = wusb_dev_attrs, +}; + +int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev, + struct wusb_dev *wusb_dev) +{ + int result = sysfs_create_group(&usb_dev->dev.kobj, + &wusb_dev_attr_group); + struct device *dev = &usb_dev->dev; + if (result < 0) + dev_err(dev, "Cannot register WUSB-dev attributes: %d\n", + result); + return result; +} + +void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev) +{ + struct usb_device *usb_dev = wusb_dev->usb_dev; + if (usb_dev) + sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group); +} diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c new file mode 100644 index 0000000..cc126b4 --- /dev/null +++ b/drivers/usb/wusbcore/pal.c @@ -0,0 +1,39 @@ +/* + * Wireless USB Host Controller + * UWB Protocol Adaptation Layer (PAL) glue. + * + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include "wusbhc.h" + +/** + * wusbhc_pal_register - register the WUSB HC as a UWB PAL + * @wusbhc: the WUSB HC + */ +int wusbhc_pal_register(struct wusbhc *wusbhc) +{ + uwb_pal_init(&wusbhc->pal); + + return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal); +} + +/** + * wusbhc_pal_register - unregister the WUSB HC as a UWB PAL + * @wusbhc: the WUSB HC + */ +void wusbhc_pal_unregister(struct wusbhc *wusbhc) +{ + uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal); +} diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c new file mode 100644 index 0000000..fc63e77 --- /dev/null +++ b/drivers/usb/wusbcore/reservation.c @@ -0,0 +1,115 @@ +/* + * WUSB cluster reservation management + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +#include "wusbhc.h" + +/* + * WUSB cluster reservations are multicast reservations with the + * broadcast cluster ID (BCID) as the target DevAddr. + * + * FIXME: consider adjusting the reservation depending on what devices + * are attached. + */ + +static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream, + const struct uwb_mas_bm *mas) +{ + if (mas == NULL) + mas = &uwb_mas_bm_zero; + return wusbhc->bwa_set(wusbhc, stream, mas); +} + +/** + * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback + * @rsv: the reservation + * + * Either set or clear the HC's view of the reservation. + * + * FIXME: when a reservation is denied the HC should be stopped. 
+ */ +static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv) +{ + struct wusbhc *wusbhc = rsv->pal_priv; + struct device *dev = wusbhc->dev; + char buf[72]; + + switch (rsv->state) { + case UWB_RSV_STATE_O_ESTABLISHED: + bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); + dev_dbg(dev, "established reservation: %s\n", buf); + wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas); + break; + case UWB_RSV_STATE_NONE: + dev_dbg(dev, "removed reservation\n"); + wusbhc_bwa_set(wusbhc, 0, NULL); + wusbhc->rsv = NULL; + break; + default: + dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); + break; + } +} + + +/** + * wusbhc_rsv_establish - establish a reservation for the cluster + * @wusbhc: the WUSB HC requesting a bandwith reservation + */ +int wusbhc_rsv_establish(struct wusbhc *wusbhc) +{ + struct uwb_rc *rc = wusbhc->uwb_rc; + struct uwb_rsv *rsv; + struct uwb_dev_addr bcid; + int ret; + + rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc); + if (rsv == NULL) + return -ENOMEM; + + bcid.data[0] = wusbhc->cluster_id; + bcid.data[1] = 0; + + rsv->owner = &rc->uwb_dev; + rsv->target.type = UWB_RSV_TARGET_DEVADDR; + rsv->target.devaddr = bcid; + rsv->type = UWB_DRP_TYPE_PRIVATE; + rsv->max_mas = 256; + rsv->min_mas = 16; /* one MAS per zone? */ + rsv->sparsity = 16; /* at least one MAS in each zone? */ + rsv->is_multicast = true; + + ret = uwb_rsv_establish(rsv); + if (ret == 0) + wusbhc->rsv = rsv; + else + uwb_rsv_destroy(rsv); + return ret; +} + + +/** + * wusbhc_rsv_terminate - terminate any cluster reservation + * @wusbhc: the WUSB host whose reservation is to be terminated + */ +void wusbhc_rsv_terminate(struct wusbhc *wusbhc) +{ + if (wusbhc->rsv) + uwb_rsv_terminate(wusbhc->rsv); +} diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c new file mode 100644 index 0000000..267a643 --- /dev/null +++ b/drivers/usb/wusbcore/rh.c @@ -0,0 +1,477 @@ +/* + * Wireless USB Host Controller + * Root Hub operations + * + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * We fake a root hub that has fake ports (as many as simultaneous + * devices the Wireless USB Host Controller can deal with). For each + * port we keep an state in @wusbhc->port[index] identical to the one + * specified in the USB2.0[ch11] spec and some extra device + * information that complements the one in 'struct usb_device' (as + * this lacs a hcpriv pointer). + * + * Note this is common to WHCI and HWA host controllers. + * + * Through here we enable most of the state changes that the USB stack + * will use to connect or disconnect devices. We need to do some + * forced adaptation of Wireless USB device states vs. 
wired: + * + * USB: WUSB: + * + * Port Powered-off port slot n/a + * Powered-on port slot available + * Disconnected port slot available + * Connected port slot assigned device + * device sent DN_Connect + * device was authenticated + * Enabled device is authenticated, transitioned + * from unauth -> auth -> default address + * -> enabled + * Reset disconnect + * Disable disconnect + * + * This maps the standard USB port states with the WUSB device states + * so we can fake ports without having to modify the USB stack. + * + * FIXME: this process will change in the future + * + * + * ENTRY POINTS + * + * Our entry points into here are, as in hcd.c, the USB stack root hub + * ops defined in the usb_hcd struct: + * + * wusbhc_rh_status_data() Provide hub and port status data bitmap + * + * wusbhc_rh_control() Execution of all the major requests + * you can do to a hub (Set|Clear + * features, get descriptors, status, etc). + * + * wusbhc_rh_[suspend|resume]() That + * + * wusbhc_rh_start_port_reset() ??? unimplemented + */ +#include "wusbhc.h" + +#define D_LOCAL 0 +#include + +/* + * Reset a fake port + * + * This can be called to reset a port from any other state or to reset + * it when connecting. In Wireless USB they are different; when doing + * a new connect that involves going over the authentication. When + * just reseting, its a different story. + * + * The Linux USB stack resets a port twice before it considers it + * enabled, so we have to detect and ignore that. + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. + * + * Supposedly we are the only thread accesing @wusbhc->port; in any + * case, maybe we should move the mutex locking from + * wusbhc_devconnect_auth() to here. + * + * @port_idx refers to the wusbhc's port index, not the USB port number + */ +static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx) +{ + int result = 0; + struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); + + d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n", + wusbhc, port_idx); + if (port->reset_count == 0) { + wusbhc_devconnect_auth(wusbhc, port_idx); + port->reset_count++; + } else if (port->reset_count == 1) + /* see header */ + d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx " + "%u\n", port_idx); + else + result = wusbhc_dev_reset(wusbhc, port_idx); + d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n", + wusbhc, port_idx, result); + return result; +} + +/* + * Return the hub change status bitmap + * + * The bits in the change status bitmap are cleared when a + * ClearPortFeature request is issued (USB2.0[11.12.3,11.12.4]. + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. + * + * WARNING!! This gets called from atomic context; we cannot get the + * mutex--the only race condition we can find is some bit + * changing just after we copy it, which shouldn't be too + * big of a problem [and we can't make it an spinlock + * because other parts need to take it and sleep] . + * + * @usb_hcd is refcounted, so it won't dissapear under us + * and before killing a host, the polling of the root hub + * would be stopped anyway. 
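+ *
+ * Worked example (assuming a 3-port host): bit 0 is the hub itself
+ * and bit N+1 corresponds to fake port index N, so a pending change
+ * on port index 1 alone yields a 1-byte bitmap of 0x04.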
+ */ +int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + size_t cnt, size; + unsigned long *buf = (unsigned long *) _buf; + + d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc); + /* WE DON'T LOCK, see comment */ + size = wusbhc->ports_max + 1 /* hub bit */; + size = (size + 8 - 1) / 8; /* round to bytes */ + for (cnt = 0; cnt < wusbhc->ports_max; cnt++) + if (wusb_port_by_idx(wusbhc, cnt)->change) + set_bit(cnt + 1, buf); + else + clear_bit(cnt + 1, buf); + d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size); + d_dump(1, wusbhc->dev, _buf, size); + return size; +} +EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); + +/* + * Return the hub's desciptor + * + * NOTE: almost cut and paste from ehci-hub.c + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked + */ +static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue, + u16 wIndex, + struct usb_hub_descriptor *descr, + u16 wLength) +{ + u16 temp = 1 + (wusbhc->ports_max / 8); + u8 length = 7 + 2 * temp; + + if (wLength < length) + return -ENOSPC; + descr->bDescLength = 7 + 2 * temp; + descr->bDescriptorType = 0x29; /* HUB type */ + descr->bNbrPorts = wusbhc->ports_max; + descr->wHubCharacteristics = cpu_to_le16( + 0x00 /* All ports power at once */ + | 0x00 /* not part of compound device */ + | 0x10 /* No overcurrent protection */ + | 0x00 /* 8 FS think time FIXME ?? */ + | 0x00); /* No port indicators */ + descr->bPwrOn2PwrGood = 0; + descr->bHubContrCurrent = 0; + /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ + memset(&descr->bitmap[0], 0, temp); + memset(&descr->bitmap[temp], 0xff, temp); + return 0; +} + +/* + * Clear a hub feature + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. + * + * Nothing to do, so no locking needed ;) + */ +static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) +{ + int result; + struct device *dev = wusbhc->dev; + + d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature); + switch (feature) { + case C_HUB_LOCAL_POWER: + /* FIXME: maybe plug bit 0 to the power input status, + * if any? + * see wusbhc_rh_get_hub_status() */ + case C_HUB_OVER_CURRENT: + result = 0; + break; + default: + result = -EPIPE; + } + d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result); + return result; +} + +/* + * Return hub status (it is always zero...) + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. + * + * Nothing to do, so no locking needed ;) + */ +static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf, + u16 wLength) +{ + /* FIXME: maybe plug bit 0 to the power input status (if any)? */ + *buf = 0; + return 0; +} + +/* + * Set a port feature + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. + */ +static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, + u8 selector, u8 port_idx) +{ + int result = -EINVAL; + struct device *dev = wusbhc->dev; + + d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n", + feature, selector, port_idx); + + if (port_idx > wusbhc->ports_max) + goto error; + + switch (feature) { + /* According to USB2.0[11.24.2.13]p2, these features + * are not required to be implemented. 
*/ + case USB_PORT_FEAT_C_OVER_CURRENT: + case USB_PORT_FEAT_C_ENABLE: + case USB_PORT_FEAT_C_SUSPEND: + case USB_PORT_FEAT_C_CONNECTION: + case USB_PORT_FEAT_C_RESET: + result = 0; + break; + + case USB_PORT_FEAT_POWER: + /* No such thing, but we fake it works */ + mutex_lock(&wusbhc->mutex); + wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; + mutex_unlock(&wusbhc->mutex); + result = 0; + break; + case USB_PORT_FEAT_RESET: + result = wusbhc_rh_port_reset(wusbhc, port_idx); + break; + case USB_PORT_FEAT_ENABLE: + case USB_PORT_FEAT_SUSPEND: + dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", + port_idx, feature, selector); + result = -ENOSYS; + break; + default: + dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", + port_idx, feature, selector); + result = -EPIPE; + break; + } +error: + d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n", + feature, selector, port_idx, result); + return result; +} + +/* + * Clear a port feature... + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. + */ +static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, + u8 selector, u8 port_idx) +{ + int result = -EINVAL; + struct device *dev = wusbhc->dev; + + d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n", + wusbhc, feature, selector, port_idx); + + if (port_idx > wusbhc->ports_max) + goto error; + + mutex_lock(&wusbhc->mutex); + result = 0; + switch (feature) { + case USB_PORT_FEAT_POWER: /* fake port always on */ + /* According to USB2.0[11.24.2.7.1.4], no need to implement? */ + case USB_PORT_FEAT_C_OVER_CURRENT: + break; + case USB_PORT_FEAT_C_RESET: + wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET; + break; + case USB_PORT_FEAT_C_CONNECTION: + wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION; + break; + case USB_PORT_FEAT_ENABLE: + __wusbhc_dev_disable(wusbhc, port_idx); + break; + case USB_PORT_FEAT_C_ENABLE: + wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE; + break; + case USB_PORT_FEAT_SUSPEND: + case USB_PORT_FEAT_C_SUSPEND: + case 0xffff: /* ??? FIXME */ + dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", + port_idx, feature, selector); + /* dump_stack(); */ + result = -ENOSYS; + break; + default: + dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n", + port_idx, feature, selector); + result = -EPIPE; + break; + } + mutex_unlock(&wusbhc->mutex); +error: + d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = " + "%d\n", wusbhc, feature, selector, port_idx, result); + return result; +} + +/* + * Return the port's status + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. + */ +static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, + u32 *_buf, u16 wLength) +{ + int result = -EINVAL; + u16 *buf = (u16 *) _buf; + + d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n", + wusbhc, port_idx, wLength); + if (port_idx > wusbhc->ports_max) + goto error; + mutex_lock(&wusbhc->mutex); + buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); + buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); + result = 0; + mutex_unlock(&wusbhc->mutex); +error: + d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result); + d_dump(1, wusbhc->dev, _buf, wLength); + return result; +} + +/* + * Entry point for Root Hub operations + * + * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. 
+ */ +int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + int result = -ENOSYS; + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + + switch (reqntype) { + case GetHubDescriptor: + result = wusbhc_rh_get_hub_descr( + wusbhc, wValue, wIndex, + (struct usb_hub_descriptor *) buf, wLength); + break; + case ClearHubFeature: + result = wusbhc_rh_clear_hub_feat(wusbhc, wValue); + break; + case GetHubStatus: + result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength); + break; + + case SetPortFeature: + result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8, + (wIndex & 0xff) - 1); + break; + case ClearPortFeature: + result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8, + (wIndex & 0xff) - 1); + break; + case GetPortStatus: + result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1, + (u32 *)buf, wLength); + break; + + case SetHubFeature: + default: + dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) " + "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype, + wValue, wIndex, buf, wLength); + /* dump_stack(); */ + result = -ENOSYS; + } + return result; +} +EXPORT_SYMBOL_GPL(wusbhc_rh_control); + +int wusbhc_rh_suspend(struct usb_hcd *usb_hcd) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, + usb_hcd, wusbhc); + /* dump_stack(); */ + return -ENOSYS; +} +EXPORT_SYMBOL_GPL(wusbhc_rh_suspend); + +int wusbhc_rh_resume(struct usb_hcd *usb_hcd) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, + usb_hcd, wusbhc); + /* dump_stack(); */ + return -ENOSYS; +} +EXPORT_SYMBOL_GPL(wusbhc_rh_resume); + +int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n", + __func__, usb_hcd, wusbhc, port_idx); + WARN_ON(1); + return -ENOSYS; +} +EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset); + +static void wusb_port_init(struct wusb_port *port) +{ + port->status |= USB_PORT_STAT_HIGH_SPEED; +} + +/* + * Alloc fake port specific fields and status. + */ +int wusbhc_rh_create(struct wusbhc *wusbhc) +{ + int result = -ENOMEM; + size_t port_size, itr; + port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]); + wusbhc->port = kzalloc(port_size, GFP_KERNEL); + if (wusbhc->port == NULL) + goto error_port_alloc; + for (itr = 0; itr < wusbhc->ports_max; itr++) + wusb_port_init(&wusbhc->port[itr]); + result = 0; +error_port_alloc: + return result; +} + +void wusbhc_rh_destroy(struct wusbhc *wusbhc) +{ + kfree(wusbhc->port); +} diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c new file mode 100644 index 0000000..1149b1e --- /dev/null +++ b/drivers/usb/wusbcore/wusbhc.c @@ -0,0 +1,416 @@ +/* + * Wireless USB Host Controller + * sysfs glue, wusbcore module support and life cycle management + * + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Creation/destruction of wusbhc is split in two parts; that that + * doesn't require the HCD to be added (wusbhc_{create,destroy}) and + * the one that requires (phase B, wusbhc_b_{create,destroy}). + * + * This is so because usb_add_hcd() will start the HC, and thus, all + * the HC specific stuff has to be already initialiazed (like sysfs + * thingies). + */ +#include +#include +#include "wusbhc.h" + +/** + * Extract the wusbhc that corresponds to a USB Host Controller class device + * + * WARNING! Apply only if @dev is that of a + * wusbhc.usb_hcd.self->class_dev; otherwise, you loose. + */ +static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev) +{ + struct usb_bus *usb_bus = dev_get_drvdata(dev); + struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus); + return usb_hcd_to_wusbhc(usb_hcd); +} + +/* + * Show & store the current WUSB trust timeout + * + * We don't do locking--it is an 'atomic' value. + * + * The units that we store/show are always MILLISECONDS. However, the + * value of trust_timeout is jiffies. + */ +static ssize_t wusb_trust_timeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout); +} + +static ssize_t wusb_trust_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); + ssize_t result = -ENOSYS; + unsigned trust_timeout; + + result = sscanf(buf, "%u", &trust_timeout); + if (result != 1) { + result = -EINVAL; + goto out; + } + /* FIXME: maybe we should check for range validity? */ + wusbhc->trust_timeout = trust_timeout; + cancel_delayed_work(&wusbhc->keep_alive_timer); + flush_workqueue(wusbd); + queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, + (trust_timeout * CONFIG_HZ)/1000/2); +out: + return result < 0 ? result : size; +} +static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show, + wusb_trust_timeout_store); + +/* + * Show & store the current WUSB CHID + */ +static ssize_t wusb_chid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); + ssize_t result = 0; + + if (wusbhc->wuie_host_info != NULL) + result += ckhdid_printf(buf, PAGE_SIZE, + &wusbhc->wuie_host_info->CHID); + return result; +} + +/* + * Store a new CHID + * + * This will (FIXME) trigger many changes. + * + * - Send an all zeros CHID and it will stop the controller + * - Send a non-zero CHID and it will start it + * (unless it was started, it will just change the CHID, + * diconnecting all devices first). + * + * So first we scan the MMC we are sent and then we act on it. We + * read it in the same format as we print it, an ASCII string of 16 + * hex bytes. + * + * See wusbhc_chid_set() for more info. 
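+ *
+ * For example, writing the string
+ *
+ *	"00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff"
+ *
+ * to the 'wusb_chid' attribute (created under
+ * /sys/class/usb_host/usb_hostX/ by wusbhc_b_create()) sets that CHID
+ * and starts the channel; writing sixteen zero bytes stops it again.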
+ */ +static ssize_t wusb_chid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev); + struct wusb_ckhdid chid; + ssize_t result; + + result = sscanf(buf, + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx\n", + &chid.data[0] , &chid.data[1] , + &chid.data[2] , &chid.data[3] , + &chid.data[4] , &chid.data[5] , + &chid.data[6] , &chid.data[7] , + &chid.data[8] , &chid.data[9] , + &chid.data[10], &chid.data[11], + &chid.data[12], &chid.data[13], + &chid.data[14], &chid.data[15]); + if (result != 16) { + dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): " + "%d\n", (int)result); + return -EINVAL; + } + result = wusbhc_chid_set(wusbhc, &chid); + return result < 0 ? result : size; +} +static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store); + +/* Group all the WUSBHC attributes */ +static struct attribute *wusbhc_attrs[] = { + &dev_attr_wusb_trust_timeout.attr, + &dev_attr_wusb_chid.attr, + NULL, +}; + +static struct attribute_group wusbhc_attr_group = { + .name = NULL, /* we want them in the same directory */ + .attrs = wusbhc_attrs, +}; + +/* + * Create a wusbhc instance + * + * NOTEs: + * + * - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been + * initialized but not added. + * + * - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling. + * + * - fill out wusbhc->uwb_rc and refcount it before calling + * - fill out the wusbhc->sec_modes array + */ +int wusbhc_create(struct wusbhc *wusbhc) +{ + int result = 0; + + wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS; + mutex_init(&wusbhc->mutex); + result = wusbhc_mmcie_create(wusbhc); + if (result < 0) + goto error_mmcie_create; + result = wusbhc_devconnect_create(wusbhc); + if (result < 0) + goto error_devconnect_create; + result = wusbhc_rh_create(wusbhc); + if (result < 0) + goto error_rh_create; + result = wusbhc_sec_create(wusbhc); + if (result < 0) + goto error_sec_create; + result = wusbhc_pal_register(wusbhc); + if (result < 0) + goto error_pal_register; + return 0; + +error_pal_register: + wusbhc_sec_destroy(wusbhc); +error_sec_create: + wusbhc_rh_destroy(wusbhc); +error_rh_create: + wusbhc_devconnect_destroy(wusbhc); +error_devconnect_create: + wusbhc_mmcie_destroy(wusbhc); +error_mmcie_create: + return result; +} +EXPORT_SYMBOL_GPL(wusbhc_create); + +static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc) +{ + return &wusbhc->usb_hcd.self.controller->kobj; +} + +/* + * Phase B of a wusbhc instance creation + * + * Creates fields that depend on wusbhc->usb_hcd having been + * added. This is where we create the sysfs files in + * /sys/class/usb_host/usb_hostX/. + * + * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper + * layer (hwahc or whci) + */ +int wusbhc_b_create(struct wusbhc *wusbhc) +{ + int result = 0; + struct device *dev = wusbhc->usb_hcd.self.controller; + + result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); + if (result < 0) { + dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result); + goto error_create_attr_group; + } + /* Yep, I plan to add stuff here... 
*/ +error_create_attr_group: + return result; +} +EXPORT_SYMBOL_GPL(wusbhc_b_create); + +void wusbhc_b_destroy(struct wusbhc *wusbhc) +{ + sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); +} +EXPORT_SYMBOL_GPL(wusbhc_b_destroy); + +void wusbhc_destroy(struct wusbhc *wusbhc) +{ + wusbhc_pal_unregister(wusbhc); + wusbhc_sec_destroy(wusbhc); + wusbhc_rh_destroy(wusbhc); + wusbhc_devconnect_destroy(wusbhc); + wusbhc_mmcie_destroy(wusbhc); +} +EXPORT_SYMBOL_GPL(wusbhc_destroy); + +struct workqueue_struct *wusbd; +EXPORT_SYMBOL_GPL(wusbd); + +/* + * WUSB Cluster ID allocation map + * + * Each WUSB bus in a channel is identified with a Cluster Id in the + * unauth address pace (WUSB1.0[4.3]). We take the range 0xe0 to 0xff + * (that's space for 31 WUSB controllers, as 0xff can't be taken). We + * start taking from 0xff, 0xfe, 0xfd... (hence the += or -= 0xff). + * + * For each one we taken, we pin it in the bitap + */ +#define CLUSTER_IDS 32 +static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS); +static DEFINE_SPINLOCK(wusb_cluster_ids_lock); + +/* + * Get a WUSB Cluster ID + * + * Need to release with wusb_cluster_id_put() when done w/ it. + */ +/* FIXME: coordinate with the choose_addres() from the USB stack */ +/* we want to leave the top of the 128 range for cluster addresses and + * the bottom for device addresses (as we map them one on one with + * ports). */ +u8 wusb_cluster_id_get(void) +{ + u8 id; + spin_lock(&wusb_cluster_ids_lock); + id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS); + if (id > CLUSTER_IDS) { + id = 0; + goto out; + } + set_bit(id, wusb_cluster_id_table); + id = (u8) 0xff - id; +out: + spin_unlock(&wusb_cluster_ids_lock); + return id; + +} +EXPORT_SYMBOL_GPL(wusb_cluster_id_get); + +/* + * Release a WUSB Cluster ID + * + * Obtained it with wusb_cluster_id_get() + */ +void wusb_cluster_id_put(u8 id) +{ + id = 0xff - id; + BUG_ON(id >= CLUSTER_IDS); + spin_lock(&wusb_cluster_ids_lock); + WARN_ON(!test_bit(id, wusb_cluster_id_table)); + clear_bit(id, wusb_cluster_id_table); + spin_unlock(&wusb_cluster_ids_lock); +} +EXPORT_SYMBOL_GPL(wusb_cluster_id_put); + +/** + * wusbhc_giveback_urb - return an URB to the USB core + * @wusbhc: the host controller the URB is from. + * @urb: the URB. + * @status: the URB's status. + * + * Return an URB to the USB core doing some additional WUSB specific + * processing. + * + * - After a successful transfer, update the trust timeout timestamp + * for the WUSB device. + * + * - [WUSB] sections 4.13 and 7.5.1 specifies the stop retrasmittion + * condition for the WCONNECTACK_IE is that the host has observed + * the associated device responding to a control transfer. + */ +void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status) +{ + struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); + + if (status == 0) { + wusb_dev->entry_ts = jiffies; + + /* wusbhc_devconnect_acked() can't be called from from + atomic context so defer it to a work queue. */ + if (!list_empty(&wusb_dev->cack_node)) + queue_work(wusbd, &wusb_dev->devconnect_acked_work); + } + + usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status); +} +EXPORT_SYMBOL_GPL(wusbhc_giveback_urb); + +/** + * wusbhc_reset_all - reset the HC hardware + * @wusbhc: the host controller to reset. + * + * Request a full hardware reset of the chip. This will also reset + * the radio controller and any other PALs. 
+ */ +void wusbhc_reset_all(struct wusbhc *wusbhc) +{ + uwb_rc_reset_all(wusbhc->uwb_rc); +} +EXPORT_SYMBOL_GPL(wusbhc_reset_all); + +static struct notifier_block wusb_usb_notifier = { + .notifier_call = wusb_usb_ncb, + .priority = INT_MAX /* Need to be called first of all */ +}; + +static int __init wusbcore_init(void) +{ + int result; + result = wusb_crypto_init(); + if (result < 0) + goto error_crypto_init; + /* WQ is singlethread because we need to serialize notifications */ + wusbd = create_singlethread_workqueue("wusbd"); + if (wusbd == NULL) { + result = -ENOMEM; + printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n"); + goto error_wusbd_create; + } + usb_register_notify(&wusb_usb_notifier); + bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS); + set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */ + return 0; + +error_wusbd_create: + wusb_crypto_exit(); +error_crypto_init: + return result; + +} +module_init(wusbcore_init); + +static void __exit wusbcore_exit(void) +{ + clear_bit(0, wusb_cluster_id_table); + if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) { + char buf[256]; + bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table, + CLUSTER_IDS); + printk(KERN_ERR "BUG: WUSB Cluster IDs not released " + "on exit: %s\n", buf); + WARN_ON(1); + } + usb_unregister_notify(&wusb_usb_notifier); + destroy_workqueue(wusbd); + wusb_crypto_exit(); +} +module_exit(wusbcore_exit); + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Wireless USB core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h new file mode 100644 index 0000000..d0c1324 --- /dev/null +++ b/drivers/usb/wusbcore/wusbhc.h @@ -0,0 +1,495 @@ +/* + * Wireless USB Host Controller + * Common infrastructure for WHCI and HWA WUSB-HC drivers + * + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * This driver implements parts common to all Wireless USB Host + * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used + * by: + * + * - hwahc: HWA, USB-dongle that implements a Wireless USB host + * controller, (Wireless USB 1.0 Host-Wire-Adapter specification). + * + * - whci: WHCI, a PCI card with a wireless host controller + * (Wireless Host Controller Interface 1.0 specification). + * + * Check out the Design-overview.txt file in the source documentation + * for other details on the implementation. + * + * Main blocks: + * + * rh Root Hub emulation (part of the HCD glue) + * + * devconnect Handle all the issues related to device connection, + * authentication, disconnection, timeout, reseting, + * keepalives, etc. + * + * mmc MMC IE broadcasting handling + * + * A host controller driver just initializes its stuff and as part of + * that, creates a 'struct wusbhc' instance that handles all the + * common WUSB mechanisms. 
Links in the function ops that are specific + * to it and then registers the host controller. Ready to run. + */ + +#ifndef __WUSBHC_H__ +#define __WUSBHC_H__ + +#include +#include +#include +#include +#include +/* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not + * public */ +#include +#include +#include + + +/** + * Wireless USB device + * + * Describe a WUSB device connected to the cluster. This struct + * belongs to the 'struct wusb_port' it is attached to and it is + * responsible for putting and clearing the pointer to it. + * + * Note this "complements" the 'struct usb_device' that the usb_hcd + * keeps for each connected USB device. However, it extends some + * information that is not available (there is no hcpriv ptr in it!) + * *and* most importantly, it's life cycle is different. It is created + * as soon as we get a DN_Connect (connect request notification) from + * the device through the WUSB host controller; the USB stack doesn't + * create the device until we authenticate it. FIXME: this will + * change. + * + * @bos: This is allocated when the BOS descriptors are read from + * the device and freed upon the wusb_dev struct dying. + * @wusb_cap_descr: points into @bos, and has been verified to be size + * safe. + */ +struct wusb_dev { + struct kref refcnt; + struct wusbhc *wusbhc; + struct list_head cack_node; /* Connect-Ack list */ + u8 port_idx; + u8 addr; + u8 beacon_type:4; + struct usb_encryption_descriptor ccm1_etd; + struct wusb_ckhdid cdid; + unsigned long entry_ts; + struct usb_bos_descriptor *bos; + struct usb_wireless_cap_descriptor *wusb_cap_descr; + struct uwb_mas_bm availability; + struct work_struct devconnect_acked_work; + struct urb *set_gtk_urb; + struct usb_ctrlrequest *set_gtk_req; + struct usb_device *usb_dev; +}; + +#define WUSB_DEV_ADDR_UNAUTH 0x80 + +static inline void wusb_dev_init(struct wusb_dev *wusb_dev) +{ + kref_init(&wusb_dev->refcnt); + /* no need to init the cack_node */ +} + +extern void wusb_dev_destroy(struct kref *_wusb_dev); + +static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev) +{ + kref_get(&wusb_dev->refcnt); + return wusb_dev; +} + +static inline void wusb_dev_put(struct wusb_dev *wusb_dev) +{ + kref_put(&wusb_dev->refcnt, wusb_dev_destroy); +} + +/** + * Wireless USB Host Controlller root hub "fake" ports + * (state and device information) + * + * Wireless USB is wireless, so there are no ports; but we + * fake'em. Each RC can connect a max of devices at the same time + * (given in the Wireless Adapter descriptor, bNumPorts or WHCI's + * caps), referred to in wusbhc->ports_max. + * + * See rh.c for more information. + * + * The @status and @change use the same bits as in USB2.0[11.24.2.7], + * so we don't have to do much when getting the port's status. + * + * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10], + * include/linux/usb_ch9.h (#define USB_PORT_STAT_*) + */ +struct wusb_port { + u16 status; + u16 change; + struct wusb_dev *wusb_dev; /* connected device's info */ + unsigned reset_count; + u32 ptk_tkid; +}; + +/** + * WUSB Host Controller specifics + * + * All fields that are common to all Wireless USB controller types + * (HWA and WHCI) are grouped here. Host Controller + * functions/operations that only deal with general Wireless USB HC + * issues use this data type to refer to the host. + * + * @usb_hcd Instantiation of a USB host controller + * (initialized by upper layer [HWA=HC or WHCI]. 
+ * + * @dev Device that implements this; initialized by the + * upper layer (HWA-HC, WHCI...); this device should + * have a refcount. + * + * @trust_timeout After this time without hearing for device + * activity, we consider the device gone and we have to + * re-authenticate. + * + * Can be accessed w/o locking--however, read to a + * local variable then use. + * + * @chid WUSB Cluster Host ID: this is supposed to be a + * unique value that doesn't change across reboots (so + * that your devices do not require re-association). + * + * Read/Write protected by @mutex + * + * @dev_info This array has ports_max elements. It is used to + * give the HC information about the WUSB devices (see + * 'struct wusb_dev_info'). + * + * For HWA we need to allocate it in heap; for WHCI it + * needs to be permanently mapped, so we keep it for + * both and make it easy. Call wusbhc->dev_info_set() + * to update an entry. + * + * @ports_max Number of simultaneous device connections (fake + * ports) this HC will take. Read-only. + * + * @port Array of port status for each fake root port. Guaranteed to + * always be the same lenght during device existence + * [this allows for some unlocked but referenced reading]. + * + * @mmcies_max Max number of Information Elements this HC can send + * in its MMC. Read-only. + * + * @mmcie_add HC specific operation (WHCI or HWA) for adding an + * MMCIE. + * + * @mmcie_rm HC specific operation (WHCI or HWA) for removing an + * MMCIE. + * + * @enc_types Array which describes the encryptions methods + * supported by the host as described in WUSB1.0 -- + * one entry per supported method. As of WUSB1.0 there + * is only four methods, we make space for eight just in + * case they decide to add some more (and pray they do + * it in sequential order). if 'enc_types[enc_method] + * != 0', then it is supported by the host. enc_method + * is USB_ENC_TYPE*. + * + * @set_ptk: Set the PTK and enable encryption for a device. Or, if + * the supplied key is NULL, disable encryption for that + * device. + * + * @set_gtk: Set the GTK to be used for all future broadcast packets + * (i.e., MMCs). With some hardware, setting the GTK may start + * MMC transmission. + * + * NOTE: + * + * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid + * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev + * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a + * refcount on it). + * + * Most of the times when you need to use it, it will be non-NULL, + * so there is no real need to check for it (wusb_dev will + * dissapear before usb_dev). + * + * - The following fields need to be filled out before calling + * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}. + * + * - there is no wusbhc_init() method, we do everything in + * wusbhc_create(). + * + * - Creation is done in two phases, wusbhc_create() and + * wusbhc_create_b(); b are the parts that need to be called after + * calling usb_hcd_add(&wusbhc->usb_hcd). 
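+ *
+ * A minimal bring-up sketch for an upper layer driver, following the
+ * notes above (error handling omitted; the 'my_*' names, the irq and
+ * the irq flags are hypothetical and driver specific):
+ *
+ *	wusbhc->ports_max = my_ports_max;
+ *	wusbhc->mmcies_max = my_mmcies_max;
+ *	wusbhc->mmcie_add = my_mmcie_add;
+ *	wusbhc->mmcie_rm = my_mmcie_rm;
+ *	result = wusbhc_create(wusbhc);			/* phase A */
+ *	result = usb_add_hcd(&wusbhc->usb_hcd, my_irq, IRQF_SHARED);
+ *	result = wusbhc_b_create(wusbhc);		/* phase B (sysfs, etc) */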
+ */ +struct wusbhc { + struct usb_hcd usb_hcd; /* HAS TO BE 1st */ + struct device *dev; + struct uwb_rc *uwb_rc; + struct uwb_pal pal; + + unsigned trust_timeout; /* in jiffies */ + struct wuie_host_info *wuie_host_info; /* Includes CHID */ + + struct mutex mutex; /* locks everything else */ + u16 cluster_id; /* Wireless USB Cluster ID */ + struct wusb_port *port; /* Fake port status handling */ + struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */ + u8 ports_max; + unsigned active:1; /* currently xmit'ing MMCs */ + struct wuie_keep_alive keep_alive_ie; /* protected by mutex */ + struct delayed_work keep_alive_timer; + struct list_head cack_list; /* Connect acknowledging */ + size_t cack_count; /* protected by 'mutex' */ + struct wuie_connect_ack cack_ie; + struct uwb_rsv *rsv; /* cluster bandwidth reservation */ + + struct mutex mmcie_mutex; /* MMC WUIE handling */ + struct wuie_hdr **mmcie; /* WUIE array */ + u8 mmcies_max; + /* FIXME: make wusbhc_ops? */ + int (*start)(struct wusbhc *wusbhc); + void (*stop)(struct wusbhc *wusbhc); + int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, + u8 handle, struct wuie_hdr *wuie); + int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); + int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev); + int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index, + const struct uwb_mas_bm *); + int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx, + u32 tkid, const void *key, size_t key_size); + int (*set_gtk)(struct wusbhc *wusbhc, + u32 tkid, const void *key, size_t key_size); + int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots); + + struct { + struct usb_key_descriptor descr; + u8 data[16]; /* GTK key data */ + } __attribute__((packed)) gtk; + u8 gtk_index; + u32 gtk_tkid; + struct work_struct gtk_rekey_done_work; + int pending_set_gtks; + + struct usb_encryption_descriptor *ccm1_etd; +}; + +#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd) + + +extern int wusbhc_create(struct wusbhc *); +extern int wusbhc_b_create(struct wusbhc *); +extern void wusbhc_b_destroy(struct wusbhc *); +extern void wusbhc_destroy(struct wusbhc *); +extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *, + struct wusb_dev *); +extern void wusb_dev_sysfs_rm(struct wusb_dev *); +extern int wusbhc_sec_create(struct wusbhc *); +extern int wusbhc_sec_start(struct wusbhc *); +extern void wusbhc_sec_stop(struct wusbhc *); +extern void wusbhc_sec_destroy(struct wusbhc *); +extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, + int status); +void wusbhc_reset_all(struct wusbhc *wusbhc); + +int wusbhc_pal_register(struct wusbhc *wusbhc); +void wusbhc_pal_unregister(struct wusbhc *wusbhc); + +/* + * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone + * + * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr) + * + * This is a safe assumption as @usb_dev->bus is referenced all the + * time during the @usb_dev life cycle. + */ +static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev) +{ + struct usb_hcd *usb_hcd; + usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self); + return usb_get_hcd(usb_hcd); +} + +/* + * Increment the reference count on a wusbhc. + * + * @wusbhc's life cycle is identical to that of the underlying usb_hcd. + */ +static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc) +{ + return usb_get_hcd(&wusbhc->usb_hcd) ? 
wusbhc : NULL; +} + +/* + * Return the wusbhc associated to a @usb_dev + * + * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr) + * + * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down. + * WARNING: referenced at the usb_hcd level, unlocked + * + * FIXME: move offline + */ +static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev) +{ + struct wusbhc *wusbhc = NULL; + struct usb_hcd *usb_hcd; + if (usb_dev->devnum > 1 && !usb_dev->wusb) { + /* but root hubs */ + dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum, + usb_dev->wusb); + BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb); + } + usb_hcd = usb_hcd_get_by_usb_dev(usb_dev); + if (usb_hcd == NULL) + return NULL; + BUG_ON(usb_hcd->wireless == 0); + return wusbhc = usb_hcd_to_wusbhc(usb_hcd); +} + + +static inline void wusbhc_put(struct wusbhc *wusbhc) +{ + usb_put_hcd(&wusbhc->usb_hcd); +} + +int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid); +void wusbhc_stop(struct wusbhc *wusbhc); +extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); + +/* Device connect handling */ +extern int wusbhc_devconnect_create(struct wusbhc *); +extern void wusbhc_devconnect_destroy(struct wusbhc *); +extern int wusbhc_devconnect_start(struct wusbhc *wusbhc, + const struct wusb_ckhdid *chid); +extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); +extern int wusbhc_devconnect_auth(struct wusbhc *, u8); +extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, + struct wusb_dn_hdr *dn_hdr, size_t size); +extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port); +extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); +extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, + void *priv); +extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, + u8 addr); + +/* Wireless USB fake Root Hub methods */ +extern int wusbhc_rh_create(struct wusbhc *); +extern void wusbhc_rh_destroy(struct wusbhc *); + +extern int wusbhc_rh_status_data(struct usb_hcd *, char *); +extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16); +extern int wusbhc_rh_suspend(struct usb_hcd *); +extern int wusbhc_rh_resume(struct usb_hcd *); +extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned); + +/* MMC handling */ +extern int wusbhc_mmcie_create(struct wusbhc *); +extern void wusbhc_mmcie_destroy(struct wusbhc *); +extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt, + struct wuie_hdr *); +extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *); + +/* Bandwidth reservation */ +int wusbhc_rsv_establish(struct wusbhc *wusbhc); +void wusbhc_rsv_terminate(struct wusbhc *wusbhc); + +/* + * I've always said + * I wanted a wedding in a church... + * + * but lately I've been thinking about + * the Botanical Gardens. + * + * We could do it by the tulips. + * It'll be beautiful + * + * --Security! 
+ */ +extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *, + struct wusb_dev *); +extern void wusb_dev_sec_rm(struct wusb_dev *) ; +extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, + struct wusb_ckhdid *ck); +void wusbhc_gtk_rekey(struct wusbhc *wusbhc); + + +/* WUSB Cluster ID handling */ +extern u8 wusb_cluster_id_get(void); +extern void wusb_cluster_id_put(u8); + +/* + * wusb_port_by_idx - return the port associated to a zero-based port index + * + * NOTE: valid without locking as long as wusbhc is referenced (as the + * number of ports doesn't change). The data pointed to has to + * be verified though :) + */ +static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc, + u8 port_idx) +{ + return &wusbhc->port[port_idx]; +} + +/* + * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to + * a port_idx. + * + * USB stack USB ports are 1 based!! + * + * NOTE: only valid for WUSB devices!!! + */ +static inline u8 wusb_port_no_to_idx(u8 port_no) +{ + return port_no - 1; +} + +extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *, + struct usb_device *); + +/* + * Return a referenced wusb_dev given a @usb_dev + * + * Returns NULL if the usb_dev is being torn down. + * + * FIXME: move offline + */ +static inline +struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev) +{ + struct wusbhc *wusbhc; + struct wusb_dev *wusb_dev; + wusbhc = wusbhc_get_by_usb_dev(usb_dev); + if (wusbhc == NULL) + return NULL; + mutex_lock(&wusbhc->mutex); + wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev); + mutex_unlock(&wusbhc->mutex); + wusbhc_put(wusbhc); + return wusb_dev; +} + +/* Misc */ + +extern struct workqueue_struct *wusbd; +#endif /* #ifndef __WUSBHC_H__ */ -- cgit v0.10.2 From b69fada68b92fa7061d59a3e54b428759a5e5717 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:24 +0100 Subject: wusb: add the Wireless USB core (protocol) Add the WUSB protocol (MMC management and device connection) code. Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c new file mode 100644 index 0000000..f05f9b4 --- /dev/null +++ b/drivers/usb/wusbcore/devconnect.c @@ -0,0 +1,1314 @@ +/* + * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) + * Device Connect handling + * + * Copyright (C) 2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * FIXME: this file needs to be broken up, it's grown too big + * + * + * WUSB1.0[7.1, 7.5.1, ] + * + * WUSB device connection is kind of messy. Some background: + * + * When a device wants to connect it scans the UWB radio channels + * looking for a WUSB Channel; a WUSB channel is defined by MMCs + * (Micro Managed Commands or something like that) [see + * Design-overview for more on this] . 
+ * + * So, device scans the radio, finds MMCs and thus a host and checks + * when the next DNTS is. It sends a Device Notification Connect + * (DN_Connect); the host picks it up (through nep.c and notif.c, ends + * up in wusb_devconnect_ack(), which creates a wusb_dev structure in + * wusbhc->port[port_number].wusb_dev), assigns an unauth address + * to the device (this means from 0x80 to 0xfe) and sends, in the MMC + * a Connect Ack Information Element (ConnAck IE). + * + * So now the device now has a WUSB address. From now on, we use + * that to talk to it in the RPipes. + * + * ASSUMPTIONS: + * + * - We use the the as device address the port number where it is + * connected (port 0 doesn't exist). For unauth, it is 128 + that. + * + * ROADMAP: + * + * This file contains the logic for doing that--entry points: + * + * wusb_devconnect_ack() Ack a device until _acked() called. + * Called by notif.c:wusb_handle_dn_connect() + * when a DN_Connect is received. + * + * wusbhc_devconnect_auth() Called by rh.c:wusbhc_rh_port_reset() when + * doing the device connect sequence. + * + * wusb_devconnect_acked() Ack done, release resources. + * + * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() + * for processing a DN_Alive pong from a device. + * + * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to + * process a disconenct request from a + * device. + * + * wusb_dev_reset() Called by rh.c:wusbhc_rh_port_reset() when + * resetting a device. + * + * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when + * disabling a port. + * + * wusb_devconnect_create() Called when creating the host by + * lc.c:wusbhc_create(). + * + * wusb_devconnect_destroy() Cleanup called removing the host. Called + * by lc.c:wusbhc_destroy(). + * + * Each Wireless USB host maintains a list of DN_Connect requests + * (actually we maintain a list of pending Connect Acks, the + * wusbhc->ca_list). + * + * LIFE CYCLE OF port->wusb_dev + * + * Before the @wusbhc structure put()s the reference it owns for + * port->wusb_dev [and clean the wusb_dev pointer], it needs to + * lock @wusbhc->mutex. + */ + +#include +#include +#include +#include "wusbhc.h" + +#undef D_LOCAL +#define D_LOCAL 1 +#include + +static void wusbhc_devconnect_acked_work(struct work_struct *work); + +static void wusb_dev_free(struct wusb_dev *wusb_dev) +{ + if (wusb_dev) { + kfree(wusb_dev->set_gtk_req); + usb_free_urb(wusb_dev->set_gtk_urb); + kfree(wusb_dev); + } +} + +static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) +{ + struct wusb_dev *wusb_dev; + struct urb *urb; + struct usb_ctrlrequest *req; + + wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); + if (wusb_dev == NULL) + goto err; + + wusb_dev->wusbhc = wusbhc; + + INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) + goto err; + + req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (req == NULL) + goto err; + + req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; + req->bRequest = USB_REQ_SET_DESCRIPTOR; + req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index); + req->wIndex = 0; + req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); + + wusb_dev->set_gtk_urb = urb; + wusb_dev->set_gtk_req = req; + + return wusb_dev; +err: + wusb_dev_free(wusb_dev); + return NULL; +} + + +/* + * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE + * properly so that it can be added to the MMC. 
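+ *
+ * Roughly, this is how cack_ie flows through this file (call sketch
+ * only; everything named here is defined further down):
+ *
+ *   wusbhc_devconnect_ack()
+ *     wusbhc_cack_add()          adds the device to wusbhc->cack_list
+ *       wusbhc_fill_cack_ie()    copies each (CDID, address) pair into cack_ie
+ *     wusbhc_mmcie_set()         puts cack_ie on the air in the MMCs
+ *   ...
+ *   wusbhc_devconnect_acked()    (reached via devconnect_acked_work)
+ *     wusbhc_cack_rm()           drops the entry and refreshes cack_ie
+ *     wusbhc_mmcie_set()/_rm()   updates or removes the IE from the MMCs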
+ * + * We just get the @wusbhc->ca_list and fill out the first four ones or + * less (per-spec WUSB1.0[7.5, before T7-38). If the ConnectAck WUSB + * IE is not allocated, we alloc it. + * + * @wusbhc->mutex must be taken + */ +static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc) +{ + unsigned cnt; + struct wusb_dev *dev_itr; + struct wuie_connect_ack *cack_ie; + + cack_ie = &wusbhc->cack_ie; + cnt = 0; + list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) { + cack_ie->blk[cnt].CDID = dev_itr->cdid; + cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr; + if (++cnt >= WUIE_ELT_MAX) + break; + } + cack_ie->hdr.bLength = sizeof(cack_ie->hdr) + + cnt * sizeof(cack_ie->blk[0]); +} + +/* + * Register a new device that wants to connect + * + * A new device wants to connect, so we add it to the Connect-Ack + * list. We give it an address in the unauthorized range (bit 8 set); + * user space will have to drive authorization further on. + * + * @dev_addr: address to use for the device (which is also the port + * number). + * + * @wusbhc->mutex must be taken + */ +static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc, + struct wusb_dn_connect *dnc, + const char *pr_cdid, u8 port_idx) +{ + struct device *dev = wusbhc->dev; + struct wusb_dev *wusb_dev; + int new_connection = wusb_dn_connect_new_connection(dnc); + u8 dev_addr; + int result; + + d_fnstart(3, dev, "(wusbhc %p port_idx %d)\n", wusbhc, port_idx); + /* Is it registered already? */ + list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node) + if (!memcmp(&wusb_dev->cdid, &dnc->CDID, + sizeof(wusb_dev->cdid))) + return wusb_dev; + /* We don't have it, create an entry, register it */ + wusb_dev = wusb_dev_alloc(wusbhc); + if (wusb_dev == NULL) { + if (printk_ratelimit()) + dev_err(dev, "DN CONNECT: no memory to process %s's %s " + "request\n", pr_cdid, + new_connection ? "connect" : "reconnect"); + return NULL; + } + wusb_dev_init(wusb_dev); + wusb_dev->cdid = dnc->CDID; + wusb_dev->port_idx = port_idx; + + /* + * Devices are always available within the cluster reservation + * and since the hardware will take the intersection of the + * per-device availability and the cluster reservation, the + * per-device availability can simply be set to always + * available. + */ + bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS); + + /* FIXME: handle reconnects instead of assuming connects are + always new. 
*/ + if (1 && new_connection == 0) + new_connection = 1; + if (new_connection) { + dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH; + + dev_info(dev, "Connecting new WUSB device to address %u, " + "port %u\n", dev_addr, port_idx); + + result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr); + if (result) + return NULL; + } + wusb_dev->entry_ts = jiffies; + list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); + wusbhc->cack_count++; + wusbhc_fill_cack_ie(wusbhc); + d_fnend(3, dev, "(wusbhc %p port_idx %d)\n", wusbhc, port_idx); + return wusb_dev; +} + +/* + * Remove a Connect-Ack context entry from the HCs view + * + * @wusbhc->mutex must be taken + */ +static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +{ + struct device *dev = wusbhc->dev; + d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev); + list_del_init(&wusb_dev->cack_node); + wusbhc->cack_count--; + wusbhc_fill_cack_ie(wusbhc); + d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev); +} + +/* + * @wusbhc->mutex must be taken */ +static +void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +{ + struct device *dev = wusbhc->dev; + d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev); + wusbhc_cack_rm(wusbhc, wusb_dev); + if (wusbhc->cack_count) + wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); + else + wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); + d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev); +} + +static void wusbhc_devconnect_acked_work(struct work_struct *work) +{ + struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev, + devconnect_acked_work); + struct wusbhc *wusbhc = wusb_dev->wusbhc; + + mutex_lock(&wusbhc->mutex); + wusbhc_devconnect_acked(wusbhc, wusb_dev); + mutex_unlock(&wusbhc->mutex); +} + +/* + * Ack a device for connection + * + * FIXME: docs + * + * @pr_cdid: Printable CDID...hex Use @dnc->cdid for the real deal. + * + * So we get the connect ack IE (may have been allocated already), + * find an empty connect block, an empty virtual port, create an + * address with it (see below), make it an unauth addr [bit 7 set] and + * set the MMC. + * + * Addresses: because WUSB hosts have no downstream hubs, we can do a + * 1:1 mapping between 'port number' and device + * address. This simplifies many things, as during this + * initial connect phase the USB stack has no knoledge of + * the device and hasn't assigned an address yet--we know + * USB's choose_address() will use the same euristics we + * use here, so we can assume which address will be assigned. + * + * USB stack always assigns address 1 to the root hub, so + * to the port number we add 2 (thus virtual port #0 is + * addr #2). 
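+ *
+ * As a worked example (WUSB_DEV_ADDR_UNAUTH being the 0x80 bit
+ * described at the top of this file):
+ *
+ *   port_idx 0 -> USB devnum 2 -> unauth WUSB address 0x82
+ *   port_idx 1 -> USB devnum 3 -> unauth WUSB address 0x83
+ *
+ * and the inverse mapping, as used by wusbhc_find_dev_by_addr() below,
+ * is port_idx = (addr & ~0x80) - 2.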
+ * + * @wusbhc shall be referenced + */ +static +void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, + const char *pr_cdid) +{ + int result; + struct device *dev = wusbhc->dev; + struct wusb_dev *wusb_dev; + struct wusb_port *port; + unsigned idx, devnum; + + d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid); + mutex_lock(&wusbhc->mutex); + + /* Check we are not handling it already */ + for (idx = 0; idx < wusbhc->ports_max; idx++) { + port = wusb_port_by_idx(wusbhc, idx); + if (port->wusb_dev + && !memcmp(&dnc->CDID, &port->wusb_dev->cdid, + sizeof(dnc->CDID))) { + if (printk_ratelimit()) + dev_err(dev, "Already handling dev %s " + " (it might be slow)\n", pr_cdid); + goto error_unlock; + } + } + /* Look up those fake ports we have for a free one */ + for (idx = 0; idx < wusbhc->ports_max; idx++) { + port = wusb_port_by_idx(wusbhc, idx); + if ((port->status & USB_PORT_STAT_POWER) + && !(port->status & USB_PORT_STAT_CONNECTION)) + break; + } + if (idx >= wusbhc->ports_max) { + dev_err(dev, "Host controller can't connect more devices " + "(%u already connected); device %s rejected\n", + wusbhc->ports_max, pr_cdid); + /* NOTE: we could send a WUIE_Disconnect here, but we haven't + * event acked, so the device will eventually timeout the + * connection, right? */ + goto error_unlock; + } + + devnum = idx + 2; + + /* Make sure we are using no crypto on that "virtual port" */ + wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0); + + /* Grab a filled in Connect-Ack context, fill out the + * Connect-Ack Wireless USB IE, set the MMC */ + wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx); + if (wusb_dev == NULL) + goto error_unlock; + result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); + if (result < 0) + goto error_unlock; + /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do + * three for a good measure */ + msleep(3); + port->wusb_dev = wusb_dev; + port->status |= USB_PORT_STAT_CONNECTION; + port->change |= USB_PORT_STAT_C_CONNECTION; + port->reset_count = 0; + /* Now the port status changed to connected; khubd will + * pick the change up and try to reset the port to bring it to + * the enabled state--so this process returns up to the stack + * and it calls back into wusbhc_rh_port_reset() who will call + * devconnect_auth(). + */ +error_unlock: + mutex_unlock(&wusbhc->mutex); + d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid); + return; + +} + +/* + * Disconnect a Wireless USB device from its fake port + * + * Marks the port as disconnected so that khubd can pick up the change + * and drops our knowledge about the device. 
+ * + * Assumes there is a device connected + * + * @port: fake port the device is connected to + * + * NOTE: @wusbhc->mutex is locked + * + * WARNING: From here it is not very safe to access anything hanging off + * wusb_dev + */ +static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, + struct wusb_port *port) +{ + struct device *dev = wusbhc->dev; + struct wusb_dev *wusb_dev = port->wusb_dev; + + d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port); + port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE + | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET + | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); + port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE; + if (wusb_dev) { + if (!list_empty(&wusb_dev->cack_node)) + list_del_init(&wusb_dev->cack_node); + /* For the one in cack_add() */ + wusb_dev_put(wusb_dev); + } + port->wusb_dev = NULL; + /* don't reset the reset_count to zero or wusbhc_rh_port_reset will get + * confused! We only reset to zero when we connect a new device. + */ + + /* After a device disconnects, change the GTK (see [WUSB] + * section 6.2.11.2). */ + wusbhc_gtk_rekey(wusbhc); + + d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port); + /* The Wireless USB part has forgotten about the device already; now + * khubd's timer will pick up the disconnection and remove the USB + * device from the system + */ +} + +/* + * Authenticate a device into the WUSB Cluster + * + * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when + * asking for a reset on a port that is not enabled (i.e., the first + * connect on the port). + * + * Performs the 4-way handshake to allow the device to communicate with + * the WUSB Cluster securely; once done, issue a request to the device for + * it to change to address 0. + * + * This mimics the reset step of Wired USB: after resetting a + * device, the port is left in the enabled state and the device keeps the + * default address (0). + * + * WUSB1.0[7.1.2] + * + * @port_idx: port where the change happened -- this is the index into + * the wusbhc port array, not the USB port number. + */ +int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx) +{ + struct device *dev = wusbhc->dev; + struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); + + d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); + port->status &= ~USB_PORT_STAT_RESET; + port->status |= USB_PORT_STAT_ENABLE; + port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE; + d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx); + return 0; +} + +/* + * Refresh the list of keep alives to emit in the MMC + * + * Some devices don't respond to keep alives unless they've been + * authenticated, so skip unauthenticated devices. + * + * We only publish the first four devices whose trust timeout is + * approaching. Then, when we are done processing those, we go for the + * next ones. We ignore the ones that have timed out already (they'll + * be purged). + * + * This might let devices at the end of the port array time out while we + * keep servicing the first ones...FIXME: come up with a better algorithm? + * + * Note we can't do much about MMC op errors; we hope the next refresh + * will kind of handle it.
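+ *
+ * Per device, the checks below boil down to (tt being the trust
+ * timeout as computed in the function):
+ *
+ *   now > entry_ts + tt      assume the device is gone, disconnect it
+ *   now > entry_ts + tt/2    put its address in the Keep-Alive IE
+ *   otherwise                leave it alone for this pass
+ *
+ * and wusbhc_keep_alive_run() below re-arms itself at half the trust
+ * timeout.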
+ * + * NOTE: @wusbhc->mutex is locked + */ +static void __wusbhc_keep_alive(struct wusbhc *wusbhc) +{ + int result; + struct device *dev = wusbhc->dev; + unsigned cnt; + struct wusb_dev *wusb_dev; + struct wusb_port *wusb_port; + struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie; + unsigned keep_alives, old_keep_alives; + + d_fnstart(5, dev, "(wusbhc %p)\n", wusbhc); + old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); + keep_alives = 0; + for (cnt = 0; + keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max; + cnt++) { + unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); + + wusb_port = wusb_port_by_idx(wusbhc, cnt); + wusb_dev = wusb_port->wusb_dev; + + if (wusb_dev == NULL) + continue; + if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) + continue; + + if (time_after(jiffies, wusb_dev->entry_ts + tt)) { + dev_err(dev, "KEEPALIVE: device %u timed out\n", + wusb_dev->addr); + __wusbhc_dev_disconnect(wusbhc, wusb_port); + } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) { + /* Approaching timeout cut out, need to refresh */ + ie->bDeviceAddress[keep_alives++] = wusb_dev->addr; + } + } + if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */ + ie->bDeviceAddress[keep_alives++] = 0x7f; + ie->hdr.bLength = sizeof(ie->hdr) + + keep_alives*sizeof(ie->bDeviceAddress[0]); + if (keep_alives > 0) { + result = wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr); + if (result < 0 && printk_ratelimit()) + dev_err(dev, "KEEPALIVE: can't set MMC: %d\n", result); + } else if (old_keep_alives != 0) + wusbhc_mmcie_rm(wusbhc, &ie->hdr); + d_fnend(5, dev, "(wusbhc %p) = void\n", wusbhc); +} + +/* + * Do a run through all devices checking for timeouts + */ +static void wusbhc_keep_alive_run(struct work_struct *ws) +{ + struct delayed_work *dw = + container_of(ws, struct delayed_work, work); + struct wusbhc *wusbhc = + container_of(dw, struct wusbhc, keep_alive_timer); + + d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc); + if (wusbhc->active) { + mutex_lock(&wusbhc->mutex); + __wusbhc_keep_alive(wusbhc); + mutex_unlock(&wusbhc->mutex); + queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, + (wusbhc->trust_timeout * CONFIG_HZ)/1000/2); + } + d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); + return; +} + +/* + * Find the wusb_dev from its device address. + * + * The device can be found directly from the address (see + * wusb_cack_add() for where the device address is set to port_idx + * +2), except when the address is zero. + */ +static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr) +{ + int p; + + if (addr == 0xff) /* unconnected */ + return NULL; + + if (addr > 0) { + int port = (addr & ~0x80) - 2; + if (port < 0 || port >= wusbhc->ports_max) + return NULL; + return wusb_port_by_idx(wusbhc, port)->wusb_dev; + } + + /* Look for the device with address 0. */ + for (p = 0; p < wusbhc->ports_max; p++) { + struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev; + if (wusb_dev && wusb_dev->addr == addr) + return wusb_dev; + } + return NULL; +} + +/* + * Handle a DN_Alive notification (WUSB1.0[7.6.1]) + * + * This just updates the device activity timestamp and then refreshes + * the keep alive IE. 
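+ *
+ * (A device sends DN_Alive in answer to seeing its address in the
+ * Keep-Alive IE published by __wusbhc_keep_alive() above, so getting
+ * one here simply restarts that device's trust-timeout countdown.)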
+ * + * @wusbhc shall be referenced and unlocked + */ +static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +{ + struct device *dev = wusbhc->dev; + + d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr); + + mutex_lock(&wusbhc->mutex); + wusb_dev->entry_ts = jiffies; + __wusbhc_keep_alive(wusbhc); + mutex_unlock(&wusbhc->mutex); +} + +/* + * Handle a DN_Connect notification (WUSB1.0[7.6.1]) + * + * @wusbhc + * @pkt_hdr + * @size: Size of the buffer where the notification resides; if the + * notification data suggests there should be more data than + * available, an error will be signaled and the whole buffer + * consumed. + * + * @wusbhc->mutex shall be held + */ +static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, + struct wusb_dn_hdr *dn_hdr, + size_t size) +{ + struct device *dev = wusbhc->dev; + struct wusb_dn_connect *dnc; + char pr_cdid[WUSB_CKHDID_STRSIZE]; + static const char *beacon_behaviour[] = { + "reserved", + "self-beacon", + "directed-beacon", + "no-beacon" + }; + + d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size); + if (size < sizeof(*dnc)) { + dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", + size, sizeof(*dnc)); + goto out; + } + + dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID); + dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n", + pr_cdid, + wusb_dn_connect_prev_dev_addr(dnc), + beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)], + wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect"); + /* ACK the connect */ + wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); +out: + d_fnend(3, dev, "(%p, %p, %zu) = void\n", + wusbhc, dn_hdr, size); + return; +} + +/* + * Handle a DN_Disconnect notification (WUSB1.0[7.6.1]) + * + * Device is going down -- do the disconnect. + * + * @wusbhc shall be referenced and unlocked + */ +static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +{ + struct device *dev = wusbhc->dev; + + dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr); + + mutex_lock(&wusbhc->mutex); + __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); + mutex_unlock(&wusbhc->mutex); +} + +/* + * Reset a WUSB device on a HWA + * + * @wusbhc + * @port_idx Index of the port where the device is + * + * In Wireless USB, a reset is more or less equivalent to a full + * disconnect; so we just do a full disconnect and send the device a + * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs). + * + * @wusbhc should be refcounted and unlocked + */ +int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx) +{ + int result; + struct device *dev = wusbhc->dev; + struct wusb_dev *wusb_dev; + struct wuie_reset *ie; + + d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); + mutex_lock(&wusbhc->mutex); + result = 0; + wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; + if (wusb_dev == NULL) { + /* reset no device? 
ignore */ + dev_dbg(dev, "RESET: no device at port %u, ignoring\n", + port_idx); + goto error_unlock; + } + result = -ENOMEM; + ie = kzalloc(sizeof(*ie), GFP_KERNEL); + if (ie == NULL) + goto error_unlock; + ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID); + ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE; + ie->CDID = wusb_dev->cdid; + result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr); + if (result < 0) { + dev_err(dev, "RESET: cant's set MMC: %d\n", result); + goto error_kfree; + } + __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); + + /* 120ms, hopefully 6 MMCs (FIXME) */ + msleep(120); + wusbhc_mmcie_rm(wusbhc, &ie->hdr); +error_kfree: + kfree(ie); +error_unlock: + mutex_unlock(&wusbhc->mutex); + d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result); + return result; +} + +/* + * Handle a Device Notification coming a host + * + * The Device Notification comes from a host (HWA, DWA or WHCI) + * wrapped in a set of headers. Somebody else has peeled off those + * headers for us and we just get one Device Notifications. + * + * Invalid DNs (e.g., too short) are discarded. + * + * @wusbhc shall be referenced + * + * FIXMES: + * - implement priorities as in WUSB1.0[Table 7-55]? + */ +void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr, + struct wusb_dn_hdr *dn_hdr, size_t size) +{ + struct device *dev = wusbhc->dev; + struct wusb_dev *wusb_dev; + + d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr); + + if (size < sizeof(struct wusb_dn_hdr)) { + dev_err(dev, "DN data shorter than DN header (%d < %d)\n", + (int)size, (int)sizeof(struct wusb_dn_hdr)); + goto out; + } + + wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); + if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { + dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", + dn_hdr->bType, srcaddr); + goto out; + } + + switch (dn_hdr->bType) { + case WUSB_DN_CONNECT: + wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); + break; + case WUSB_DN_ALIVE: + wusbhc_handle_dn_alive(wusbhc, wusb_dev); + break; + case WUSB_DN_DISCONNECT: + wusbhc_handle_dn_disconnect(wusbhc, wusb_dev); + break; + case WUSB_DN_MASAVAILCHANGED: + case WUSB_DN_RWAKE: + case WUSB_DN_SLEEP: + /* FIXME: handle these DNs. */ + break; + case WUSB_DN_EPRDY: + /* The hardware handles these. */ + break; + default: + dev_warn(dev, "unknown DN %u (%d octets) from %u\n", + dn_hdr->bType, (int)size, srcaddr); + } +out: + d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr); + return; +} +EXPORT_SYMBOL_GPL(wusbhc_handle_dn); + +/* + * Disconnect a WUSB device from a the cluster + * + * @wusbhc + * @port Fake port where the device is (wusbhc index, not USB port number). + * + * In Wireless USB, a disconnect is basically telling the device he is + * being disconnected and forgetting about him. + * + * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100 + * ms and then keep going. + * + * We don't do much in case of error; we always pretend we disabled + * the port and disconnected the device. If physically the request + * didn't get there (many things can fail in the way there), the stack + * will reject the device's communication attempts. 
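+ *
+ * (This is the backend of rh.c:wusbhc_rh_clear_port_feat() when a
+ * port is being disabled; see the roadmap at the top of this file.)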
+ * + * @wusbhc should be refcounted and locked + */ +void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx) +{ + int result; + struct device *dev = wusbhc->dev; + struct wusb_dev *wusb_dev; + struct wuie_disconnect *ie; + + d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); + result = 0; + wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; + if (wusb_dev == NULL) { + /* reset no device? ignore */ + dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n", + port_idx); + goto error; + } + __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); + + result = -ENOMEM; + ie = kzalloc(sizeof(*ie), GFP_KERNEL); + if (ie == NULL) + goto error; + ie->hdr.bLength = sizeof(*ie); + ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT; + ie->bDeviceAddress = wusb_dev->addr; + result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr); + if (result < 0) { + dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result); + goto error_kfree; + } + + /* 120ms, hopefully 6 MMCs */ + msleep(100); + wusbhc_mmcie_rm(wusbhc, &ie->hdr); +error_kfree: + kfree(ie); +error: + d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result); + return; +} + +static void wusb_cap_descr_printf(const unsigned level, struct device *dev, + const struct usb_wireless_cap_descriptor *wcd) +{ + d_printf(level, dev, + "WUSB Capability Descriptor\n" + " bDevCapabilityType 0x%02x\n" + " bmAttributes 0x%02x\n" + " wPhyRates 0x%04x\n" + " bmTFITXPowerInfo 0x%02x\n" + " bmFFITXPowerInfo 0x%02x\n" + " bmBandGroup 0x%04x\n" + " bReserved 0x%02x\n", + wcd->bDevCapabilityType, + wcd->bmAttributes, + le16_to_cpu(wcd->wPHYRates), + wcd->bmTFITXPowerInfo, + wcd->bmFFITXPowerInfo, + wcd->bmBandGroup, + wcd->bReserved); +} + +/* + * Walk over the BOS descriptor, verify and grok it + * + * @usb_dev: referenced + * @wusb_dev: referenced and unlocked + * + * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a + * "flexible" way to wrap all kinds of descriptors inside an standard + * descriptor (wonder why they didn't use normal descriptors, + * btw). Not like they lack code. + * + * At the end we go to look for the WUSB Device Capabilities + * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor + * that is part of the BOS descriptor set. That tells us what does the + * device support (dual role, beacon type, UWB PHY rates). + */ +static int wusb_dev_bos_grok(struct usb_device *usb_dev, + struct wusb_dev *wusb_dev, + struct usb_bos_descriptor *bos, size_t desc_size) +{ + ssize_t result; + struct device *dev = &usb_dev->dev; + void *itr, *top; + + /* Walk over BOS capabilities, verify them */ + itr = (void *)bos + sizeof(*bos); + top = itr + desc_size - sizeof(*bos); + while (itr < top) { + struct usb_dev_cap_header *cap_hdr = itr; + size_t cap_size; + u8 cap_type; + if (top - itr < sizeof(*cap_hdr)) { + dev_err(dev, "Device BUG? premature end of BOS header " + "data [offset 0x%02x]: only %zu bytes left\n", + (int)(itr - (void *)bos), top - itr); + result = -ENOSPC; + goto error_bad_cap; + } + cap_size = cap_hdr->bLength; + cap_type = cap_hdr->bDevCapabilityType; + d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n", + cap_type, cap_size); + if (cap_size == 0) + break; + if (cap_size > top - itr) { + dev_err(dev, "Device BUG? 
premature end of BOS data " + "[offset 0x%02x cap %02x %zu bytes]: " + "only %zu bytes left\n", + (int)(itr - (void *)bos), + cap_type, cap_size, top - itr); + result = -EBADF; + goto error_bad_cap; + } + d_dump(3, dev, itr, cap_size); + switch (cap_type) { + case USB_CAP_TYPE_WIRELESS_USB: + if (cap_size != sizeof(*wusb_dev->wusb_cap_descr)) + dev_err(dev, "Device BUG? WUSB Capability " + "descriptor is %zu bytes vs %zu " + "needed\n", cap_size, + sizeof(*wusb_dev->wusb_cap_descr)); + else { + wusb_dev->wusb_cap_descr = itr; + wusb_cap_descr_printf(3, dev, itr); + } + break; + default: + dev_err(dev, "BUG? Unknown BOS capability 0x%02x " + "(%zu bytes) at offset 0x%02x\n", cap_type, + cap_size, (int)(itr - (void *)bos)); + } + itr += cap_size; + } + result = 0; +error_bad_cap: + return result; +} + +/* + * Add information from the BOS descriptors to the device + * + * @usb_dev: referenced + * @wusb_dev: referenced and unlocked + * + * So what we do is we alloc a space for the BOS descriptor of 64 + * bytes; read the first four bytes which include the wTotalLength + * field (WUSB1.0[T7-26]) and if it fits in those 64 bytes, read the + * whole thing. If not we realloc to that size. + * + * Then we call the groking function, that will fill up + * wusb_dev->wusb_cap_descr, which is what we'll need later on. + */ +static int wusb_dev_bos_add(struct usb_device *usb_dev, + struct wusb_dev *wusb_dev) +{ + ssize_t result; + struct device *dev = &usb_dev->dev; + struct usb_bos_descriptor *bos; + size_t alloc_size = 32, desc_size = 4; + + bos = kmalloc(alloc_size, GFP_KERNEL); + if (bos == NULL) + return -ENOMEM; + result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size); + if (result < 4) { + dev_err(dev, "Can't get BOS descriptor or too short: %zd\n", + result); + goto error_get_descriptor; + } + desc_size = le16_to_cpu(bos->wTotalLength); + if (desc_size >= alloc_size) { + kfree(bos); + alloc_size = desc_size; + bos = kmalloc(alloc_size, GFP_KERNEL); + if (bos == NULL) + return -ENOMEM; + } + result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size); + if (result < 0 || result != desc_size) { + dev_err(dev, "Can't get BOS descriptor or too short (need " + "%zu bytes): %zd\n", desc_size, result); + goto error_get_descriptor; + } + if (result < sizeof(*bos) + || le16_to_cpu(bos->wTotalLength) != desc_size) { + dev_err(dev, "Can't get BOS descriptor or too short (need " + "%zu bytes): %zd\n", desc_size, result); + goto error_get_descriptor; + } + d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n", + result, bos->bNumDeviceCaps); + d_dump(2, dev, bos, result); + result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result); + if (result < 0) + goto error_bad_bos; + wusb_dev->bos = bos; + return 0; + +error_bad_bos: +error_get_descriptor: + kfree(bos); + wusb_dev->wusb_cap_descr = NULL; + return result; +} + +static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev) +{ + kfree(wusb_dev->bos); + wusb_dev->wusb_cap_descr = NULL; +}; + +static struct usb_wireless_cap_descriptor wusb_cap_descr_default = { + .bLength = sizeof(wusb_cap_descr_default), + .bDescriptorType = USB_DT_DEVICE_CAPABILITY, + .bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB, + + .bmAttributes = USB_WIRELESS_BEACON_NONE, + .wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53), + .bmTFITXPowerInfo = 0, + .bmFFITXPowerInfo = 0, + .bmBandGroup = cpu_to_le16(0x0001), /* WUSB1.0[7.4.1] bottom */ + .bReserved = 0 +}; + +/* + * USB stack's device addition Notifier Callback + * + * Called from drivers/usb/core/hub.c when 
a new device is added; we + * use this hook to perform certain WUSB specific setup work on the + * new device. As well, it is the first time we can connect the + * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a + * reference that we'll drop. + * + * First we need to determine if the device is a WUSB device (else we + * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE) + * [FIXME: maybe we'd need something more definitive]. If so, we track + * it's usb_busd and from there, the WUSB HC. + * + * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we + * get the wusbhc for the device. + * + * We have a reference on @usb_dev (as we are called at the end of its + * enumeration). + * + * NOTE: @usb_dev locked + */ +static void wusb_dev_add_ncb(struct usb_device *usb_dev) +{ + int result = 0; + struct wusb_dev *wusb_dev; + struct wusbhc *wusbhc; + struct device *dev = &usb_dev->dev; + u8 port_idx; + + if (usb_dev->wusb == 0 || usb_dev->devnum == 1) + return; /* skip non wusb and wusb RHs */ + + d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev); + + wusbhc = wusbhc_get_by_usb_dev(usb_dev); + if (wusbhc == NULL) + goto error_nodev; + mutex_lock(&wusbhc->mutex); + wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev); + port_idx = wusb_port_no_to_idx(usb_dev->portnum); + mutex_unlock(&wusbhc->mutex); + if (wusb_dev == NULL) + goto error_nodev; + wusb_dev->usb_dev = usb_get_dev(usb_dev); + usb_dev->wusb_dev = wusb_dev_get(wusb_dev); + result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev); + if (result < 0) { + dev_err(dev, "Cannot enable security: %d\n", result); + goto error_sec_add; + } + /* Now query the device for it's BOS and attach it to wusb_dev */ + result = wusb_dev_bos_add(usb_dev, wusb_dev); + if (result < 0) { + dev_err(dev, "Cannot get BOS descriptors: %d\n", result); + goto error_bos_add; + } + result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev); + if (result < 0) + goto error_add_sysfs; +out: + wusb_dev_put(wusb_dev); + wusbhc_put(wusbhc); +error_nodev: + d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev); + return; + + wusb_dev_sysfs_rm(wusb_dev); +error_add_sysfs: + wusb_dev_bos_rm(wusb_dev); +error_bos_add: + wusb_dev_sec_rm(wusb_dev); +error_sec_add: + mutex_lock(&wusbhc->mutex); + __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); + mutex_unlock(&wusbhc->mutex); + goto out; +} + +/* + * Undo all the steps done at connection by the notifier callback + * + * NOTE: @usb_dev locked + */ +static void wusb_dev_rm_ncb(struct usb_device *usb_dev) +{ + struct wusb_dev *wusb_dev = usb_dev->wusb_dev; + + if (usb_dev->wusb == 0 || usb_dev->devnum == 1) + return; /* skip non wusb and wusb RHs */ + + wusb_dev_sysfs_rm(wusb_dev); + wusb_dev_bos_rm(wusb_dev); + wusb_dev_sec_rm(wusb_dev); + wusb_dev->usb_dev = NULL; + usb_dev->wusb_dev = NULL; + wusb_dev_put(wusb_dev); + usb_put_dev(usb_dev); +} + +/* + * Handle notifications from the USB stack (notifier call back) + * + * This is called when the USB stack does a + * usb_{bus,device}_{add,remove}() so we can do WUSB specific + * handling. It is called with [for the case of + * USB_DEVICE_{ADD,REMOVE} with the usb_dev locked. 
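+ *
+ * The registration itself is done elsewhere in wusbcore; a minimal
+ * sketch of how this callback would get hooked up (the notifier block
+ * name is made up for the example):
+ *
+ *   static struct notifier_block wusb_usb_notifier = {
+ *           .notifier_call = wusb_usb_ncb,
+ *   };
+ *
+ *   usb_register_notify(&wusb_usb_notifier);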
+ */ +int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, + void *priv) +{ + int result = NOTIFY_OK; + + switch (val) { + case USB_DEVICE_ADD: + wusb_dev_add_ncb(priv); + break; + case USB_DEVICE_REMOVE: + wusb_dev_rm_ncb(priv); + break; + case USB_BUS_ADD: + /* ignore (for now) */ + case USB_BUS_REMOVE: + break; + default: + WARN_ON(1); + result = NOTIFY_BAD; + }; + return result; +} + +/* + * Return a referenced wusb_dev given a @wusbhc and @usb_dev + */ +struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc, + struct usb_device *usb_dev) +{ + struct wusb_dev *wusb_dev; + u8 port_idx; + + port_idx = wusb_port_no_to_idx(usb_dev->portnum); + BUG_ON(port_idx > wusbhc->ports_max); + wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; + if (wusb_dev != NULL) /* ops, device is gone */ + wusb_dev_get(wusb_dev); + return wusb_dev; +} +EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev); + +void wusb_dev_destroy(struct kref *_wusb_dev) +{ + struct wusb_dev *wusb_dev + = container_of(_wusb_dev, struct wusb_dev, refcnt); + list_del_init(&wusb_dev->cack_node); + wusb_dev_free(wusb_dev); + d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev); +} +EXPORT_SYMBOL_GPL(wusb_dev_destroy); + +/* + * Create all the device connect handling infrastructure + * + * This is basically the device info array, Connect Acknowledgement + * (cack) lists, keep-alive timers (and delayed work thread). + */ +int wusbhc_devconnect_create(struct wusbhc *wusbhc) +{ + d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc); + + wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; + wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); + INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); + + wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK; + wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); + INIT_LIST_HEAD(&wusbhc->cack_list); + + d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); + return 0; +} + +/* + * Release all resources taken by the devconnect stuff + */ +void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) +{ + d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc); + d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); +} + +/* + * wusbhc_devconnect_start - start accepting device connections + * @wusbhc: the WUSB HC + * + * Sets the Host Info IE to accept all new connections. + * + * FIXME: This also enables the keep alives but this is not necessary + * until there are connected and authenticated devices. + */ +int wusbhc_devconnect_start(struct wusbhc *wusbhc, + const struct wusb_ckhdid *chid) +{ + struct device *dev = wusbhc->dev; + struct wuie_host_info *hi; + int result; + + hi = kzalloc(sizeof(*hi), GFP_KERNEL); + if (hi == NULL) + return -ENOMEM; + + hi->hdr.bLength = sizeof(*hi); + hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; + hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); + hi->CHID = *chid; + result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); + if (result < 0) { + dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result); + goto error_mmcie_set; + } + wusbhc->wuie_host_info = hi; + + queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, + (wusbhc->trust_timeout*CONFIG_HZ)/1000/2); + + return 0; + +error_mmcie_set: + kfree(hi); + return result; +} + +/* + * wusbhc_devconnect_stop - stop managing connected devices + * @wusbhc: the WUSB HC + * + * Removes the Host Info IE and stops the keep alives. + * + * FIXME: should this disconnect all devices? 
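+ *
+ * (Counterpart of wusbhc_devconnect_start(); called from
+ * wusbhc_stop() in mmc.c when the host is shut down.)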
+ */ +void wusbhc_devconnect_stop(struct wusbhc *wusbhc) +{ + cancel_delayed_work_sync(&wusbhc->keep_alive_timer); + WARN_ON(!list_empty(&wusbhc->cack_list)); + + wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr); + kfree(wusbhc->wuie_host_info); + wusbhc->wuie_host_info = NULL; +} + +/* + * wusb_set_dev_addr - set the WUSB device address used by the host + * @wusbhc: the WUSB HC the device is connect to + * @wusb_dev: the WUSB device + * @addr: new device address + */ +int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr) +{ + int result; + + wusb_dev->addr = addr; + result = wusbhc->dev_info_set(wusbhc, wusb_dev); + if (result) + dev_err(wusbhc->dev, "device %d: failed to set device " + "address\n", wusb_dev->port_idx); + else + dev_info(wusbhc->dev, "device %d: %s addr %u\n", + wusb_dev->port_idx, + (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth", + wusb_dev->addr); + + return result; +} diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c new file mode 100644 index 0000000..e5390b7 --- /dev/null +++ b/drivers/usb/wusbcore/mmc.c @@ -0,0 +1,329 @@ +/* + * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) + * MMC (Microscheduled Management Command) handling + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * WUIEs and MMC IEs...well, they are almost the same at the end. MMC + * IEs are Wireless USB IEs that go into the MMC period...[what is + * that? look in Design-overview.txt]. + * + * + * This is a simple subsystem to keep track of which IEs are being + * sent by the host in the MMC period. + * + * For each WUIE we ask to send, we keep it in an array, so we can + * request its removal later, or replace the content. They are tracked + * by pointer, so be sure to use the same pointer if you want to + * remove it or update the contents. + * + * FIXME: + * - add timers that autoremove intervalled IEs? + */ +#include +#include "wusbhc.h" + +/* Initialize the MMCIEs handling mechanism */ +int wusbhc_mmcie_create(struct wusbhc *wusbhc) +{ + u8 mmcies = wusbhc->mmcies_max; + wusbhc->mmcie = kzalloc(mmcies * sizeof(wusbhc->mmcie[0]), GFP_KERNEL); + if (wusbhc->mmcie == NULL) + return -ENOMEM; + mutex_init(&wusbhc->mmcie_mutex); + return 0; +} + +/* Release resources used by the MMCIEs handling mechanism */ +void wusbhc_mmcie_destroy(struct wusbhc *wusbhc) +{ + kfree(wusbhc->mmcie); +} + +/* + * Add or replace an MMC Wireless USB IE. + * + * @interval: See WUSB1.0[8.5.3.1] + * @repeat_cnt: See WUSB1.0[8.5.3.1] + * @handle: See WUSB1.0[8.5.3.1] + * @wuie: Pointer to the header of the WUSB IE data to add. + * MUST BE allocated in a kmalloc buffer (no stack or + * vmalloc). + * THE CALLER ALWAYS OWNS THE POINTER (we don't free it + * on remove, we just forget about it). + * @returns: 0 if ok, < 0 errno code on error. 
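+ *
+ * A typical caller (compare wusbhc_dev_reset() or __wusbhc_dev_disable()
+ * in devconnect.c) does, roughly:
+ *
+ *   ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+ *   ie->hdr.bLength = sizeof(*ie);
+ *   ie->hdr.bIEIdentifier = WUIE_ID_...;
+ *   result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
+ *   ...
+ *   wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+ *   kfree(ie);        the buffer stays the caller's to free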
+ * + * Goes over the *whole* @wusbhc->mmcie array looking for (a) the + * first free spot and (b) if @wuie is already in the array (aka: + * transmitted in the MMCs) the spot were it is. + * + * If present, we "overwrite it" (update). + * + * + * NOTE: Need special ordering rules -- see below WUSB1.0 Table 7-38. + * The host uses the handle as the 'sort' index. We + * allocate the last one always for the WUIE_ID_HOST_INFO, and + * the rest, first come first serve in inverse order. + * + * Host software must make sure that it adds the other IEs in + * the right order... the host hardware is responsible for + * placing the WCTA IEs in the right place with the other IEs + * set by host software. + * + * NOTE: we can access wusbhc->wa_descr without locking because it is + * read only. + */ +int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, + struct wuie_hdr *wuie) +{ + int result = -ENOBUFS; + struct device *dev = wusbhc->dev; + unsigned handle, itr; + + /* Search a handle, taking into account the ordering */ + mutex_lock(&wusbhc->mmcie_mutex); + switch (wuie->bIEIdentifier) { + case WUIE_ID_HOST_INFO: + /* Always last */ + handle = wusbhc->mmcies_max - 1; + break; + case WUIE_ID_ISOCH_DISCARD: + dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x " + "unimplemented\n", wuie->bIEIdentifier); + result = -ENOSYS; + goto error_unlock; + default: + /* search for it or find the last empty slot */ + handle = ~0; + for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) { + if (wusbhc->mmcie[itr] == wuie) { + handle = itr; + break; + } + if (wusbhc->mmcie[itr] == NULL) + handle = itr; + } + if (handle == ~0) { + if (printk_ratelimit()) + dev_err(dev, "MMC handle space exhausted\n"); + goto error_unlock; + } + } + result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle, + wuie); + if (result >= 0) + wusbhc->mmcie[handle] = wuie; +error_unlock: + mutex_unlock(&wusbhc->mmcie_mutex); + return result; +} +EXPORT_SYMBOL_GPL(wusbhc_mmcie_set); + +/* + * Remove an MMC IE previously added with wusbhc_mmcie_set() + * + * @wuie Pointer used to add the WUIE + */ +void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie) +{ + int result; + struct device *dev = wusbhc->dev; + unsigned handle, itr; + + mutex_lock(&wusbhc->mmcie_mutex); + for (itr = 0; itr < wusbhc->mmcies_max; itr++) + if (wusbhc->mmcie[itr] == wuie) { + handle = itr; + goto found; + } + mutex_unlock(&wusbhc->mmcie_mutex); + return; + +found: + result = (wusbhc->mmcie_rm)(wusbhc, handle); + if (result == 0) + wusbhc->mmcie[itr] = NULL; + else if (printk_ratelimit()) + dev_err(dev, "MMC: Failed to remove IE %p (0x%02x)\n", + wuie, wuie->bIEIdentifier); + mutex_unlock(&wusbhc->mmcie_mutex); + return; +} +EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); + +/* + * wusbhc_start - start transmitting MMCs and accepting connections + * @wusbhc: the HC to start + * @chid: the CHID to use for this host + * + * Establishes a cluster reservation, enables device connections, and + * starts MMCs with appropriate DNTS parameters. 
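+ *
+ * This is normally reached through wusbhc_chid_set() (see below):
+ * setting a non-zero CHID on an inactive host ends up here, while
+ * setting the zero CHID ends up in wusbhc_stop().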
+ */ +int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) +{ + int result; + struct device *dev = wusbhc->dev; + + WARN_ON(wusbhc->wuie_host_info != NULL); + + result = wusbhc_rsv_establish(wusbhc); + if (result < 0) { + dev_err(dev, "cannot establish cluster reservation: %d\n", + result); + goto error_rsv_establish; + } + + result = wusbhc_devconnect_start(wusbhc, chid); + if (result < 0) { + dev_err(dev, "error enabling device connections: %d\n", result); + goto error_devconnect_start; + } + + result = wusbhc_sec_start(wusbhc); + if (result < 0) { + dev_err(dev, "error starting security in the HC: %d\n", result); + goto error_sec_start; + } + /* FIXME: the choice of the DNTS parameters is somewhat + * arbitrary */ + result = wusbhc->set_num_dnts(wusbhc, 0, 15); + if (result < 0) { + dev_err(dev, "Cannot set DNTS parameters: %d\n", result); + goto error_set_num_dnts; + } + result = wusbhc->start(wusbhc); + if (result < 0) { + dev_err(dev, "error starting wusbch: %d\n", result); + goto error_wusbhc_start; + } + wusbhc->active = 1; + return 0; + +error_wusbhc_start: + wusbhc_sec_stop(wusbhc); +error_set_num_dnts: +error_sec_start: + wusbhc_devconnect_stop(wusbhc); +error_devconnect_start: + wusbhc_rsv_terminate(wusbhc); +error_rsv_establish: + return result; +} + +/* + * Disconnect all from the WUSB Channel + * + * Send a Host Disconnect IE in the MMC, wait, don't send it any more + */ +static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc) +{ + int result = -ENOMEM; + struct wuie_host_disconnect *host_disconnect_ie; + might_sleep(); + host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL); + if (host_disconnect_ie == NULL) + goto error_alloc; + host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie); + host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT; + result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr); + if (result < 0) + goto error_mmcie_set; + + /* WUSB1.0[8.5.3.1 & 7.5.2] */ + msleep(100); + wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr); +error_mmcie_set: + kfree(host_disconnect_ie); +error_alloc: + return result; +} + +/* + * wusbhc_stop - stop transmitting MMCs + * @wusbhc: the HC to stop + * + * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs). + * + * If we can't allocate a Host Stop IE, screw it, we don't notify the + * devices we are disconnecting... + */ +void wusbhc_stop(struct wusbhc *wusbhc) +{ + if (wusbhc->active) { + wusbhc->active = 0; + wusbhc->stop(wusbhc); + wusbhc_sec_stop(wusbhc); + __wusbhc_host_disconnect_ie(wusbhc); + wusbhc_devconnect_stop(wusbhc); + wusbhc_rsv_terminate(wusbhc); + } +} +EXPORT_SYMBOL_GPL(wusbhc_stop); + +/* + * Change the CHID in a WUSB Channel + * + * If it is just a new CHID, send a Host Disconnect IE and then change + * the CHID IE. + */ +static int __wusbhc_chid_change(struct wusbhc *wusbhc, + const struct wusb_ckhdid *chid) +{ + int result = -ENOSYS; + struct device *dev = wusbhc->dev; + dev_err(dev, "%s() not implemented yet\n", __func__); + return result; + + BUG_ON(wusbhc->wuie_host_info == NULL); + __wusbhc_host_disconnect_ie(wusbhc); + wusbhc->wuie_host_info->CHID = *chid; + result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr); + if (result < 0) + dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result); + return result; +} + +/* + * Set/reset/update a new CHID + * + * Depending on the previous state of the MMCs, start, stop or change + * the sent MMC. 
This effectively switches the host controller on and + * off (radio wise). + */ +int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) +{ + int result = 0; + + if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0) + chid = NULL; + + mutex_lock(&wusbhc->mutex); + if (wusbhc->active) { + if (chid) + result = __wusbhc_chid_change(wusbhc, chid); + else + wusbhc_stop(wusbhc); + } else { + if (chid) + wusbhc_start(wusbhc, chid); + } + mutex_unlock(&wusbhc->mutex); + return result; +} +EXPORT_SYMBOL_GPL(wusbhc_chid_set); -- cgit v0.10.2 From d59db761b8559f07a7161ca3387d6c6949667ede Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:25 +0100 Subject: wusb: add the Wireless USB core (security) Add the WUSB security (authentication) code. Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c new file mode 100644 index 0000000..c36c438 --- /dev/null +++ b/drivers/usb/wusbcore/crypto.c @@ -0,0 +1,538 @@ +/* + * Ultra Wide Band + * AES-128 CCM Encryption + * + * Copyright (C) 2007 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * We don't do any encryption here; we use the Linux Kernel's AES-128 + * crypto modules to construct keys and payload blocks in a way + * defined by WUSB1.0[6]. Check the erratas, as typos are are patched + * there. + * + * Thanks a zillion to John Keys for his help and clarifications over + * the designed-by-a-committee text. + * + * So the idea is that there is this basic Pseudo-Random-Function + * defined in WUSB1.0[6.5] which is the core of everything. It works + * by tweaking some blocks, AES crypting them and then xoring + * something else with them (this seems to be called CBC(AES) -- can + * you tell I know jack about crypto?). So we just funnel it into the + * Linux Crypto API. + * + * We leave a crypto test module so we can verify that vectors match, + * every now and then. + * + * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I + * am learning a lot... + * + * Conveniently, some data structures that need to be + * funneled through AES are...16 bytes in size! + */ + +#include +#include +#include +#include +#include +#include +#define D_LOCAL 0 +#include + + +/* + * Block of data, as understood by AES-CCM + * + * The code assumes this structure is nothing but a 16 byte array + * (packed in a struct to avoid common mess ups that I usually do with + * arrays and enforcing type checking). + */ +struct aes_ccm_block { + u8 data[16]; +} __attribute__((packed)); + +/* + * Counter-mode Blocks (WUSB1.0[6.4]) + * + * According to CCM (or so it seems), for the purpose of calculating + * the MIC, the message is broken in N counter-mode blocks, B0, B1, + * ... BN. + * + * B0 contains flags, the CCM nonce and l(m). + * + * B1 contains l(a), the MAC header, the encryption offset and padding. 
+ * + * If EO is nonzero, additional blocks are built from payload bytes + * until EO is exahusted (FIXME: padding to 16 bytes, I guess). The + * padding is not xmitted. + */ + +/* WUSB1.0[T6.4] */ +struct aes_ccm_b0 { + u8 flags; /* 0x59, per CCM spec */ + struct aes_ccm_nonce ccm_nonce; + __be16 lm; +} __attribute__((packed)); + +/* WUSB1.0[T6.5] */ +struct aes_ccm_b1 { + __be16 la; + u8 mac_header[10]; + __le16 eo; + u8 security_reserved; /* This is always zero */ + u8 padding; /* 0 */ +} __attribute__((packed)); + +/* + * Encryption Blocks (WUSB1.0[6.4.4]) + * + * CCM uses Ax blocks to generate a keystream with which the MIC and + * the message's payload are encoded. A0 always encrypts/decrypts the + * MIC. Ax (x>0) are used for the sucesive payload blocks. + * + * The x is the counter, and is increased for each block. + */ +struct aes_ccm_a { + u8 flags; /* 0x01, per CCM spec */ + struct aes_ccm_nonce ccm_nonce; + __be16 counter; /* Value of x */ +} __attribute__((packed)); + +static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2, + size_t size) +{ + u8 *bo = _bo; + const u8 *bi1 = _bi1, *bi2 = _bi2; + size_t itr; + for (itr = 0; itr < size; itr++) + bo[itr] = bi1[itr] ^ bi2[itr]; +} + +/* + * CC-MAC function WUSB1.0[6.5] + * + * Take a data string and produce the encrypted CBC Counter-mode MIC + * + * Note the names for most function arguments are made to (more or + * less) match those used in the pseudo-function definition given in + * WUSB1.0[6.5]. + * + * @tfm_cbc: CBC(AES) blkcipher handle (initialized) + * + * @tfm_aes: AES cipher handle (initialized) + * + * @mic: buffer for placing the computed MIC (Message Integrity + * Code). This is exactly 8 bytes, and we expect the buffer to + * be at least eight bytes in length. + * + * @key: 128 bit symmetric key + * + * @n: CCM nonce + * + * @a: ASCII string, 14 bytes long (I guess zero padded if needed; + * we use exactly 14 bytes). + * + * @b: data stream to be processed; cannot be a global or const local + * (will confuse the scatterlists) + * + * @blen: size of b... + * + * Still not very clear how this is done, but looks like this: we + * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with + * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we + * take the payload and divide it in blocks (16 bytes), xor them with + * the previous crypto result (16 bytes) and crypt it, repeat the next + * block with the output of the previous one, rinse wash (I guess this + * is what AES CBC mode means...but I truly have no idea). So we use + * the CBC(AES) blkcipher, that does precisely that. The IV (Initial + * Vector) is 16 bytes and is set to zero, so + * + * See rfc3610. Linux crypto has a CBC implementation, but the + * documentation is scarce, to say the least, and the example code is + * so intricated that is difficult to understand how things work. Most + * of this is guess work -- bite me. + * + * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and + * using the 14 bytes of @a to fill up + * b1.{mac_header,e0,security_reserved,padding}. + * + * NOTE: The definiton of l(a) in WUSB1.0[6.5] vs the definition of + * l(m) is orthogonal, they bear no relationship, so it is not + * in conflict with the parameter's relation that + * WUSB1.0[6.4.2]) defines. + * + * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in + * first errata released on 2005/07. 
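+ *
+ * NOTE: condensing the above, what gets computed boils down to (sketch):
+ *
+ *   msg = B0 | B1 | B | zero padding up to a 16 byte boundary
+ *   CBC(AES)-encrypt msg with @key and an all-zero IV; the last
+ *     ciphertext block (left behind in the IV) is the CBC-MAC tag
+ *   A0  = the Ax block with counter == 0
+ *   MIC = first 8 bytes of (AES(key, A0) XOR tag)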
+ * + * NOTE: we need to clean IV to zero at each invocation to make sure + * we start with a fresh empty Initial Vector, so that the CBC + * works ok. + * + * NOTE: blen is not aligned to a block size, we'll pad zeros, that's + * what sg[4] is for. Maybe there is a smarter way to do this. + */ +static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc, + struct crypto_cipher *tfm_aes, void *mic, + const struct aes_ccm_nonce *n, + const struct aes_ccm_label *a, const void *b, + size_t blen) +{ + int result = 0; + struct blkcipher_desc desc; + struct aes_ccm_b0 b0; + struct aes_ccm_b1 b1; + struct aes_ccm_a ax; + struct scatterlist sg[4], sg_dst; + void *iv, *dst_buf; + size_t ivsize, dst_size; + const u8 bzero[16] = { 0 }; + size_t zero_padding; + + d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " + "n %p, a %p, b %p, blen %zu)\n", + tfm_cbc, tfm_aes, mic, n, a, b, blen); + /* + * These checks should be compile time optimized out + * ensure @a fills b1's mac_header and following fields + */ + WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la)); + WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block)); + WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block)); + WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block)); + + result = -ENOMEM; + zero_padding = sizeof(struct aes_ccm_block) + - blen % sizeof(struct aes_ccm_block); + zero_padding = blen % sizeof(struct aes_ccm_block); + if (zero_padding) + zero_padding = sizeof(struct aes_ccm_block) - zero_padding; + dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding; + dst_buf = kzalloc(dst_size, GFP_KERNEL); + if (dst_buf == NULL) { + printk(KERN_ERR "E: can't alloc destination buffer\n"); + goto error_dst_buf; + } + + iv = crypto_blkcipher_crt(tfm_cbc)->iv; + ivsize = crypto_blkcipher_ivsize(tfm_cbc); + memset(iv, 0, ivsize); + + /* Setup B0 */ + b0.flags = 0x59; /* Format B0 */ + b0.ccm_nonce = *n; + b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */ + + /* Setup B1 + * + * The WUSB spec is anything but clear! WUSB1.0[6.5] + * says that to initialize B1 from A with 'l(a) = blen + + * 14'--after clarification, it means to use A's contents + * for MAC Header, EO, sec reserved and padding. + */ + b1.la = cpu_to_be16(blen + 14); + memcpy(&b1.mac_header, a, sizeof(*a)); + + d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0)); + d_dump(4, NULL, &b0, sizeof(b0)); + d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1)); + d_dump(4, NULL, &b1, sizeof(b1)); + d_printf(4, NULL, "I: B (%zu bytes)\n", blen); + d_dump(4, NULL, b, blen); + d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding); + d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize); + d_dump(4, NULL, iv, ivsize); + + sg_init_table(sg, ARRAY_SIZE(sg)); + sg_set_buf(&sg[0], &b0, sizeof(b0)); + sg_set_buf(&sg[1], &b1, sizeof(b1)); + sg_set_buf(&sg[2], b, blen); + /* 0 if well behaved :) */ + sg_set_buf(&sg[3], bzero, zero_padding); + sg_init_one(&sg_dst, dst_buf, dst_size); + + desc.tfm = tfm_cbc; + desc.flags = 0; + result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size); + if (result < 0) { + printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n", + result); + goto error_cbc_crypt; + } + d_printf(4, NULL, "D: MIC tag\n"); + d_dump(4, NULL, iv, ivsize); + + /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5] + * The procedure is to AES crypt the A0 block and XOR the MIC + * Tag agains it; we only do the first 8 bytes and place it + * directly in the destination buffer. + * + * POS Crypto API: size is assumed to be AES's block size. 
+ * Thanks for documenting it -- tip taken from airo.c + */ + ax.flags = 0x01; /* as per WUSB 1.0 spec */ + ax.ccm_nonce = *n; + ax.counter = 0; + crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); + bytewise_xor(mic, &ax, iv, 8); + d_printf(4, NULL, "D: CTR[MIC]\n"); + d_dump(4, NULL, &ax, 8); + d_printf(4, NULL, "D: CCM-MIC tag\n"); + d_dump(4, NULL, mic, 8); + result = 8; +error_cbc_crypt: + kfree(dst_buf); +error_dst_buf: + d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " + "n %p, a %p, b %p, blen %zu)\n", + tfm_cbc, tfm_aes, mic, n, a, b, blen); + return result; +} + +/* + * WUSB Pseudo Random Function (WUSB1.0[6.5]) + * + * @b: buffer to the source data; cannot be a global or const local + * (will confuse the scatterlists) + */ +ssize_t wusb_prf(void *out, size_t out_size, + const u8 key[16], const struct aes_ccm_nonce *_n, + const struct aes_ccm_label *a, + const void *b, size_t blen, size_t len) +{ + ssize_t result, bytes = 0, bitr; + struct aes_ccm_nonce n = *_n; + struct crypto_blkcipher *tfm_cbc; + struct crypto_cipher *tfm_aes; + u64 sfn = 0; + __le64 sfn_le; + + d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " + "a %p, b %p, blen %zu, len %zu)\n", out, out_size, + key, _n, a, b, blen, len); + + tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm_cbc)) { + result = PTR_ERR(tfm_cbc); + printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result); + goto error_alloc_cbc; + } + result = crypto_blkcipher_setkey(tfm_cbc, key, 16); + if (result < 0) { + printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result); + goto error_setkey_cbc; + } + + tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm_aes)) { + result = PTR_ERR(tfm_aes); + printk(KERN_ERR "E: can't load AES: %d\n", (int)result); + goto error_alloc_aes; + } + result = crypto_cipher_setkey(tfm_aes, key, 16); + if (result < 0) { + printk(KERN_ERR "E: can't set AES key: %d\n", (int)result); + goto error_setkey_aes; + } + + for (bitr = 0; bitr < (len + 63) / 64; bitr++) { + sfn_le = cpu_to_le64(sfn++); + memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */ + result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes, + &n, a, b, blen); + if (result < 0) + goto error_ccm_mac; + bytes += result; + } + result = bytes; +error_ccm_mac: +error_setkey_aes: + crypto_free_cipher(tfm_aes); +error_alloc_aes: +error_setkey_cbc: + crypto_free_blkcipher(tfm_cbc); +error_alloc_cbc: + d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " + "a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size, + key, _n, a, b, blen, len, (int)bytes); + return result; +} + +/* WUSB1.0[A.2] test vectors */ +static const u8 stv_hsmic_key[16] = { + 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d, + 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f +}; + +static const struct aes_ccm_nonce stv_hsmic_n = { + .sfn = { 0 }, + .tkid = { 0x76, 0x98, 0x01, }, + .dest_addr = { .data = { 0xbe, 0x00 } }, + .src_addr = { .data = { 0x76, 0x98 } }, +}; + +/* + * Out-of-band MIC Generation verification code + * + */ +static int wusb_oob_mic_verify(void) +{ + int result; + u8 mic[8]; + /* WUSB1.0[A.2] test vectors + * + * Need to keep it in the local stack as GCC 4.1.3something + * messes up and generates noise. 
+ */ + struct usb_handshake stv_hsmic_hs = { + .bMessageNumber = 2, + .bStatus = 00, + .tTKID = { 0x76, 0x98, 0x01 }, + .bReserved = 00, + .CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, + 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, + 0x3c, 0x3d, 0x3e, 0x3f }, + .nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, + 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, + 0x2c, 0x2d, 0x2e, 0x2f }, + .MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c, + 0x14, 0x7b } , + }; + size_t hs_size; + + result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs); + if (result < 0) + printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result); + else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) { + printk(KERN_ERR "E: OOB MIC test: " + "mismatch between MIC result and WUSB1.0[A2]\n"); + hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC); + printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size); + dump_bytes(NULL, &stv_hsmic_hs, hs_size); + printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n", + sizeof(stv_hsmic_n)); + dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n)); + printk(KERN_ERR "E: MIC out:\n"); + dump_bytes(NULL, mic, sizeof(mic)); + printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n"); + dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC)); + result = -EINVAL; + } else + result = 0; + return result; +} + +/* + * Test vectors for Key derivation + * + * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1] + * (errata corrected in 2005/07). + */ +static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = { + 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, + 0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f +}; + +static const struct aes_ccm_nonce stv_keydvt_n_a1 = { + .sfn = { 0 }, + .tkid = { 0x76, 0x98, 0x01, }, + .dest_addr = { .data = { 0xbe, 0x00 } }, + .src_addr = { .data = { 0x76, 0x98 } }, +}; + +static const struct wusb_keydvt_out stv_keydvt_out_a1 = { + .kck = { + 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d, + 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f + }, + .ptk = { + 0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06, + 0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d + } +}; + +/* + * Performa a test to make sure we match the vectors defined in + * WUSB1.0[A.1](Errata2006/12) + */ +static int wusb_key_derive_verify(void) +{ + int result = 0; + struct wusb_keydvt_out keydvt_out; + /* These come from WUSB1.0[A.1] + 2006/12 errata + * NOTE: can't make this const or global -- somehow it seems + * the scatterlists for crypto get confused and we get + * bad data. There is no doc on this... 
*/ + struct wusb_keydvt_in stv_keydvt_in_a1 = { + .hnonce = { + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f + }, + .dnonce = { + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f + } + }; + + result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1, + &stv_keydvt_in_a1); + if (result < 0) + printk(KERN_ERR "E: WUSB key derivation test: " + "derivation failed: %d\n", result); + if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) { + printk(KERN_ERR "E: WUSB key derivation test: " + "mismatch between key derivation result " + "and WUSB1.0[A1] Errata 2006/12\n"); + printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n", + sizeof(stv_key_a1)); + dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1)); + printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n", + sizeof(stv_keydvt_n_a1)); + dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); + printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n", + sizeof(stv_keydvt_in_a1)); + dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); + printk(KERN_ERR "E: keydvt out: KCK\n"); + dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck)); + printk(KERN_ERR "E: keydvt out: PTK\n"); + dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk)); + result = -EINVAL; + } else + result = 0; + return result; +} + +/* + * Initialize crypto system + * + * FIXME: we do nothing now, other than verifying. Later on we'll + * cache the encryption stuff, so that's why we have a separate init. + */ +int wusb_crypto_init(void) +{ + int result; + + result = wusb_key_derive_verify(); + if (result < 0) + return result; + return wusb_oob_mic_verify(); +} + +void wusb_crypto_exit(void) +{ + /* FIXME: free cached crypto transforms */ +} diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c new file mode 100644 index 0000000..a101cad --- /dev/null +++ b/drivers/usb/wusbcore/security.c @@ -0,0 +1,642 @@ +/* + * Wireless USB Host Controller + * Security support: encryption enablement, etc + * + * Copyright (C) 2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ +#include +#include +#include +#include "wusbhc.h" + +/* + * DEBUG & SECURITY WARNING!!!! + * + * If you enable this past 1, the debug code will weaken the + * cryptographic safety of the system (on purpose, for debugging). 
+ * + * Weaken means: + * we print secret keys and intermediate values all the way, + */ +#undef D_LOCAL +#define D_LOCAL 2 +#include + +static void wusbhc_set_gtk_callback(struct urb *urb); +static void wusbhc_gtk_rekey_done_work(struct work_struct *work); + +int wusbhc_sec_create(struct wusbhc *wusbhc) +{ + wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data); + wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY; + wusbhc->gtk.descr.bReserved = 0; + + wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, + WUSB_KEY_INDEX_ORIGINATOR_HOST); + + INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work); + + return 0; +} + + +/* Called when the HC is destroyed */ +void wusbhc_sec_destroy(struct wusbhc *wusbhc) +{ +} + + +/** + * wusbhc_next_tkid - generate a new, currently unused, TKID + * @wusbhc: the WUSB host controller + * @wusb_dev: the device whose PTK the TKID is for + * (or NULL for a TKID for a GTK) + * + * The generated TKID consist of two parts: the device's authenicated + * address (or 0 or a GTK); and an incrementing number. This ensures + * that TKIDs cannot be shared between devices and by the time the + * incrementing number wraps around the older TKIDs will no longer be + * in use (a maximum of two keys may be active at any one time). + */ +static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +{ + u32 *tkid; + u32 addr; + + if (wusb_dev == NULL) { + tkid = &wusbhc->gtk_tkid; + addr = 0; + } else { + tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid; + addr = wusb_dev->addr & 0x7f; + } + + *tkid = (addr << 8) | ((*tkid + 1) & 0xff); + + return *tkid; +} + +static void wusbhc_generate_gtk(struct wusbhc *wusbhc) +{ + const size_t key_size = sizeof(wusbhc->gtk.data); + u32 tkid; + + tkid = wusbhc_next_tkid(wusbhc, NULL); + + wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff; + wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff; + wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff; + + get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size); +} + +/** + * wusbhc_sec_start - start the security management process + * @wusbhc: the WUSB host controller + * + * Generate and set an initial GTK on the host controller. + * + * Called when the HC is started. + */ +int wusbhc_sec_start(struct wusbhc *wusbhc) +{ + const size_t key_size = sizeof(wusbhc->gtk.data); + int result; + + wusbhc_generate_gtk(wusbhc); + + result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, + &wusbhc->gtk.descr.bKeyData, key_size); + if (result < 0) + dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n", + result); + + return result; +} + +/** + * wusbhc_sec_stop - stop the security management process + * @wusbhc: the WUSB host controller + * + * Wait for any pending GTK rekeys to stop. + */ +void wusbhc_sec_stop(struct wusbhc *wusbhc) +{ + cancel_work_sync(&wusbhc->gtk_rekey_done_work); +} + + +/** @returns encryption type name */ +const char *wusb_et_name(u8 x) +{ + switch (x) { + case USB_ENC_TYPE_UNSECURE: return "unsecure"; + case USB_ENC_TYPE_WIRED: return "wired"; + case USB_ENC_TYPE_CCM_1: return "CCM-1"; + case USB_ENC_TYPE_RSA_1: return "RSA-1"; + default: return "unknown"; + } +} +EXPORT_SYMBOL_GPL(wusb_et_name); + +/* + * Set the device encryption method + * + * We tell the device which encryption method to use; we do this when + * setting up the device's security. 
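 *
 * The request is a standard SET_ENCRYPTION control transfer; wValue is
 * the device's CCM-1 bEncryptionValue when enabling and 0 (unsecure)
 * when disabling.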
+ */ +static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value) +{ + int result; + struct device *dev = &usb_dev->dev; + struct wusb_dev *wusb_dev = usb_dev->wusb_dev; + + if (value) { + value = wusb_dev->ccm1_etd.bEncryptionValue; + } else { + /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */ + value = 0; + } + /* Set device's */ + result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), + USB_REQ_SET_ENCRYPTION, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + value, 0, NULL, 0, 1000 /* FIXME: arbitrary */); + if (result < 0) + dev_err(dev, "Can't set device's WUSB encryption to " + "%s (value %d): %d\n", + wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType), + wusb_dev->ccm1_etd.bEncryptionValue, result); + return result; +} + +/* + * Set the GTK to be used by a device. + * + * The device must be authenticated. + */ +static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +{ + struct usb_device *usb_dev = wusb_dev->usb_dev; + + return usb_control_msg( + usb_dev, usb_sndctrlpipe(usb_dev, 0), + USB_REQ_SET_DESCRIPTOR, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + USB_DT_KEY << 8 | wusbhc->gtk_index, 0, + &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, + 1000); +} + + +/* FIXME: prototype for adding security */ +int wusb_dev_sec_add(struct wusbhc *wusbhc, + struct usb_device *usb_dev, struct wusb_dev *wusb_dev) +{ + int result, bytes, secd_size; + struct device *dev = &usb_dev->dev; + struct usb_security_descriptor secd; + const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; + void *secd_buf; + const void *itr, *top; + char buf[64]; + + d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev); + result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, + 0, &secd, sizeof(secd)); + if (result < sizeof(secd)) { + dev_err(dev, "Can't read security descriptor or " + "not enough data: %d\n", result); + goto error_secd; + } + secd_size = le16_to_cpu(secd.wTotalLength); + d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n", + result, secd_size); + secd_buf = kmalloc(secd_size, GFP_KERNEL); + if (secd_buf == NULL) { + dev_err(dev, "Can't allocate space for security descriptors\n"); + goto error_secd_alloc; + } + result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, + 0, secd_buf, secd_size); + if (result < secd_size) { + dev_err(dev, "Can't read security descriptor or " + "not enough data: %d\n", result); + goto error_secd_all; + } + d_printf(5, dev, "got %d bytes of sec descriptors\n", result); + bytes = 0; + itr = secd_buf + sizeof(secd); + top = secd_buf + result; + while (itr < top) { + etd = itr; + if (top - itr < sizeof(*etd)) { + dev_err(dev, "BUG: bad device security descriptor; " + "not enough data (%zu vs %zu bytes left)\n", + top - itr, sizeof(*etd)); + break; + } + if (etd->bLength < sizeof(*etd)) { + dev_err(dev, "BUG: bad device encryption descriptor; " + "descriptor is too short " + "(%u vs %zu needed)\n", + etd->bLength, sizeof(*etd)); + break; + } + itr += etd->bLength; + bytes += snprintf(buf + bytes, sizeof(buf) - bytes, + "%s (0x%02x/%02x) ", + wusb_et_name(etd->bEncryptionType), + etd->bEncryptionValue, etd->bAuthKeyIndex); + if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1) + ccm1_etd = etd; + } + /* This code only supports CCM1 as of now. */ + /* FIXME: user has to choose which sec mode to use? 
+ * In theory we want CCM */ + if (ccm1_etd == NULL) { + dev_err(dev, "WUSB device doesn't support CCM1 encryption, " + "can't use!\n"); + result = -EINVAL; + goto error_no_ccm1; + } + wusb_dev->ccm1_etd = *ccm1_etd; + dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", + buf, wusb_et_name(ccm1_etd->bEncryptionType), + ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); + result = 0; + kfree(secd_buf); +out: + d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n", + usb_dev, wusb_dev, result); + return result; + + +error_no_ccm1: +error_secd_all: + kfree(secd_buf); +error_secd_alloc: +error_secd: + goto out; +} + +void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) +{ + /* Nothing so far */ +} + +static void hs_printk(unsigned level, struct device *dev, + struct usb_handshake *hs) +{ + d_printf(level, dev, + " bMessageNumber: %u\n" + " bStatus: %u\n" + " tTKID: %02x %02x %02x\n" + " CDID: %02x %02x %02x %02x %02x %02x %02x %02x\n" + " %02x %02x %02x %02x %02x %02x %02x %02x\n" + " nonce: %02x %02x %02x %02x %02x %02x %02x %02x\n" + " %02x %02x %02x %02x %02x %02x %02x %02x\n" + " MIC: %02x %02x %02x %02x %02x %02x %02x %02x\n", + hs->bMessageNumber, hs->bStatus, + hs->tTKID[2], hs->tTKID[1], hs->tTKID[0], + hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3], + hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7], + hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11], + hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15], + hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3], + hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7], + hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11], + hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15], + hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3], + hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]); +} + +/** + * Update the address of an unauthenticated WUSB device + * + * Once we have successfully authenticated, we take it to addr0 state + * and then to a normal address. + * + * Before the device's address (as known by it) was usb_dev->devnum | + * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. + */ +static int wusb_dev_update_address(struct wusbhc *wusbhc, + struct wusb_dev *wusb_dev) +{ + int result = -ENOMEM; + struct usb_device *usb_dev = wusb_dev->usb_dev; + struct device *dev = &usb_dev->dev; + u8 new_address = wusb_dev->addr & 0x7F; + + /* Set address 0 */ + result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), + USB_REQ_SET_ADDRESS, 0, + 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "auth failed: can't set address 0: %d\n", + result); + goto error_addr0; + } + result = wusb_set_dev_addr(wusbhc, wusb_dev, 0); + if (result < 0) + goto error_addr0; + usb_ep0_reinit(usb_dev); + + /* Set new (authenticated) address. 
*/ + result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), + USB_REQ_SET_ADDRESS, 0, + new_address, 0, NULL, 0, + 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "auth failed: can't set address %u: %d\n", + new_address, result); + goto error_addr; + } + result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address); + if (result < 0) + goto error_addr; + usb_ep0_reinit(usb_dev); + usb_dev->authenticated = 1; +error_addr: +error_addr0: + return result; +} + +/* + * + * + */ +/* FIXME: split and cleanup */ +int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, + struct wusb_ckhdid *ck) +{ + int result = -ENOMEM; + struct usb_device *usb_dev = wusb_dev->usb_dev; + struct device *dev = &usb_dev->dev; + u32 tkid; + __le32 tkid_le; + struct usb_handshake *hs; + struct aes_ccm_nonce ccm_n; + u8 mic[8]; + struct wusb_keydvt_in keydvt_in; + struct wusb_keydvt_out keydvt_out; + + hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL); + if (hs == NULL) { + dev_err(dev, "can't allocate handshake data\n"); + goto error_kzalloc; + } + + /* We need to turn encryption before beginning the 4way + * hshake (WUSB1.0[.3.2.2]) */ + result = wusb_dev_set_encryption(usb_dev, 1); + if (result < 0) + goto error_dev_set_encryption; + + tkid = wusbhc_next_tkid(wusbhc, wusb_dev); + tkid_le = cpu_to_le32(tkid); + + hs[0].bMessageNumber = 1; + hs[0].bStatus = 0; + memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID)); + hs[0].bReserved = 0; + memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID)); + get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); + memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ + + d_printf(1, dev, "I: sending hs1:\n"); + hs_printk(2, dev, &hs[0]); + + result = usb_control_msg( + usb_dev, usb_sndctrlpipe(usb_dev, 0), + USB_REQ_SET_HANDSHAKE, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + 1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "Handshake1: request failed: %d\n", result); + goto error_hs1; + } + + /* Handshake 2, from the device -- need to verify fields */ + result = usb_control_msg( + usb_dev, usb_rcvctrlpipe(usb_dev, 0), + USB_REQ_GET_HANDSHAKE, + USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + 2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "Handshake2: request failed: %d\n", result); + goto error_hs2; + } + d_printf(1, dev, "got HS2:\n"); + hs_printk(2, dev, &hs[1]); + + result = -EINVAL; + if (hs[1].bMessageNumber != 2) { + dev_err(dev, "Handshake2 failed: bad message number %u\n", + hs[1].bMessageNumber); + goto error_hs2; + } + if (hs[1].bStatus != 0) { + dev_err(dev, "Handshake2 failed: bad status %u\n", + hs[1].bStatus); + goto error_hs2; + } + if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) { + dev_err(dev, "Handshake2 failed: TKID mismatch " + "(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n", + hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2], + hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]); + goto error_hs2; + } + if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) { + dev_err(dev, "Handshake2 failed: CDID mismatch\n"); + goto error_hs2; + } + + /* Setup the CCM nonce */ + memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */ + memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid)); + ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr; + ccm_n.dest_addr.data[0] = wusb_dev->addr; + ccm_n.dest_addr.data[1] = 0; + + /* Derive the KCK and PTK from CK, the CCM, H and D nonces */ + memcpy(keydvt_in.hnonce, 
hs[0].nonce, sizeof(keydvt_in.hnonce)); + memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce)); + result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in); + if (result < 0) { + dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n", + result); + goto error_hs2; + } + d_printf(2, dev, "KCK:\n"); + d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck)); + d_printf(2, dev, "PTK:\n"); + d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); + + /* Compute MIC and verify it */ + result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); + if (result < 0) { + dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n", + result); + goto error_hs2; + } + + d_printf(2, dev, "MIC:\n"); + d_dump(2, dev, mic, sizeof(mic)); + if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { + dev_err(dev, "Handshake2 failed: MIC mismatch\n"); + goto error_hs2; + } + + /* Send Handshake3 */ + hs[2].bMessageNumber = 3; + hs[2].bStatus = 0; + memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID)); + hs[2].bReserved = 0; + memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID)); + memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce)); + result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]); + if (result < 0) { + dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n", + result); + goto error_hs2; + } + + d_printf(1, dev, "I: sending hs3:\n"); + hs_printk(2, dev, &hs[2]); + + result = usb_control_msg( + usb_dev, usb_sndctrlpipe(usb_dev, 0), + USB_REQ_SET_HANDSHAKE, + USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, + 3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "Handshake3: request failed: %d\n", result); + goto error_hs3; + } + + d_printf(1, dev, "I: turning on encryption on host for device\n"); + d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); + result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, + keydvt_out.ptk, sizeof(keydvt_out.ptk)); + if (result < 0) + goto error_wusbhc_set_ptk; + + d_printf(1, dev, "I: setting a GTK\n"); + result = wusb_dev_set_gtk(wusbhc, wusb_dev); + if (result < 0) { + dev_err(dev, "Set GTK for device: request failed: %d\n", + result); + goto error_wusbhc_set_gtk; + } + + /* Update the device's address from unauth to auth */ + if (usb_dev->authenticated == 0) { + d_printf(1, dev, "I: updating addres to auth from non-auth\n"); + result = wusb_dev_update_address(wusbhc, wusb_dev); + if (result < 0) + goto error_dev_update_address; + } + result = 0; + d_printf(1, dev, "I: 4way handshke done, device authenticated\n"); + +error_dev_update_address: +error_wusbhc_set_gtk: +error_wusbhc_set_ptk: +error_hs3: +error_hs2: +error_hs1: + memset(hs, 0, 3*sizeof(hs[0])); + memset(&keydvt_out, 0, sizeof(keydvt_out)); + memset(&keydvt_in, 0, sizeof(keydvt_in)); + memset(&ccm_n, 0, sizeof(ccm_n)); + memset(mic, 0, sizeof(mic)); + if (result < 0) { + /* error path */ + wusb_dev_set_encryption(usb_dev, 0); + } +error_dev_set_encryption: + kfree(hs); +error_kzalloc: + return result; +} + +/* + * Once all connected and authenticated devices have received the new + * GTK, switch the host to using it. 
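 *
 * wusbhc_gtk_rekey() increments pending_set_gtks once for each set-GTK
 * control URB it successfully submits; every completion queues this
 * work item, which decrements the counter and, once it reaches zero,
 * hands the new GTK to the host controller via the set_gtk op.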
+ */ +static void wusbhc_gtk_rekey_done_work(struct work_struct *work) +{ + struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work); + size_t key_size = sizeof(wusbhc->gtk.data); + + mutex_lock(&wusbhc->mutex); + + if (--wusbhc->pending_set_gtks == 0) + wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); + + mutex_unlock(&wusbhc->mutex); +} + +static void wusbhc_set_gtk_callback(struct urb *urb) +{ + struct wusbhc *wusbhc = urb->context; + + queue_work(wusbd, &wusbhc->gtk_rekey_done_work); +} + +/** + * wusbhc_gtk_rekey - generate and distribute a new GTK + * @wusbhc: the WUSB host controller + * + * Generate a new GTK and distribute it to all connected and + * authenticated devices. When all devices have the new GTK, the host + * starts using it. + * + * This must be called after every device disconnect (see [WUSB] + * section 6.2.11.2). + */ +void wusbhc_gtk_rekey(struct wusbhc *wusbhc) +{ + static const size_t key_size = sizeof(wusbhc->gtk.data); + int p; + + wusbhc_generate_gtk(wusbhc); + + for (p = 0; p < wusbhc->ports_max; p++) { + struct wusb_dev *wusb_dev; + + wusb_dev = wusbhc->port[p].wusb_dev; + if (!wusb_dev || !wusb_dev->usb_dev | !wusb_dev->usb_dev->authenticated) + continue; + + usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev, + usb_sndctrlpipe(wusb_dev->usb_dev, 0), + (void *)wusb_dev->set_gtk_req, + &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength, + wusbhc_set_gtk_callback, wusbhc); + if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0) + wusbhc->pending_set_gtks++; + } + if (wusbhc->pending_set_gtks == 0) + wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size); +} -- cgit v0.10.2 From 470cc4150367d369bdc98ee04902b04baa2b2464 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 17 Sep 2008 16:34:26 +0100 Subject: wusb: add the Wireless USB core (build-system) Add the WUSB build system (Kconfig and Kbuild) files. Signed-off-by: Greg Kroah-Hartman Signed-off-by: David Vrabel diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index bcefbdd..c23a985 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -97,6 +97,8 @@ source "drivers/usb/core/Kconfig" source "drivers/usb/mon/Kconfig" +source "drivers/usb/wusbcore/Kconfig" + source "drivers/usb/host/Kconfig" source "drivers/usb/musb/Kconfig" diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index a419c42..19ede32 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -19,6 +19,8 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ +obj-$(CONFIG_USB_WUSB) += wusbcore/ + obj-$(CONFIG_USB_ACM) += class/ obj-$(CONFIG_USB_PRINTER) += class/ diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig new file mode 100644 index 0000000..add077e --- /dev/null +++ b/drivers/usb/wusbcore/Kconfig @@ -0,0 +1,17 @@ +# +# Wireless USB Core configuration +# +config USB_WUSB + tristate "Enable Wireless USB extensions" + depends on USB + select UWB + select CRYPTO + select CRYPTO_BLKCIPHER + select CRYPTO_CBC + select CRYPTO_MANAGER + select CRYPTO_AES + help + Enable the host-side support for Wireless USB. + + To compile this support select Y (built in). It is safe to + select even if you don't have the hardware. 
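The CRYPTO_* selects above map onto the transforms allocated by crypto.c
earlier in this series ("cbc(aes)" for the CBC-MAC, bare "aes" for the
counter blocks). A minimal sketch of that dependency, using the same
2.6.27-era API as crypto.c (the helper name is made up for illustration):

#include <linux/crypto.h>
#include <linux/err.h>

/* Typically fails with -ENOENT if CRYPTO_CBC or CRYPTO_AES is not built. */
static int wusb_crypto_deps_check(void)
{
	struct crypto_blkcipher *tfm_cbc;
	struct crypto_cipher *tfm_aes;
	int result = 0;

	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_cbc))
		return PTR_ERR(tfm_cbc);

	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_aes))
		result = PTR_ERR(tfm_aes);
	else
		crypto_free_cipher(tfm_aes);

	crypto_free_blkcipher(tfm_cbc);
	return result;
}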
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile new file mode 100644 index 0000000..ea1dbfc --- /dev/null +++ b/drivers/usb/wusbcore/Makefile @@ -0,0 +1,12 @@ +obj-$(CONFIG_USB_WUSB) += wusbcore.o + +wusbcore-objs := \ + crypto.o \ + devconnect.o \ + dev-sysfs.o \ + mmc.o \ + pal.o \ + rh.o \ + reservation.o \ + security.o \ + wusbhc.o -- cgit v0.10.2 From 870d5395045bfe8e5213525152682c863a10f8d2 Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:27 +0100 Subject: wusb: add the USB wusb-cbaf driver Add a driver for cable based associated of (Wireless) USB devices. Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile index ea1dbfc..6504f42 100644 --- a/drivers/usb/wusbcore/Makefile +++ b/drivers/usb/wusbcore/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_USB_WUSB) += wusbcore.o +obj-$(CONFIG_USB_WUSB) += wusbcore.o wusb-cbaf.o wusbcore-objs := \ crypto.o \ @@ -10,3 +10,5 @@ wusbcore-objs := \ reservation.o \ security.o \ wusbhc.o + +wusb-cbaf-objs := cbaf.o diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c new file mode 100644 index 0000000..584eabe --- /dev/null +++ b/drivers/usb/wusbcore/cbaf.c @@ -0,0 +1,620 @@ +/* + * Wireless USB - Cable Based Association + * + * + * Copyright (C) 2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * WUSB devices have to be paired (authenticated in WUSB lingo) so + * that they can connect to the system. + * + * One way of pairing is using CBA-Cable Based Authentication, devices + * that can connect via wired or wireless USB. First time you plug + * them with a cable, pairing is done between host and device and + * subsequent times, you can connect wirelessly without having to + * pair. That's the idea. + * + * This driver does nothing Earth shattering. It just provides an + * interface to chat with the wire-connected device so we can get a + * CDID (device ID) that might have been previously associated to a + * CHID (host ID) and to set up a new triplet + * (connection context), with the CK being the secret, or connection + * key. This is the pairing data. + * + * When a device with the CBA capability connects, the probe routine + * just creates a bunch of sysfs files that a user space enumeration + * manager uses to allow it to connect wirelessly to the system or not. + * + * The process goes like this: + * + * 1. device plugs, cbaf is loaded, notifications happen + * + * 2. the connection manager sees a device with CBAF capability (the + * wusb_{host_info,cdid,cc} files are in /sys/device/blah/OURDEVICE). + * + * 3. CM (connection manager) writes the CHID (host ID) and a host + * name into the wusb_host_info file. This gets sent to the device. + * + * 4. 
CM cats the wusb_cdid file; this asks the device if it has any + * CDID associated to the CHDI we just wrote before. If it does, it + * is printed, along with the device 'friendly name' and the band + * groups the device supports. + * + * 5. CM looks up its database + * + * 5.1 If it has a matching CHID,CDID entry, the device has been + * authorized before (paired). Now we can optionally ask the user + * if he wants to allow the device to connect. Then we generate a + * new CDID and CK, send it to the device and update the database + * (writing to the wusb_cc file so they are uploaded to the device). + * + * 5.2 If the CDID is zero (or we didn't find a matching CDID in our + * database), we assume the device is not known. We ask the user + * if s/he wants to allow the device to be connected wirelessly + * to the system. If nope, nothing else is done (FIXME: maybe + * send a zero CDID to clean up our CHID?). If yes, we generate + * random CDID and CKs (and write them to the wusb_cc file so + * they are uploaded to the device). + * + * 6. device is unplugged + * + * When the device tries to connect wirelessly, it will present it's + * CDID to the WUSB host controller with ID CHID, which will query the + * database. If found, the host will (with a 4way handshake) challenge + * the device to demonstrate it has the CK secret key (from our + * database) without actually exchanging it. Once satisfied, crypto + * keys are derived from the CK, the device is connected and all + * communication is crypted. + * + * + * NOTES ABOUT THE IMPLEMENTATION + * + * The descriptors sent back and forth use this horrible format from + * hell on which each field is actually a field ID, field length and + * then the field itself. How stupid can that get, taking into account + * the structures are defined by the spec?? oh well. + * + * + * FIXME: we don't provide a way to tell the device the pairing failed + * (ie: send a CC_DATA_FAIL). Should add some day. 
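 *
 * Concretely, every field in those descriptors is wrapped in a small
 * attribute header (an attribute id plus a 16-bit length) followed by
 * the value itself, which is why each member of the wusb_cbaf_*
 * request structures used below is paired with an *_hdr member.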
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef D_LOCAL +#define D_LOCAL 6 +#include + +/* An instance of a Cable-Based-Association-Framework device */ +struct cbaf { + struct usb_device *usb_dev; + struct usb_interface *usb_iface; + void *buffer; + size_t buffer_size; + + struct wusb_ckhdid chid;/* Host Information */ + char host_name[65]; /* max length: + Assoc Models Suplement 1.0[T4-7] */ + u16 host_band_groups; + + struct wusb_ckhdid cdid;/* Device Information */ + char device_name[65]; /* max length: + Assoc Models Suplement 1.0[T4-7] */ + u16 device_band_groups; + struct wusb_ckhdid ck; /* Connection Key */ +}; + +/* + * Verify that a CBAF USB-interface has what we need + * + * (like we care, we are going to fail the enumeration if not :) + * + * FIXME: ugly function, need to split + */ +static int cbaf_check(struct cbaf *cbaf) +{ + int result; + struct device *dev = &cbaf->usb_iface->dev; + struct wusb_cbaf_assoc_info *assoc_info; + struct wusb_cbaf_assoc_request *assoc_request; + size_t assoc_size; + void *itr, *top; + unsigned ar_index; + int ar_rhi_idx = -1, ar_assoc_idx = -1; + + result = usb_control_msg( + cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), + CBAF_REQ_GET_ASSOCIATION_INFORMATION, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, + cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "cannot get available association types: %d\n", + result); + goto error_get_assoc_types; + } + assoc_info = cbaf->buffer; + if (result < sizeof(*assoc_info)) { + dev_err(dev, "not enough data to decode association info " + "header (%zu vs %zu bytes required)\n", + (size_t)result, sizeof(*assoc_info)); + goto error_bad_header; + } + assoc_size = le16_to_cpu(assoc_info->Length); + if (result < assoc_size) { + dev_err(dev, "not enough data to decode association info " + "(%zu vs %zu bytes required)\n", + (size_t)assoc_size, sizeof(*assoc_info)); + goto error_bad_data; + } + /* + * From now on, we just verify, but won't error out unless we + * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE} + * types. 
+ */ + ar_index = 0; + itr = cbaf->buffer + sizeof(*assoc_info); + top = cbaf->buffer + assoc_size; + d_printf(1, dev, "Found %u association requests (%zu bytes)\n", + assoc_info->NumAssociationRequests, assoc_size); + while (itr < top) { + u16 ar_type, ar_subtype; + u32 ar_size; + const char *ar_name; + + assoc_request = itr; + if (top - itr < sizeof(*assoc_request)) { + dev_err(dev, "not enough data to decode associaton " + "request (%zu vs %zu bytes needed)\n", + top - itr, sizeof(*assoc_request)); + break; + } + ar_type = le16_to_cpu(assoc_request->AssociationTypeId); + ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId); + ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize); + switch (ar_type) { + case AR_TYPE_WUSB: + /* Verify we have what is mandated by AMS1.0 */ + switch (ar_subtype) { + case AR_TYPE_WUSB_RETRIEVE_HOST_INFO: + ar_name = "retrieve_host_info"; + ar_rhi_idx = ar_index; + break; + case AR_TYPE_WUSB_ASSOCIATE: + /* send assoc data */ + ar_name = "associate"; + ar_assoc_idx = ar_index; + break; + default: + ar_name = "unknown"; + }; + break; + default: + ar_name = "unknown"; + }; + d_printf(1, dev, "association request #%02u: 0x%04x/%04x " + "(%zu bytes): %s\n", + assoc_request->AssociationDataIndex, ar_type, + ar_subtype, (size_t)ar_size, ar_name); + + itr += sizeof(*assoc_request); + ar_index++; + } + if (ar_rhi_idx == -1) { + dev_err(dev, "Missing RETRIEVE_HOST_INFO association " + "request\n"); + goto error_bad_reqs; + } + if (ar_assoc_idx == -1) { + dev_err(dev, "Missing ASSOCIATE association request\n"); + goto error_bad_reqs; + } + return 0; + +error_bad_header: +error_bad_data: +error_bad_reqs: +error_get_assoc_types: + return -EINVAL; +} + +static const struct wusb_cbaf_host_info cbaf_host_info_defaults = { + .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, + .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB), + .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId, + .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO), + .CHID_hdr = WUSB_AR_CHID, + .LangID_hdr = WUSB_AR_LangID, + .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName, +}; + +/* Send WUSB host information (CHID and name) to a CBAF device */ +static int cbaf_send_host_info(struct cbaf *cbaf) +{ + struct wusb_cbaf_host_info *hi; + size_t hi_size; + + hi = cbaf->buffer; + memset(hi, 0, sizeof(*hi)); + *hi = cbaf_host_info_defaults; + hi->CHID = cbaf->chid; + hi->LangID = 0; /* FIXME: I guess... */ + strncpy(hi->HostFriendlyName, cbaf->host_name, + hi->HostFriendlyName_hdr.len); + hi->HostFriendlyName_hdr.len = + cpu_to_le16(strlen(hi->HostFriendlyName)); + hi_size = sizeof(*hi) + strlen(hi->HostFriendlyName); + return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), + CBAF_REQ_SET_ASSOCIATION_RESPONSE, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0x0101, + cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, + hi, hi_size, 1000 /* FIXME: arbitrary */); +} + +/* Show current CHID info we have set from user space */ +static ssize_t cbaf_wusb_host_info_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + char pr_chid[WUSB_CKHDID_STRSIZE]; + + ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid); + return scnprintf(buf, PAGE_SIZE, "CHID: %s\nName: %s\n", + pr_chid, cbaf->host_name); +} + +/* + * Get a host info CHID from user space and send it to the device. 
+ * + * The user can recover a CC from the device associated to that CHID + * by cat'ing wusb_connection_context. + */ +static ssize_t cbaf_wusb_host_info_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + ssize_t result; + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + + result = sscanf(buf, + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%04hx %64s\n", + &cbaf->chid.data[0] , &cbaf->chid.data[1], + &cbaf->chid.data[2] , &cbaf->chid.data[3], + &cbaf->chid.data[4] , &cbaf->chid.data[5], + &cbaf->chid.data[6] , &cbaf->chid.data[7], + &cbaf->chid.data[8] , &cbaf->chid.data[9], + &cbaf->chid.data[10], &cbaf->chid.data[11], + &cbaf->chid.data[12], &cbaf->chid.data[13], + &cbaf->chid.data[14], &cbaf->chid.data[15], + &cbaf->host_band_groups, cbaf->host_name); + if (result != 18) { + dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits, " + "a 16 bit hex band group mask " + "and a host name, got only %d)\n", (int)result); + return -EINVAL; + } + result = cbaf_send_host_info(cbaf); + if (result < 0) + dev_err(dev, "Couldn't send host information to device: %d\n", + (int)result); + else + d_printf(1, dev, "HI sent, wusb_cc can be read now\n"); + return result < 0 ? result : size; +} +static DEVICE_ATTR(wusb_host_info, 0600, cbaf_wusb_host_info_show, + cbaf_wusb_host_info_store); + +static const struct wusb_cbaf_device_info cbaf_device_info_defaults = { + .Length_hdr = WUSB_AR_Length, + .CDID_hdr = WUSB_AR_CDID, + .BandGroups_hdr = WUSB_AR_BandGroups, + .LangID_hdr = WUSB_AR_LangID, + .DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName, +}; + +/* + * Get device's information (CDID) associated to CHID + * + * The device will return it's information (CDID, name, bandgroups) + * associated to the CHID we have set before, or 0 CDID and default + * name and bandgroup if no CHID set or unknown. + */ +static int cbaf_cdid_get(struct cbaf *cbaf) +{ + int result; + struct device *dev = &cbaf->usb_iface->dev; + struct wusb_cbaf_device_info *di; + size_t needed, dev_name_size; + + di = cbaf->buffer; + result = usb_control_msg( + cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), + CBAF_REQ_GET_ASSOCIATION_REQUEST, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, + di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "Cannot request device information: %d\n", result); + goto error_req_di; + } + needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length); + if (result < needed) { + dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs " + "%zu bytes needed)\n", (size_t)result, needed); + goto error_bad_di; + } + cbaf->cdid = di->CDID; + dev_name_size = le16_to_cpu(di->DeviceFriendlyName_hdr.len); + dev_name_size = dev_name_size > 65 - 1 ? 
65 - 1 : dev_name_size; + memcpy(cbaf->device_name, di->DeviceFriendlyName, dev_name_size); + cbaf->device_name[dev_name_size] = 0; + cbaf->device_band_groups = le16_to_cpu(di->BandGroups); + result = 0; +error_req_di: +error_bad_di: + return result; +} + +/* + * Get device information and print it to sysfs + * + * See cbaf_cdid_get() + */ +static ssize_t cbaf_wusb_cdid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t result; + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + char pr_cdid[WUSB_CKHDID_STRSIZE]; + + result = cbaf_cdid_get(cbaf); + if (result < 0) { + dev_err(dev, "Cannot read device information: %d\n", + (int)result); + goto error_get_di; + } + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid); + result = scnprintf(buf, PAGE_SIZE, + "CDID: %s\nName: %s\nBand_groups: 0x%04x\n", + pr_cdid, cbaf->device_name, + cbaf->device_band_groups); +error_get_di: + return result; +} +static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, NULL); + +static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = { + .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, + .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB), + .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId, + .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE), + .Length_hdr = WUSB_AR_Length, + .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)), + .ConnectionContext_hdr = WUSB_AR_ConnectionContext, + .BandGroups_hdr = WUSB_AR_BandGroups, +}; + +static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = { + .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, + .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId, + .Length_hdr = WUSB_AR_Length, + .AssociationStatus_hdr = WUSB_AR_AssociationStatus, +}; + +/* + * Send a new CC to the device + * + * So we update the CK and send the whole thing to the device + */ +static int cbaf_cc_upload(struct cbaf *cbaf) +{ + int result; + struct device *dev = &cbaf->usb_iface->dev; + struct wusb_cbaf_cc_data *ccd; + char pr_cdid[WUSB_CKHDID_STRSIZE]; + + ccd = cbaf->buffer; + *ccd = cbaf_cc_data_defaults; + ccd->CHID = cbaf->chid; + ccd->CDID = cbaf->cdid; + ccd->CK = cbaf->ck; + ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups); + result = usb_control_msg( + cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), + CBAF_REQ_SET_ASSOCIATION_RESPONSE, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, + ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */); + d_printf(1, dev, "Uploaded CC:\n"); + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID); + d_printf(1, dev, " CHID %s\n", pr_cdid); + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID); + d_printf(1, dev, " CDID %s\n", pr_cdid); + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CK); + d_printf(1, dev, " CK %s\n", pr_cdid); + d_printf(1, dev, " bandgroups 0x%04x\n", cbaf->host_band_groups); + return result; +} + +/* + * Send a new CC to the device + * + * We take the CDID and CK from user space, the rest from the info we + * set with host_info. 
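 *
 * The expected write format mirrors the sscanf() below: two labelled
 * rows of 16 hex bytes each, for example (the values here are just
 * placeholders):
 *
 *   CDID: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff
 *   CK:   0f 1e 2d 3c 4b 5a 69 78 87 96 a5 b4 c3 d2 e1 f0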
+ */ +static ssize_t cbaf_wusb_cc_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + ssize_t result; + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + + result = sscanf(buf, + "CDID: %02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx\n" + "CK: %02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx\n", + &cbaf->cdid.data[0] , &cbaf->cdid.data[1], + &cbaf->cdid.data[2] , &cbaf->cdid.data[3], + &cbaf->cdid.data[4] , &cbaf->cdid.data[5], + &cbaf->cdid.data[6] , &cbaf->cdid.data[7], + &cbaf->cdid.data[8] , &cbaf->cdid.data[9], + &cbaf->cdid.data[10], &cbaf->cdid.data[11], + &cbaf->cdid.data[12], &cbaf->cdid.data[13], + &cbaf->cdid.data[14], &cbaf->cdid.data[15], + + &cbaf->ck.data[0] , &cbaf->ck.data[1], + &cbaf->ck.data[2] , &cbaf->ck.data[3], + &cbaf->ck.data[4] , &cbaf->ck.data[5], + &cbaf->ck.data[6] , &cbaf->ck.data[7], + &cbaf->ck.data[8] , &cbaf->ck.data[9], + &cbaf->ck.data[10], &cbaf->ck.data[11], + &cbaf->ck.data[12], &cbaf->ck.data[13], + &cbaf->ck.data[14], &cbaf->ck.data[15]); + if (result != 32) { + dev_err(dev, "Unrecognized CHID/CK (need 32 8-bit " + "hex digits, got only %d)\n", (int)result); + return -EINVAL; + } + result = cbaf_cc_upload(cbaf); + if (result < 0) + dev_err(dev, "Couldn't upload connection context: %d\n", + (int)result); + else + d_printf(1, dev, "Connection context uploaded\n"); + return result < 0 ? result : size; +} +static DEVICE_ATTR(wusb_cc, 0600, NULL, cbaf_wusb_cc_store); + +static struct attribute *cbaf_dev_attrs[] = { + &dev_attr_wusb_host_info.attr, + &dev_attr_wusb_cdid.attr, + &dev_attr_wusb_cc.attr, + NULL, +}; + +static struct attribute_group cbaf_dev_attr_group = { + .name = NULL, /* we want them in the same directory */ + .attrs = cbaf_dev_attrs, +}; + +static int cbaf_probe(struct usb_interface *iface, + const struct usb_device_id *id) +{ + int result; + struct cbaf *cbaf; + struct device *dev = &iface->dev; + + result = -ENOMEM; + cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL); + if (cbaf == NULL) { + dev_err(dev, "Unable to allocate instance\n"); + goto error_kzalloc; + } + cbaf->buffer = kmalloc(512, GFP_KERNEL); + if (cbaf->buffer == NULL) + goto error_kmalloc_buffer; + cbaf->buffer_size = 512; + cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface)); + cbaf->usb_iface = usb_get_intf(iface); + result = cbaf_check(cbaf); + if (result < 0) + goto error_check; + result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group); + if (result < 0) { + dev_err(dev, "Can't register sysfs attr group: %d\n", result); + goto error_create_group; + } + usb_set_intfdata(iface, cbaf); + d_printf(2, dev, "CBA attached\n"); + return 0; + +error_create_group: +error_check: + kfree(cbaf->buffer); +error_kmalloc_buffer: + kfree(cbaf); +error_kzalloc: + return result; +} + +static void cbaf_disconnect(struct usb_interface *iface) +{ + struct cbaf *cbaf = usb_get_intfdata(iface); + struct device *dev = &iface->dev; + sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group); + usb_set_intfdata(iface, NULL); + usb_put_intf(iface); + kfree(cbaf->buffer); + /* paranoia: clean up crypto keys */ + memset(cbaf, 0, sizeof(*cbaf)); + kfree(cbaf); + d_printf(1, dev, "CBA detached\n"); +} + +static struct usb_device_id cbaf_id_table[] = { + { USB_INTERFACE_INFO(0xef, 0x03, 0x01), }, + { }, +}; +MODULE_DEVICE_TABLE(usb, cbaf_id_table); + 
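/*
 * The single entry above matches the generic CBAF interface triple:
 * base class 0xEF (miscellaneous), subclass 0x03, protocol 0x01.  A
 * device-specific match could sit next to it, roughly like this (the
 * vendor and product ids are placeholders, not a real device):
 *
 *	{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, 0xef, 0x03, 0x01), },
 */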
+static struct usb_driver cbaf_driver = { + .name = "wusb-cbaf", + .id_table = cbaf_id_table, + .probe = cbaf_probe, + .disconnect = cbaf_disconnect, +}; + +static int __init cbaf_driver_init(void) +{ + return usb_register(&cbaf_driver); +} +module_init(cbaf_driver_init); + +static void __exit cbaf_driver_exit(void) +{ + usb_deregister(&cbaf_driver); +} +module_exit(cbaf_driver_exit); + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Wireless USB Cable Based Association"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 7e6133aa42920ea87ad9791a0fb2b95d1a23b8f9 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:28 +0100 Subject: wusb: WHCI host controller driver A driver for Wireless USB host controllers that comply with the Wireless Host Controller Interface (HCI) specification as published by Intel. The latest publically available version of the specification (0.95) is supported (except for isochronous transfers). Build fixes by Randy Dunlap Signed-off-by: David Vrabel diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 228797e..d3ba351 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -305,3 +305,15 @@ config SUPERH_ON_CHIP_R8A66597 help This driver enables support for the on-chip R8A66597 in the SH7366 and SH7723 processors. + +config USB_WHCI_HCD + tristate "Wireless USB Host Controller Interface (WHCI) driver" + depends on PCI && USB + select USB_WUSB + select UWB_WHCI + help + A driver for PCI-based Wireless USB Host Controllers that are + compliant with the WHCI specification. + + To compile this driver a module, choose M here: the module + will be called "whci-hcd". diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index f1edda2..c90cf86 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -8,6 +8,8 @@ endif isp1760-objs := isp1760-hcd.o isp1760-if.o +obj-$(CONFIG_USB_WHCI_HCD) += whci/ + obj-$(CONFIG_PCI) += pci-quirks.o obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild new file mode 100644 index 0000000..26a3871 --- /dev/null +++ b/drivers/usb/host/whci/Kbuild @@ -0,0 +1,11 @@ +obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o + +whci-hcd-y := \ + asl.o \ + hcd.o \ + hw.o \ + init.o \ + int.o \ + pzl.o \ + qset.o \ + wusb.o diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c new file mode 100644 index 0000000..4d7078e5 --- /dev/null +++ b/drivers/usb/host/whci/asl.c @@ -0,0 +1,367 @@ +/* + * Wireless Host Controller (WHC) asynchronous schedule management. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include +#include +#include +#include +#define D_LOCAL 0 +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +#if D_LOCAL >= 4 +static void dump_asl(struct whc *whc, const char *tag) +{ + struct device *dev = &whc->umc->dev; + struct whc_qset *qset; + + d_printf(4, dev, "ASL %s\n", tag); + + list_for_each_entry(qset, &whc->async_list, list_node) { + dump_qset(qset, dev); + } +} +#else +static inline void dump_asl(struct whc *whc, const char *tag) +{ +} +#endif + + +static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, + struct whc_qset **next, struct whc_qset **prev) +{ + struct list_head *n, *p; + + BUG_ON(list_empty(&whc->async_list)); + + n = qset->list_node.next; + if (n == &whc->async_list) + n = n->next; + p = qset->list_node.prev; + if (p == &whc->async_list) + p = p->prev; + + *next = container_of(n, struct whc_qset, list_node); + *prev = container_of(p, struct whc_qset, list_node); + +} + +static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset) +{ + list_move(&qset->list_node, &whc->async_list); + qset->in_sw_list = true; +} + +static void asl_qset_insert(struct whc *whc, struct whc_qset *qset) +{ + struct whc_qset *next, *prev; + + qset_clear(whc, qset); + + /* Link into ASL. */ + qset_get_next_prev(whc, qset, &next, &prev); + whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma); + whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma); + qset->in_hw_list = true; +} + +static void asl_qset_remove(struct whc *whc, struct whc_qset *qset) +{ + struct whc_qset *prev, *next; + + qset_get_next_prev(whc, qset, &next, &prev); + + list_move(&qset->list_node, &whc->async_removed_list); + qset->in_sw_list = false; + + /* + * No more qsets in the ASL? The caller must stop the ASL as + * it's no longer valid. + */ + if (list_empty(&whc->async_list)) + return; + + /* Remove from ASL. */ + whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma); + qset->in_hw_list = false; +} + +/** + * process_qset - process any recently inactivated or halted qTDs in a + * qset. + * + * After inactive qTDs are removed, new qTDs can be added if the + * urb queue still contains URBs. + * + * Returns any additional WUSBCMD bits for the ASL sync command (i.e., + * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed). + */ +static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) +{ + enum whc_update update = 0; + uint32_t status = 0; + + while (qset->ntds) { + struct whc_qtd *td; + int t; + + t = qset->td_start; + td = &qset->qtd[qset->td_start]; + status = le32_to_cpu(td->status); + + /* + * Nothing to do with a still active qTD. + */ + if (status & QTD_STS_ACTIVE) + break; + + if (status & QTD_STS_HALTED) { + /* Ug, an error. */ + process_halted_qtd(whc, qset, td); + goto done; + } + + /* Mmm, a completed qTD. */ + process_inactive_qtd(whc, qset, td); + } + + update |= qset_add_qtds(whc, qset); + +done: + /* + * Remove this qset from the ASL if requested, but only if has + * no qTDs. 
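 *
 * Returning WHC_UPDATE_REMOVED from here is what makes
 * scan_async_work() add WUSBCMD_ASYNC_QSET_RM to the ASL sync command.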
+ */ + if (qset->remove && qset->ntds == 0) { + asl_qset_remove(whc, qset); + update |= WHC_UPDATE_REMOVED; + } + return update; +} + +void asl_start(struct whc *whc) +{ + struct whc_qset *qset; + + qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); + + le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR); + + whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN); + whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, + WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED, + 1000, "start ASL"); +} + +void asl_stop(struct whc *whc) +{ + whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0); + whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, + WUSBSTS_ASYNC_SCHED, 0, + 1000, "stop ASL"); +} + +void asl_update(struct whc *whc, uint32_t wusbcmd) +{ + whc_write_wusbcmd(whc, wusbcmd, wusbcmd); + wait_event(whc->async_list_wq, + (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0); +} + +/** + * scan_async_work - scan the ASL for qsets to process. + * + * Process each qset in the ASL in turn and then signal the WHC that + * the ASL has been updated. + * + * Then start, stop or update the asynchronous schedule as required. + */ +void scan_async_work(struct work_struct *work) +{ + struct whc *whc = container_of(work, struct whc, async_work); + struct whc_qset *qset, *t; + enum whc_update update = 0; + + spin_lock_irq(&whc->lock); + + dump_asl(whc, "before processing"); + + /* + * Transerve the software list backwards so new qsets can be + * safely inserted into the ASL without making it non-circular. + */ + list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) { + if (!qset->in_hw_list) { + asl_qset_insert(whc, qset); + update |= WHC_UPDATE_ADDED; + } + + update |= process_qset(whc, qset); + } + + dump_asl(whc, "after processing"); + + spin_unlock_irq(&whc->lock); + + if (update) { + uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB; + if (update & WHC_UPDATE_REMOVED) + wusbcmd |= WUSBCMD_ASYNC_QSET_RM; + asl_update(whc, wusbcmd); + } + + /* + * Now that the ASL is updated, complete the removal of any + * removed qsets. + */ + spin_lock(&whc->lock); + + list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) { + qset_remove_complete(whc, qset); + } + + spin_unlock(&whc->lock); +} + +/** + * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL). + * @whc: the WHCI host controller + * @urb: the URB to enqueue + * @mem_flags: flags for any memory allocations + * + * The qset for the endpoint is obtained and the urb queued on to it. + * + * Work is scheduled to update the hardware's view of the ASL. + */ +int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) +{ + struct whc_qset *qset; + int err; + unsigned long flags; + + spin_lock_irqsave(&whc->lock, flags); + + qset = get_qset(whc, urb, GFP_ATOMIC); + if (qset == NULL) + err = -ENOMEM; + else + err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); + if (!err) { + usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); + if (!qset->in_sw_list) + asl_qset_insert_begin(whc, qset); + } + + spin_unlock_irqrestore(&whc->lock, flags); + + if (!err) + queue_work(whc->workqueue, &whc->async_work); + + return 0; +} + +/** + * asl_urb_dequeue - remove an URB (qset) from the async list. + * @whc: the WHCI host controller + * @urb: the URB to dequeue + * @status: the current status of the URB + * + * URBs that do yet have qTDs can simply be removed from the software + * queue, otherwise the qset must be removed from the ASL so the qTDs + * can be removed. 
+ */ +int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) +{ + struct whc_urb *wurb = urb->hcpriv; + struct whc_qset *qset = wurb->qset; + struct whc_std *std, *t; + int ret; + unsigned long flags; + + spin_lock_irqsave(&whc->lock, flags); + + ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status); + if (ret < 0) + goto out; + + list_for_each_entry_safe(std, t, &qset->stds, list_node) { + if (std->urb == urb) + qset_free_std(whc, std); + else + std->qtd = NULL; /* so this std is re-added when the qset is */ + } + + asl_qset_remove(whc, qset); + wurb->status = status; + wurb->is_async = true; + queue_work(whc->workqueue, &wurb->dequeue_work); + +out: + spin_unlock_irqrestore(&whc->lock, flags); + + return ret; +} + +/** + * asl_qset_delete - delete a qset from the ASL + */ +void asl_qset_delete(struct whc *whc, struct whc_qset *qset) +{ + qset->remove = 1; + queue_work(whc->workqueue, &whc->async_work); + qset_delete(whc, qset); +} + +/** + * asl_init - initialize the asynchronous schedule list + * + * A dummy qset with no qTDs is added to the ASL to simplify removing + * qsets (no need to stop the ASL when the last qset is removed). + */ +int asl_init(struct whc *whc) +{ + struct whc_qset *qset; + + qset = qset_alloc(whc, GFP_KERNEL); + if (qset == NULL) + return -ENOMEM; + + asl_qset_insert_begin(whc, qset); + asl_qset_insert(whc, qset); + + return 0; +} + +/** + * asl_clean_up - free ASL resources + * + * The ASL is stopped and empty except for the dummy qset. + */ +void asl_clean_up(struct whc *whc) +{ + struct whc_qset *qset; + + if (!list_empty(&whc->async_list)) { + qset = list_first_entry(&whc->async_list, struct whc_qset, list_node); + list_del(&qset->list_node); + qset_free(whc, qset); + } +} diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c new file mode 100644 index 0000000..ef3ad4d --- /dev/null +++ b/drivers/usb/host/whci/hcd.c @@ -0,0 +1,339 @@ +/* + * Wireless Host Controller (WHC) driver. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +/* + * One time initialization. + * + * Nothing to do here. + */ +static int whc_reset(struct usb_hcd *usb_hcd) +{ + return 0; +} + +/* + * Start the wireless host controller. + * + * Start device notification. + * + * Put hc into run state, set DNTS parameters. 
+ */ +static int whc_start(struct usb_hcd *usb_hcd) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct whc *whc = wusbhc_to_whc(wusbhc); + u8 bcid; + int ret; + + mutex_lock(&wusbhc->mutex); + + le_writel(WUSBINTR_GEN_CMD_DONE + | WUSBINTR_HOST_ERR + | WUSBINTR_ASYNC_SCHED_SYNCED + | WUSBINTR_DNTS_INT + | WUSBINTR_ERR_INT + | WUSBINTR_INT, + whc->base + WUSBINTR); + + /* set cluster ID */ + bcid = wusb_cluster_id_get(); + ret = whc_set_cluster_id(whc, bcid); + if (ret < 0) + goto out; + wusbhc->cluster_id = bcid; + + /* start HC */ + whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN); + + usb_hcd->uses_new_polling = 1; + usb_hcd->poll_rh = 1; + usb_hcd->state = HC_STATE_RUNNING; + +out: + mutex_unlock(&wusbhc->mutex); + return ret; +} + + +/* + * Stop the wireless host controller. + * + * Stop device notification. + * + * Wait for pending transfer to stop? Put hc into stop state? + */ +static void whc_stop(struct usb_hcd *usb_hcd) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct whc *whc = wusbhc_to_whc(wusbhc); + + mutex_lock(&wusbhc->mutex); + + wusbhc_stop(wusbhc); + + /* stop HC */ + le_writel(0, whc->base + WUSBINTR); + whc_write_wusbcmd(whc, WUSBCMD_RUN, 0); + whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, + WUSBSTS_HCHALTED, WUSBSTS_HCHALTED, + 100, "HC to halt"); + + wusb_cluster_id_put(wusbhc->cluster_id); + + mutex_unlock(&wusbhc->mutex); +} + +static int whc_get_frame_number(struct usb_hcd *usb_hcd) +{ + /* Frame numbers are not applicable to WUSB. */ + return -ENOSYS; +} + + +/* + * Queue an URB to the ASL or PZL + */ +static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, + gfp_t mem_flags) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct whc *whc = wusbhc_to_whc(wusbhc); + int ret; + + switch (usb_pipetype(urb->pipe)) { + case PIPE_INTERRUPT: + ret = pzl_urb_enqueue(whc, urb, mem_flags); + break; + case PIPE_ISOCHRONOUS: + dev_err(&whc->umc->dev, "isochronous transfers unsupported\n"); + ret = -ENOTSUPP; + break; + case PIPE_CONTROL: + case PIPE_BULK: + default: + ret = asl_urb_enqueue(whc, urb, mem_flags); + break; + }; + + return ret; +} + +/* + * Remove a queued URB from the ASL or PZL. + */ +static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct whc *whc = wusbhc_to_whc(wusbhc); + int ret; + + switch (usb_pipetype(urb->pipe)) { + case PIPE_INTERRUPT: + ret = pzl_urb_dequeue(whc, urb, status); + break; + case PIPE_ISOCHRONOUS: + ret = -ENOTSUPP; + break; + case PIPE_CONTROL: + case PIPE_BULK: + default: + ret = asl_urb_dequeue(whc, urb, status); + break; + }; + + return ret; +} + +/* + * Wait for all URBs to the endpoint to be completed, then delete the + * qset. 
+ */ +static void whc_endpoint_disable(struct usb_hcd *usb_hcd, + struct usb_host_endpoint *ep) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct whc *whc = wusbhc_to_whc(wusbhc); + struct whc_qset *qset; + + qset = ep->hcpriv; + if (qset) { + ep->hcpriv = NULL; + if (usb_endpoint_xfer_bulk(&ep->desc) + || usb_endpoint_xfer_control(&ep->desc)) + asl_qset_delete(whc, qset); + else + pzl_qset_delete(whc, qset); + } +} + +static struct hc_driver whc_hc_driver = { + .description = "whci-hcd", + .product_desc = "Wireless host controller", + .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd), + .irq = whc_int_handler, + .flags = HCD_USB2, + + .reset = whc_reset, + .start = whc_start, + .stop = whc_stop, + .get_frame_number = whc_get_frame_number, + .urb_enqueue = whc_urb_enqueue, + .urb_dequeue = whc_urb_dequeue, + .endpoint_disable = whc_endpoint_disable, + + .hub_status_data = wusbhc_rh_status_data, + .hub_control = wusbhc_rh_control, + .bus_suspend = wusbhc_rh_suspend, + .bus_resume = wusbhc_rh_resume, + .start_port_reset = wusbhc_rh_start_port_reset, +}; + +static int whc_probe(struct umc_dev *umc) +{ + int ret = -ENOMEM; + struct usb_hcd *usb_hcd; + struct wusbhc *wusbhc = NULL; + struct whc *whc = NULL; + struct device *dev = &umc->dev; + + usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci"); + if (usb_hcd == NULL) { + dev_err(dev, "unable to create hcd\n"); + goto error; + } + + usb_hcd->wireless = 1; + + wusbhc = usb_hcd_to_wusbhc(usb_hcd); + whc = wusbhc_to_whc(wusbhc); + whc->umc = umc; + + ret = whc_init(whc); + if (ret) + goto error; + + wusbhc->dev = dev; + wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent); + if (!wusbhc->uwb_rc) { + ret = -ENODEV; + dev_err(dev, "cannot get radio controller\n"); + goto error; + } + + if (whc->n_devices > USB_MAXCHILDREN) { + dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n", + whc->n_devices); + wusbhc->ports_max = USB_MAXCHILDREN; + } else + wusbhc->ports_max = whc->n_devices; + wusbhc->mmcies_max = whc->n_mmc_ies; + wusbhc->start = whc_wusbhc_start; + wusbhc->stop = whc_wusbhc_stop; + wusbhc->mmcie_add = whc_mmcie_add; + wusbhc->mmcie_rm = whc_mmcie_rm; + wusbhc->dev_info_set = whc_dev_info_set; + wusbhc->bwa_set = whc_bwa_set; + wusbhc->set_num_dnts = whc_set_num_dnts; + wusbhc->set_ptk = whc_set_ptk; + wusbhc->set_gtk = whc_set_gtk; + + ret = wusbhc_create(wusbhc); + if (ret) + goto error_wusbhc_create; + + ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED); + if (ret) { + dev_err(dev, "cannot add HCD: %d\n", ret); + goto error_usb_add_hcd; + } + + ret = wusbhc_b_create(wusbhc); + if (ret) { + dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret); + goto error_wusbhc_b_create; + } + + return 0; + +error_wusbhc_b_create: + usb_remove_hcd(usb_hcd); +error_usb_add_hcd: + wusbhc_destroy(wusbhc); +error_wusbhc_create: + uwb_rc_put(wusbhc->uwb_rc); +error: + whc_clean_up(whc); + if (usb_hcd) + usb_put_hcd(usb_hcd); + return ret; +} + + +static void whc_remove(struct umc_dev *umc) +{ + struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev); + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct whc *whc = wusbhc_to_whc(wusbhc); + + if (usb_hcd) { + wusbhc_b_destroy(wusbhc); + usb_remove_hcd(usb_hcd); + wusbhc_destroy(wusbhc); + uwb_rc_put(wusbhc->uwb_rc); + whc_clean_up(whc); + usb_put_hcd(usb_hcd); + } +} + +static struct umc_driver whci_hc_driver = { + .name = "whci-hcd", + .cap_id = UMC_CAP_ID_WHCI_WUSB_HC, + .probe = whc_probe, + .remove = whc_remove, +}; + +static int __init 
whci_hc_driver_init(void) +{ + return umc_driver_register(&whci_hc_driver); +} +module_init(whci_hc_driver_init); + +static void __exit whci_hc_driver_exit(void) +{ + umc_driver_unregister(&whci_hc_driver); +} +module_exit(whci_hc_driver_exit); + +/* PCI device ID's that we handle (so it gets loaded) */ +static struct pci_device_id whci_hcd_id_table[] = { + { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, + { /* empty last entry */ } +}; +MODULE_DEVICE_TABLE(pci, whci_hcd_id_table); + +MODULE_DESCRIPTION("WHCI Wireless USB host controller driver"); +MODULE_AUTHOR("Cambridge Silicon Radio Ltd."); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c new file mode 100644 index 0000000..ac86e59 --- /dev/null +++ b/drivers/usb/host/whci/hw.c @@ -0,0 +1,87 @@ +/* + * Wireless Host Controller (WHC) hardware access helpers. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val) +{ + unsigned long flags; + u32 cmd; + + spin_lock_irqsave(&whc->lock, flags); + + cmd = le_readl(whc->base + WUSBCMD); + cmd = (cmd & ~mask) | val; + le_writel(cmd, whc->base + WUSBCMD); + + spin_unlock_irqrestore(&whc->lock, flags); +} + +/** + * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register + * @whc: the WHCI HC + * @cmd: command to start. + * @params: parameters for the command (the WUSBGENCMDPARAMS register value). + * @addr: pointer to any data for the command (may be NULL). + * @len: length of the data (if any). + */ +int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) +{ + unsigned long flags; + dma_addr_t dma_addr; + int t; + + mutex_lock(&whc->mutex); + + /* Wait for previous command to complete. */ + t = wait_event_timeout(whc->cmd_wq, + (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0, + WHC_GENCMD_TIMEOUT_MS); + if (t == 0) { + dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", + le_readl(whc->base + WUSBGENCMDSTS), + le_readl(whc->base + WUSBGENCMDPARAMS)); + return -ETIMEDOUT; + } + + if (addr) { + memcpy(whc->gen_cmd_buf, addr, len); + dma_addr = whc->gen_cmd_buf_dma; + } else + dma_addr = 0; + + /* Poke registers to start cmd. */ + spin_lock_irqsave(&whc->lock, flags); + + le_writel(params, whc->base + WUSBGENCMDPARAMS); + le_writeq(dma_addr, whc->base + WUSBGENADDR); + + le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd, + whc->base + WUSBGENCMDSTS); + + spin_unlock_irqrestore(&whc->lock, flags); + + mutex_unlock(&whc->mutex); + + return 0; +} diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c new file mode 100644 index 0000000..34a783c --- /dev/null +++ b/drivers/usb/host/whci/init.c @@ -0,0 +1,188 @@ +/* + * Wireless Host Controller (WHC) initialization. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +/* + * Reset the host controller. + */ +static void whc_hw_reset(struct whc *whc) +{ + le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD); + whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0, + 100, "reset"); +} + +static void whc_hw_init_di_buf(struct whc *whc) +{ + int d; + + /* Disable all entries in the Device Information buffer. */ + for (d = 0; d < whc->n_devices; d++) + whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE; + + le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR); +} + +static void whc_hw_init_dn_buf(struct whc *whc) +{ + /* Clear the Device Notification buffer to ensure the V (valid) + * bits are clear. */ + memset(whc->dn_buf, 0, 4096); + + le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR); +} + +int whc_init(struct whc *whc) +{ + u32 whcsparams; + int ret, i; + resource_size_t start, len; + + spin_lock_init(&whc->lock); + mutex_init(&whc->mutex); + init_waitqueue_head(&whc->cmd_wq); + init_waitqueue_head(&whc->async_list_wq); + init_waitqueue_head(&whc->periodic_list_wq); + whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev)); + if (whc->workqueue == NULL) { + ret = -ENOMEM; + goto error; + } + INIT_WORK(&whc->dn_work, whc_dn_work); + + INIT_WORK(&whc->async_work, scan_async_work); + INIT_LIST_HEAD(&whc->async_list); + INIT_LIST_HEAD(&whc->async_removed_list); + + INIT_WORK(&whc->periodic_work, scan_periodic_work); + for (i = 0; i < 5; i++) + INIT_LIST_HEAD(&whc->periodic_list[i]); + INIT_LIST_HEAD(&whc->periodic_removed_list); + + /* Map HC registers. */ + start = whc->umc->resource.start; + len = whc->umc->resource.end - start + 1; + if (!request_mem_region(start, len, "whci-hc")) { + dev_err(&whc->umc->dev, "can't request HC region\n"); + ret = -EBUSY; + goto error; + } + whc->base_phys = start; + whc->base = ioremap(start, len); + if (!whc->base) { + dev_err(&whc->umc->dev, "ioremap\n"); + ret = -ENOMEM; + goto error; + } + + whc_hw_reset(whc); + + /* Read maximum number of devices, keys and MMC IEs. */ + whcsparams = le_readl(whc->base + WHCSPARAMS); + whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams); + whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams); + whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams); + + dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n", + whc->n_devices, whc->n_keys, whc->n_mmc_ies); + + whc->qset_pool = dma_pool_create("qset", &whc->umc->dev, + sizeof(struct whc_qset), 64, 0); + if (whc->qset_pool == NULL) { + ret = -ENOMEM; + goto error; + } + + ret = asl_init(whc); + if (ret < 0) + goto error; + ret = pzl_init(whc); + if (ret < 0) + goto error; + + /* Allocate and initialize a buffer for generic commands, the + Device Information buffer, and the Device Notification + buffer. 
*/ + + whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, + &whc->gen_cmd_buf_dma, GFP_KERNEL); + if (whc->gen_cmd_buf == NULL) { + ret = -ENOMEM; + goto error; + } + + whc->dn_buf = dma_alloc_coherent(&whc->umc->dev, + sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, + &whc->dn_buf_dma, GFP_KERNEL); + if (!whc->dn_buf) { + ret = -ENOMEM; + goto error; + } + whc_hw_init_dn_buf(whc); + + whc->di_buf = dma_alloc_coherent(&whc->umc->dev, + sizeof(struct di_buf_entry) * whc->n_devices, + &whc->di_buf_dma, GFP_KERNEL); + if (!whc->di_buf) { + ret = -ENOMEM; + goto error; + } + whc_hw_init_di_buf(whc); + + return 0; + +error: + whc_clean_up(whc); + return ret; +} + +void whc_clean_up(struct whc *whc) +{ + resource_size_t len; + + if (whc->di_buf) + dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, + whc->di_buf, whc->di_buf_dma); + if (whc->dn_buf) + dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, + whc->dn_buf, whc->dn_buf_dma); + if (whc->gen_cmd_buf) + dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, + whc->gen_cmd_buf, whc->gen_cmd_buf_dma); + + pzl_clean_up(whc); + asl_clean_up(whc); + + if (whc->qset_pool) + dma_pool_destroy(whc->qset_pool); + + len = whc->umc->resource.end - whc->umc->resource.start + 1; + if (whc->base) + iounmap(whc->base); + if (whc->base_phys) + release_mem_region(whc->base_phys, len); + + if (whc->workqueue) + destroy_workqueue(whc->workqueue); +} diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c new file mode 100644 index 0000000..fce0117 --- /dev/null +++ b/drivers/usb/host/whci/int.c @@ -0,0 +1,95 @@ +/* + * Wireless Host Controller (WHC) interrupt handling. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +static void transfer_done(struct whc *whc) +{ + queue_work(whc->workqueue, &whc->async_work); + queue_work(whc->workqueue, &whc->periodic_work); +} + +irqreturn_t whc_int_handler(struct usb_hcd *hcd) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd); + struct whc *whc = wusbhc_to_whc(wusbhc); + u32 sts; + + sts = le_readl(whc->base + WUSBSTS); + if (!(sts & WUSBSTS_INT_MASK)) + return IRQ_NONE; + le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS); + + if (sts & WUSBSTS_GEN_CMD_DONE) + wake_up(&whc->cmd_wq); + + if (sts & WUSBSTS_HOST_ERR) + dev_err(&whc->umc->dev, "FIXME: host system error\n"); + + if (sts & WUSBSTS_ASYNC_SCHED_SYNCED) + wake_up(&whc->async_list_wq); + + if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED) + wake_up(&whc->periodic_list_wq); + + if (sts & WUSBSTS_DNTS_INT) + queue_work(whc->workqueue, &whc->dn_work); + + /* + * A transfer completed (see [WHCI] section 4.7.1.2 for when + * this occurs). 
+ */ + if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT)) + transfer_done(whc); + + return IRQ_HANDLED; +} + +static int process_dn_buf(struct whc *whc) +{ + struct wusbhc *wusbhc = &whc->wusbhc; + struct dn_buf_entry *dn; + int processed = 0; + + for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) { + if (dn->status & WHC_DN_STATUS_VALID) { + wusbhc_handle_dn(wusbhc, dn->src_addr, + (struct wusb_dn_hdr *)dn->dn_data, + dn->msg_size); + dn->status &= ~WHC_DN_STATUS_VALID; + processed++; + } + } + return processed; +} + +void whc_dn_work(struct work_struct *work) +{ + struct whc *whc = container_of(work, struct whc, dn_work); + int processed; + + do { + processed = process_dn_buf(whc); + } while (processed); +} diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c new file mode 100644 index 0000000..8d62df0 --- /dev/null +++ b/drivers/usb/host/whci/pzl.c @@ -0,0 +1,398 @@ +/* + * Wireless Host Controller (WHC) periodic schedule management. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include +#define D_LOCAL 0 +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +#if D_LOCAL >= 4 +static void dump_pzl(struct whc *whc, const char *tag) +{ + struct device *dev = &whc->umc->dev; + struct whc_qset *qset; + int period = 0; + + d_printf(4, dev, "PZL %s\n", tag); + + for (period = 0; period < 5; period++) { + d_printf(4, dev, "Period %d\n", period); + list_for_each_entry(qset, &whc->periodic_list[period], list_node) { + dump_qset(qset, dev); + } + } +} +#else +static inline void dump_pzl(struct whc *whc, const char *tag) +{ +} +#endif + +static void update_pzl_pointers(struct whc *whc, int period, u64 addr) +{ + switch (period) { + case 0: + whc_qset_set_link_ptr(&whc->pz_list[0], addr); + whc_qset_set_link_ptr(&whc->pz_list[2], addr); + whc_qset_set_link_ptr(&whc->pz_list[4], addr); + whc_qset_set_link_ptr(&whc->pz_list[6], addr); + whc_qset_set_link_ptr(&whc->pz_list[8], addr); + whc_qset_set_link_ptr(&whc->pz_list[10], addr); + whc_qset_set_link_ptr(&whc->pz_list[12], addr); + whc_qset_set_link_ptr(&whc->pz_list[14], addr); + break; + case 1: + whc_qset_set_link_ptr(&whc->pz_list[1], addr); + whc_qset_set_link_ptr(&whc->pz_list[5], addr); + whc_qset_set_link_ptr(&whc->pz_list[9], addr); + whc_qset_set_link_ptr(&whc->pz_list[13], addr); + break; + case 2: + whc_qset_set_link_ptr(&whc->pz_list[3], addr); + whc_qset_set_link_ptr(&whc->pz_list[11], addr); + break; + case 3: + whc_qset_set_link_ptr(&whc->pz_list[7], addr); + break; + case 4: + whc_qset_set_link_ptr(&whc->pz_list[15], addr); + break; + } +} + +/* + * Return the 'period' to use for this qset. The minimum interval for + * the endpoint is used so whatever urbs are submitted the device is + * polled often enough. 
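The pattern behind the hard-coded cases in update_pzl_pointers() above is a binary-tree interleaving over the 16 periodic zone list slots: slot i links to the period-p chain when i mod 2^(p+1) equals 2^p - 1, with the even slots carrying period 0. A throwaway program that prints the same assignments, assuming nothing beyond standard C:

#include <stdio.h>

int main(void)
{
        /* Slot i belongs to period p when i % 2^(p+1) == 2^p - 1,
         * matching the switch statement in update_pzl_pointers(). */
        for (int period = 0; period < 5; period++) {
                printf("period %d:", period);
                for (int i = 0; i < 16; i++)
                        if (i % (1 << (period + 1)) == (1 << period) - 1)
                                printf(" pz_list[%d]", i);
                printf("\n");
        }
        return 0;
}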
+ */ +static int qset_get_period(struct whc *whc, struct whc_qset *qset) +{ + uint8_t bInterval = qset->ep->desc.bInterval; + + if (bInterval < 6) + bInterval = 6; + if (bInterval > 10) + bInterval = 10; + return bInterval - 6; +} + +static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset) +{ + int period; + + period = qset_get_period(whc, qset); + + qset_clear(whc, qset); + list_move(&qset->list_node, &whc->periodic_list[period]); + qset->in_sw_list = true; +} + +static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset) +{ + list_move(&qset->list_node, &whc->periodic_removed_list); + qset->in_hw_list = false; + qset->in_sw_list = false; +} + +/** + * pzl_process_qset - process any recently inactivated or halted qTDs + * in a qset. + * + * After inactive qTDs are removed, new qTDs can be added if the + * urb queue still contains URBs. + * + * Returns the schedule updates required. + */ +static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) +{ + enum whc_update update = 0; + uint32_t status = 0; + + while (qset->ntds) { + struct whc_qtd *td; + int t; + + t = qset->td_start; + td = &qset->qtd[qset->td_start]; + status = le32_to_cpu(td->status); + + /* + * Nothing to do with a still active qTD. + */ + if (status & QTD_STS_ACTIVE) + break; + + if (status & QTD_STS_HALTED) { + /* Ug, an error. */ + process_halted_qtd(whc, qset, td); + goto done; + } + + /* Mmm, a completed qTD. */ + process_inactive_qtd(whc, qset, td); + } + + update |= qset_add_qtds(whc, qset); + +done: + /* + * If there are no qTDs in this qset, remove it from the PZL. + */ + if (qset->remove && qset->ntds == 0) { + pzl_qset_remove(whc, qset); + update |= WHC_UPDATE_REMOVED; + } + + return update; +} + +/** + * pzl_start - start the periodic schedule + * @whc: the WHCI host controller + * + * The PZL must be valid (e.g., all entries in the list should have + * the T bit set). + */ +void pzl_start(struct whc *whc) +{ + le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE); + + whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN); + whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, + WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED, + 1000, "start PZL"); +} + +/** + * pzl_stop - stop the periodic schedule + * @whc: the WHCI host controller + */ +void pzl_stop(struct whc *whc) +{ + whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0); + whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS, + WUSBSTS_PERIODIC_SCHED, 0, + 1000, "stop PZL"); +} + +void pzl_update(struct whc *whc, uint32_t wusbcmd) +{ + whc_write_wusbcmd(whc, wusbcmd, wusbcmd); + wait_event(whc->periodic_list_wq, + (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0); +} + +static void update_pzl_hw_view(struct whc *whc) +{ + struct whc_qset *qset, *t; + int period; + u64 tmp_qh = 0; + + for (period = 0; period < 5; period++) { + list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { + whc_qset_set_link_ptr(&qset->qh.link, tmp_qh); + tmp_qh = qset->qset_dma; + qset->in_hw_list = true; + } + update_pzl_pointers(whc, period, tmp_qh); + } +} + +/** + * scan_periodic_work - scan the PZL for qsets to process. + * + * Process each qset in the PZL in turn and then signal the WHC that + * the PZL has been updated. + * + * Then start, stop or update the periodic schedule as required. 
+ */ +void scan_periodic_work(struct work_struct *work) +{ + struct whc *whc = container_of(work, struct whc, periodic_work); + struct whc_qset *qset, *t; + enum whc_update update = 0; + int period; + + spin_lock_irq(&whc->lock); + + dump_pzl(whc, "before processing"); + + for (period = 4; period >= 0; period--) { + list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { + if (!qset->in_hw_list) + update |= WHC_UPDATE_ADDED; + update |= pzl_process_qset(whc, qset); + } + } + + if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED)) + update_pzl_hw_view(whc); + + dump_pzl(whc, "after processing"); + + spin_unlock_irq(&whc->lock); + + if (update) { + uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB; + if (update & WHC_UPDATE_REMOVED) + wusbcmd |= WUSBCMD_PERIODIC_QSET_RM; + pzl_update(whc, wusbcmd); + } + + /* + * Now that the PZL is updated, complete the removal of any + * removed qsets. + */ + spin_lock(&whc->lock); + + list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) { + qset_remove_complete(whc, qset); + } + + spin_unlock(&whc->lock); +} + +/** + * pzl_urb_enqueue - queue an URB onto the periodic list (PZL) + * @whc: the WHCI host controller + * @urb: the URB to enqueue + * @mem_flags: flags for any memory allocations + * + * The qset for the endpoint is obtained and the urb queued on to it. + * + * Work is scheduled to update the hardware's view of the PZL. + */ +int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) +{ + struct whc_qset *qset; + int err; + unsigned long flags; + + spin_lock_irqsave(&whc->lock, flags); + + qset = get_qset(whc, urb, GFP_ATOMIC); + if (qset == NULL) + err = -ENOMEM; + else + err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); + if (!err) { + usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb); + if (!qset->in_sw_list) + qset_insert_in_sw_list(whc, qset); + } + + spin_unlock_irqrestore(&whc->lock, flags); + + if (!err) + queue_work(whc->workqueue, &whc->periodic_work); + + return 0; +} + +/** + * pzl_urb_dequeue - remove an URB (qset) from the periodic list + * @whc: the WHCI host controller + * @urb: the URB to dequeue + * @status: the current status of the URB + * + * URBs that do yet have qTDs can simply be removed from the software + * queue, otherwise the qset must be removed so the qTDs can be safely + * removed. 
+ */ +int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) +{ + struct whc_urb *wurb = urb->hcpriv; + struct whc_qset *qset = wurb->qset; + struct whc_std *std, *t; + int ret; + unsigned long flags; + + spin_lock_irqsave(&whc->lock, flags); + + ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status); + if (ret < 0) + goto out; + + list_for_each_entry_safe(std, t, &qset->stds, list_node) { + if (std->urb == urb) + qset_free_std(whc, std); + else + std->qtd = NULL; /* so this std is re-added when the qset is */ + } + + pzl_qset_remove(whc, qset); + wurb->status = status; + wurb->is_async = false; + queue_work(whc->workqueue, &wurb->dequeue_work); + +out: + spin_unlock_irqrestore(&whc->lock, flags); + + return ret; +} + +/** + * pzl_qset_delete - delete a qset from the PZL + */ +void pzl_qset_delete(struct whc *whc, struct whc_qset *qset) +{ + qset->remove = 1; + queue_work(whc->workqueue, &whc->periodic_work); + qset_delete(whc, qset); +} + + +/** + * pzl_init - initialize the periodic zone list + * @whc: the WHCI host controller + */ +int pzl_init(struct whc *whc) +{ + int i; + + whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16, + &whc->pz_list_dma, GFP_KERNEL); + if (whc->pz_list == NULL) + return -ENOMEM; + + /* Set T bit on all elements in PZL. */ + for (i = 0; i < 16; i++) + whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); + + le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE); + + return 0; +} + +/** + * pzl_clean_up - free PZL resources + * @whc: the WHCI host controller + * + * The PZL is stopped and empty. + */ +void pzl_clean_up(struct whc *whc) +{ + if (whc->pz_list) + dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list, + whc->pz_list_dma); +} diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c new file mode 100644 index 0000000..0420037 --- /dev/null +++ b/drivers/usb/host/whci/qset.c @@ -0,0 +1,567 @@ +/* + * Wireless Host Controller (WHC) qset management. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +void dump_qset(struct whc_qset *qset, struct device *dev) +{ + struct whc_std *std; + struct urb *urb = NULL; + int i; + + dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma); + dev_dbg(dev, " -> %08x\n", (u32)qset->qh.link); + dev_dbg(dev, " info: %08x %08x %08x\n", + qset->qh.info1, qset->qh.info2, qset->qh.info3); + dev_dbg(dev, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); + dev_dbg(dev, " TD: sts: %08x opts: %08x\n", + qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); + + for (i = 0; i < WHCI_QSET_TD_MAX; i++) { + dev_dbg(dev, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", + i == qset->td_start ? 'S' : ' ', + i == qset->td_end ? 
'E' : ' ', + i, qset->qtd[i].status, qset->qtd[i].options, + (u32)qset->qtd[i].page_list_ptr); + } + dev_dbg(dev, " ntds: %d\n", qset->ntds); + list_for_each_entry(std, &qset->stds, list_node) { + if (urb != std->urb) { + urb = std->urb; + dev_dbg(dev, " urb %p transferred: %d bytes\n", urb, + urb->actual_length); + } + if (std->qtd) + dev_dbg(dev, " sTD[%td]: %zu bytes @ %08x\n", + std->qtd - &qset->qtd[0], + std->len, std->num_pointers ? + (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); + else + dev_dbg(dev, " sTD[-]: %zd bytes @ %08x\n", + std->len, std->num_pointers ? + (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); + } +} + +struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) +{ + struct whc_qset *qset; + dma_addr_t dma; + + qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma); + if (qset == NULL) + return NULL; + memset(qset, 0, sizeof(struct whc_qset)); + + qset->qset_dma = dma; + qset->whc = whc; + + INIT_LIST_HEAD(&qset->list_node); + INIT_LIST_HEAD(&qset->stds); + + return qset; +} + +/** + * qset_fill_qh - fill the static endpoint state in a qset's QHead + * @qset: the qset whose QH needs initializing with static endpoint + * state + * @urb: an urb for a transfer to this endpoint + */ +static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) +{ + struct usb_device *usb_dev = urb->dev; + struct usb_wireless_ep_comp_descriptor *epcd; + bool is_out; + + is_out = usb_pipeout(urb->pipe); + + epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; + + if (epcd) { + qset->max_seq = epcd->bMaxSequence; + qset->max_burst = epcd->bMaxBurst; + } else { + qset->max_seq = 2; + qset->max_burst = 1; + } + + qset->qh.info1 = cpu_to_le32( + QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) + | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) + | usb_pipe_to_qh_type(urb->pipe) + | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) + | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out)) + ); + qset->qh.info2 = cpu_to_le32( + QH_INFO2_BURST(qset->max_burst) + | QH_INFO2_DBP(0) + | QH_INFO2_MAX_COUNT(3) + | QH_INFO2_MAX_RETRY(3) + | QH_INFO2_MAX_SEQ(qset->max_seq - 1) + ); + /* FIXME: where can we obtain these Tx parameters from? Why + * doesn't the chip know what Tx power to use? It knows the Rx + * strength and can presumably guess the Tx power required + * from that? */ + qset->qh.info3 = cpu_to_le32( + QH_INFO3_TX_RATE_53_3 + | QH_INFO3_TX_PWR(0) /* 0 == max power */ + ); +} + +/** + * qset_clear - clear fields in a qset so it may be reinserted into a + * schedule + */ +void qset_clear(struct whc *whc, struct whc_qset *qset) +{ + qset->td_start = qset->td_end = qset->ntds = 0; + qset->remove = 0; + + qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); + qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start)); + qset->qh.err_count = 0; + qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); + qset->qh.scratch[0] = 0; + qset->qh.scratch[1] = 0; + qset->qh.scratch[2] = 0; + + memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay)); + + init_completion(&qset->remove_complete); +} + +/** + * get_qset - get the qset for an async endpoint + * + * A new qset is created if one does not already exist. 
+ */ +struct whc_qset *get_qset(struct whc *whc, struct urb *urb, + gfp_t mem_flags) +{ + struct whc_qset *qset; + + qset = urb->ep->hcpriv; + if (qset == NULL) { + qset = qset_alloc(whc, mem_flags); + if (qset == NULL) + return NULL; + + qset->ep = urb->ep; + urb->ep->hcpriv = qset; + qset_fill_qh(qset, urb); + } + return qset; +} + +void qset_remove_complete(struct whc *whc, struct whc_qset *qset) +{ + list_del_init(&qset->list_node); + complete(&qset->remove_complete); +} + +/** + * qset_add_qtds - add qTDs for an URB to a qset + * + * Returns true if the list (ASL/PZL) must be updated because (for a + * WHCI 0.95 controller) an activated qTD was pointed to be iCur. + */ +enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset) +{ + struct whc_std *std; + enum whc_update update = 0; + + list_for_each_entry(std, &qset->stds, list_node) { + struct whc_qtd *qtd; + uint32_t status; + + if (qset->ntds >= WHCI_QSET_TD_MAX + || (qset->pause_after_urb && std->urb != qset->pause_after_urb)) + break; + + if (std->qtd) + continue; /* already has a qTD */ + + qtd = std->qtd = &qset->qtd[qset->td_end]; + + /* Fill in setup bytes for control transfers. */ + if (usb_pipecontrol(std->urb->pipe)) + memcpy(qtd->setup, std->urb->setup_packet, 8); + + status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len); + + if (whc_std_last(std) && usb_pipeout(std->urb->pipe)) + status |= QTD_STS_LAST_PKT; + + /* + * For an IN transfer the iAlt field should be set so + * the h/w will automatically advance to the next + * transfer. However, if there are 8 or more TDs + * remaining in this transfer then iAlt cannot be set + * as it could point to somewhere in this transfer. + */ + if (std->ntds_remaining < WHCI_QSET_TD_MAX) { + int ialt; + ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX; + status |= QTD_STS_IALT(ialt); + } else if (usb_pipein(std->urb->pipe)) + qset->pause_after_urb = std->urb; + + if (std->num_pointers) + qtd->options = cpu_to_le32(QTD_OPT_IOC); + else + qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL); + qtd->page_list_ptr = cpu_to_le64(std->dma_addr); + + qtd->status = cpu_to_le32(status); + + if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end) + update = WHC_UPDATE_UPDATED; + + if (++qset->td_end >= WHCI_QSET_TD_MAX) + qset->td_end = 0; + qset->ntds++; + } + + return update; +} + +/** + * qset_remove_qtd - remove the first qTD from a qset. + * + * The qTD might be still active (if it's part of a IN URB that + * resulted in a short read) so ensure it's deactivated. + */ +static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) +{ + qset->qtd[qset->td_start].status = 0; + + if (++qset->td_start >= WHCI_QSET_TD_MAX) + qset->td_start = 0; + qset->ntds--; +} + +/** + * qset_free_std - remove an sTD and free it. + * @whc: the WHCI host controller + * @std: the sTD to remove and free. + */ +void qset_free_std(struct whc *whc, struct whc_std *std) +{ + list_del(&std->list_node); + if (std->num_pointers) { + dma_unmap_single(whc->wusbhc.dev, std->dma_addr, + std->num_pointers * sizeof(struct whc_page_list_entry), + DMA_TO_DEVICE); + kfree(std->pl_virt); + } + + kfree(std); +} + +/** + * qset_remove_qtds - remove an URB's qTDs (and sTDs). 
+ */ +static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset, + struct urb *urb) +{ + struct whc_std *std, *t; + + list_for_each_entry_safe(std, t, &qset->stds, list_node) { + if (std->urb != urb) + break; + if (std->qtd != NULL) + qset_remove_qtd(whc, qset); + qset_free_std(whc, std); + } +} + +/** + * qset_free_stds - free any remaining sTDs for an URB. + */ +static void qset_free_stds(struct whc_qset *qset, struct urb *urb) +{ + struct whc_std *std, *t; + + list_for_each_entry_safe(std, t, &qset->stds, list_node) { + if (std->urb == urb) + qset_free_std(qset->whc, std); + } +} + +static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags) +{ + dma_addr_t dma_addr = std->dma_addr; + dma_addr_t sp, ep; + size_t std_len = std->len; + size_t pl_len; + int p; + + sp = ALIGN(dma_addr, WHCI_PAGE_SIZE); + ep = dma_addr + std_len; + std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); + + pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); + std->pl_virt = kmalloc(pl_len, mem_flags); + if (std->pl_virt == NULL) + return -ENOMEM; + std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); + + for (p = 0; p < std->num_pointers; p++) { + std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); + dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE); + } + + return 0; +} + +/** + * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system. + */ +static void urb_dequeue_work(struct work_struct *work) +{ + struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work); + struct whc_qset *qset = wurb->qset; + struct whc *whc = qset->whc; + unsigned long flags; + + if (wurb->is_async == true) + asl_update(whc, WUSBCMD_ASYNC_UPDATED + | WUSBCMD_ASYNC_SYNCED_DB + | WUSBCMD_ASYNC_QSET_RM); + else + pzl_update(whc, WUSBCMD_PERIODIC_UPDATED + | WUSBCMD_PERIODIC_SYNCED_DB + | WUSBCMD_PERIODIC_QSET_RM); + + spin_lock_irqsave(&whc->lock, flags); + qset_remove_urb(whc, qset, wurb->urb, wurb->status); + spin_unlock_irqrestore(&whc->lock, flags); +} + +/** + * qset_add_urb - add an urb to the qset's queue. + * + * The URB is chopped into sTDs, one for each qTD that will required. + * At least one qTD (and sTD) is required even if the transfer has no + * data (e.g., for some control transfers). 
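To make the chopping concrete, here is a quick standalone illustration of how many sTDs a given transfer length produces; the lengths are arbitrary and only the QTD_MAX_XFER_SIZE value from whci-hc.h is borrowed:

#include <stdio.h>

#define QTD_MAX_XFER_SIZE 1048575       /* 2^20 - 1, as in whci-hc.h */

int main(void)
{
        long lengths[] = { 0, 512, 1048575, 1048576, 3 * 1024 * 1024 };

        for (unsigned i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
                long remaining = lengths[i];
                /* DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE), at least 1 */
                long ntds = (remaining + QTD_MAX_XFER_SIZE - 1) / QTD_MAX_XFER_SIZE;

                if (ntds == 0)
                        ntds = 1;
                printf("%8ld bytes -> %ld sTD(s), last sTD %ld bytes\n",
                       lengths[i], ntds,
                       remaining ? remaining - (ntds - 1) * QTD_MAX_XFER_SIZE : 0);
        }
        return 0;
}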
+ */ +int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, + gfp_t mem_flags) +{ + struct whc_urb *wurb; + int remaining = urb->transfer_buffer_length; + u64 transfer_dma = urb->transfer_dma; + int ntds_remaining; + + ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); + if (ntds_remaining == 0) + ntds_remaining = 1; + + wurb = kzalloc(sizeof(struct whc_urb), mem_flags); + if (wurb == NULL) + goto err_no_mem; + urb->hcpriv = wurb; + wurb->qset = qset; + wurb->urb = urb; + INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); + + while (ntds_remaining) { + struct whc_std *std; + size_t std_len; + + std = kmalloc(sizeof(struct whc_std), mem_flags); + if (std == NULL) + goto err_no_mem; + + std_len = remaining; + if (std_len > QTD_MAX_XFER_SIZE) + std_len = QTD_MAX_XFER_SIZE; + + std->urb = urb; + std->dma_addr = transfer_dma; + std->len = std_len; + std->ntds_remaining = ntds_remaining; + std->qtd = NULL; + + INIT_LIST_HEAD(&std->list_node); + list_add_tail(&std->list_node, &qset->stds); + + if (std_len > WHCI_PAGE_SIZE) { + if (qset_fill_page_list(whc, std, mem_flags) < 0) + goto err_no_mem; + } else + std->num_pointers = 0; + + ntds_remaining--; + remaining -= std_len; + transfer_dma += std_len; + } + + return 0; + +err_no_mem: + qset_free_stds(qset, urb); + return -ENOMEM; +} + +/** + * qset_remove_urb - remove an URB from the urb queue. + * + * The URB is returned to the USB subsystem. + */ +void qset_remove_urb(struct whc *whc, struct whc_qset *qset, + struct urb *urb, int status) +{ + struct wusbhc *wusbhc = &whc->wusbhc; + struct whc_urb *wurb = urb->hcpriv; + + usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb); + /* Drop the lock as urb->complete() may enqueue another urb. */ + spin_unlock(&whc->lock); + wusbhc_giveback_urb(wusbhc, urb, status); + spin_lock(&whc->lock); + + kfree(wurb); +} + +/** + * get_urb_status_from_qtd - get the completed urb status from qTD status + * @urb: completed urb + * @status: qTD status + */ +static int get_urb_status_from_qtd(struct urb *urb, u32 status) +{ + if (status & QTD_STS_HALTED) { + if (status & QTD_STS_DBE) + return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM; + else if (status & QTD_STS_BABBLE) + return -EOVERFLOW; + else if (status & QTD_STS_RCE) + return -ETIME; + return -EPIPE; + } + if (usb_pipein(urb->pipe) + && (urb->transfer_flags & URB_SHORT_NOT_OK) + && urb->actual_length < urb->transfer_buffer_length) + return -EREMOTEIO; + return 0; +} + +/** + * process_inactive_qtd - process an inactive (but not halted) qTD. + * + * Update the urb with the transfer bytes from the qTD, if the urb is + * completely transfered or (in the case of an IN only) the LPF is + * set, then the transfer is complete and the urb should be returned + * to the system. + */ +void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, + struct whc_qtd *qtd) +{ + struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); + struct urb *urb = std->urb; + uint32_t status; + bool complete; + + status = le32_to_cpu(qtd->status); + + urb->actual_length += std->len - QTD_STS_TO_LEN(status); + + if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT)) + complete = true; + else + complete = whc_std_last(std); + + qset_remove_qtd(whc, qset); + qset_free_std(whc, std); + + /* + * Transfers for this URB are complete? Then return it to the + * USB subsystem. 
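A few lines up, the function credits the URB with std->len minus the residual byte count still held in the qTD status word. A worked example of that arithmetic, with a hypothetical status value and the QTD_STS_TO_LEN() mask copied from whci-hc.h:

#include <assert.h>

#define QTD_STS_TO_LEN(s)       ((s) & 0x000fffff)      /* as in whci-hc.h */

int main(void)
{
        unsigned int std_len = 4096;            /* bytes covered by this sTD */
        unsigned int status = 0x00000400;       /* hypothetical: 1024 bytes left untransferred */

        /* urb->actual_length += std->len - QTD_STS_TO_LEN(status); */
        assert(std_len - QTD_STS_TO_LEN(status) == 3072);
        return 0;
}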
+ */ + if (complete) { + qset_remove_qtds(whc, qset, urb); + qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status)); + + /* + * If iAlt isn't valid then the hardware didn't + * advance iCur. Adjust the start and end pointers to + * match iCur. + */ + if (!(status & QTD_STS_IALT_VALID)) + qset->td_start = qset->td_end + = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status)); + qset->pause_after_urb = NULL; + } +} + +/** + * process_halted_qtd - process a qset with a halted qtd + * + * Remove all the qTDs for the failed URB and return the failed URB to + * the USB subsystem. Then remove all other qTDs so the qset can be + * removed. + * + * FIXME: this is the point where rate adaptation can be done. If a + * transfer failed because it exceeded the maximum number of retries + * then it could be reactivated with a slower rate without having to + * remove the qset. + */ +void process_halted_qtd(struct whc *whc, struct whc_qset *qset, + struct whc_qtd *qtd) +{ + struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); + struct urb *urb = std->urb; + int urb_status; + + urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status)); + + qset_remove_qtds(whc, qset, urb); + qset_remove_urb(whc, qset, urb, urb_status); + + list_for_each_entry(std, &qset->stds, list_node) { + if (qset->ntds == 0) + break; + qset_remove_qtd(whc, qset); + std->qtd = NULL; + } + + qset->remove = 1; +} + +void qset_free(struct whc *whc, struct whc_qset *qset) +{ + dma_pool_free(whc->qset_pool, qset, qset->qset_dma); +} + +/** + * qset_delete - wait for a qset to be unused, then free it. + */ +void qset_delete(struct whc *whc, struct whc_qset *qset) +{ + wait_for_completion(&qset->remove_complete); + qset_free(whc, qset); +} diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h new file mode 100644 index 0000000..1d2a53b --- /dev/null +++ b/drivers/usb/host/whci/whcd.h @@ -0,0 +1,197 @@ +/* + * Wireless Host Controller (WHC) private header. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ +#ifndef __WHCD_H +#define __WHCD_H + +#include +#include + +#include "whci-hc.h" + +/* Generic command timeout. 
*/ +#define WHC_GENCMD_TIMEOUT_MS 100 + + +struct whc { + struct wusbhc wusbhc; + struct umc_dev *umc; + + resource_size_t base_phys; + void __iomem *base; + int irq; + + u8 n_devices; + u8 n_keys; + u8 n_mmc_ies; + + u64 *pz_list; + struct dn_buf_entry *dn_buf; + struct di_buf_entry *di_buf; + dma_addr_t pz_list_dma; + dma_addr_t dn_buf_dma; + dma_addr_t di_buf_dma; + + spinlock_t lock; + struct mutex mutex; + + void * gen_cmd_buf; + dma_addr_t gen_cmd_buf_dma; + wait_queue_head_t cmd_wq; + + struct workqueue_struct *workqueue; + struct work_struct dn_work; + + struct dma_pool *qset_pool; + + struct list_head async_list; + struct list_head async_removed_list; + wait_queue_head_t async_list_wq; + struct work_struct async_work; + + struct list_head periodic_list[5]; + struct list_head periodic_removed_list; + wait_queue_head_t periodic_list_wq; + struct work_struct periodic_work; +}; + +#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc)) + +/** + * struct whc_std - a software TD. + * @urb: the URB this sTD is for. + * @offset: start of the URB's data for this TD. + * @len: the length of data in the associated TD. + * @ntds_remaining: number of TDs (starting from this one) in this transfer. + * + * Queued URBs may require more TDs than are available in a qset so we + * use a list of these "software TDs" (sTDs) to hold per-TD data. + */ +struct whc_std { + struct urb *urb; + size_t len; + int ntds_remaining; + struct whc_qtd *qtd; + + struct list_head list_node; + int num_pointers; + dma_addr_t dma_addr; + struct whc_page_list_entry *pl_virt; +}; + +/** + * struct whc_urb - per URB host controller structure. + * @urb: the URB this struct is for. + * @qset: the qset associated to the URB. + * @dequeue_work: the work to remove the URB when dequeued. + * @is_async: the URB belongs to async sheduler or not. + * @status: the status to be returned when calling wusbhc_giveback_urb. + */ +struct whc_urb { + struct urb *urb; + struct whc_qset *qset; + struct work_struct dequeue_work; + bool is_async; + int status; +}; + +/** + * whc_std_last - is this sTD the URB's last? + * @std: the sTD to check. 
+ */ +static inline bool whc_std_last(struct whc_std *std) +{ + return std->ntds_remaining <= 1; +} + +enum whc_update { + WHC_UPDATE_ADDED = 0x01, + WHC_UPDATE_REMOVED = 0x02, + WHC_UPDATE_UPDATED = 0x04, +}; + +/* init.c */ +int whc_init(struct whc *whc); +void whc_clean_up(struct whc *whc); + +/* hw.c */ +void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val); +int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len); + +/* wusb.c */ +int whc_wusbhc_start(struct wusbhc *wusbhc); +void whc_wusbhc_stop(struct wusbhc *wusbhc); +int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, + u8 handle, struct wuie_hdr *wuie); +int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle); +int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm); +int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); +int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots); +int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, + const void *ptk, size_t key_size); +int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, + const void *gtk, size_t key_size); +int whc_set_cluster_id(struct whc *whc, u8 bcid); + +/* int.c */ +irqreturn_t whc_int_handler(struct usb_hcd *hcd); +void whc_dn_work(struct work_struct *work); + +/* asl.c */ +void asl_start(struct whc *whc); +void asl_stop(struct whc *whc); +int asl_init(struct whc *whc); +void asl_clean_up(struct whc *whc); +int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); +int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status); +void asl_qset_delete(struct whc *whc, struct whc_qset *qset); +void scan_async_work(struct work_struct *work); + +/* pzl.c */ +int pzl_init(struct whc *whc); +void pzl_clean_up(struct whc *whc); +void pzl_start(struct whc *whc); +void pzl_stop(struct whc *whc); +int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags); +int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status); +void pzl_qset_delete(struct whc *whc, struct whc_qset *qset); +void scan_periodic_work(struct work_struct *work); + +/* qset.c */ +struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags); +void qset_free(struct whc *whc, struct whc_qset *qset); +struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags); +void qset_delete(struct whc *whc, struct whc_qset *qset); +void qset_clear(struct whc *whc, struct whc_qset *qset); +int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, + gfp_t mem_flags); +void qset_free_std(struct whc *whc, struct whc_std *std); +void qset_remove_urb(struct whc *whc, struct whc_qset *qset, + struct urb *urb, int status); +void process_halted_qtd(struct whc *whc, struct whc_qset *qset, + struct whc_qtd *qtd); +void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, + struct whc_qtd *qtd); +enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset); +void qset_remove_complete(struct whc *whc, struct whc_qset *qset); +void dump_qset(struct whc_qset *qset, struct device *dev); +void pzl_update(struct whc *whc, uint32_t wusbcmd); +void asl_update(struct whc *whc, uint32_t wusbcmd); + +#endif /* #ifndef __WHCD_H */ diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h new file mode 100644 index 0000000..bff1eb7 --- /dev/null +++ b/drivers/usb/host/whci/whci-hc.h @@ -0,0 +1,416 @@ +/* + * Wireless Host Controller (WHC) data structures. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ +#ifndef _WHCI_WHCI_HC_H +#define _WHCI_WHCI_HC_H + +#include + +/** + * WHCI_PAGE_SIZE - page size use by WHCI + * + * WHCI assumes that host system uses pages of 4096 octets. + */ +#define WHCI_PAGE_SIZE 4096 + + +/** + * QTD_MAX_TXFER_SIZE - max number of bytes to transfer with a single + * qtd. + * + * This is 2^20 - 1. + */ +#define QTD_MAX_XFER_SIZE 1048575 + + +/** + * struct whc_qtd - Queue Element Transfer Descriptors (qTD) + * + * This describes the data for a bulk, control or interrupt transfer. + * + * [WHCI] section 3.2.4 + */ +struct whc_qtd { + __le32 status; /*< remaining transfer len and transfer status */ + __le32 options; + __le64 page_list_ptr; /*< physical pointer to data buffer page list*/ + __u8 setup[8]; /*< setup data for control transfers */ +} __attribute__((packed)); + +#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */ +#define QTD_STS_HALTED (1 << 30) /* transfer halted */ +#define QTD_STS_DBE (1 << 29) /* data buffer error */ +#define QTD_STS_BABBLE (1 << 28) /* babble detected */ +#define QTD_STS_RCE (1 << 27) /* retry count exceeded */ +#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */ +#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */ +#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */ +#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */ +#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */ +#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff) + +#define QTD_OPT_IOC (1 << 1) /* page_list_ptr points to buffer directly */ +#define QTD_OPT_SMALL (1 << 0) /* interrupt on complete */ + +/** + * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD) + * + * This describes the data and other parameters for an isochronous + * transfer. + * + * [WHCI] section 3.2.5 + */ +struct whc_itd { + __le16 presentation_time; /*< presentation time for OUT transfers */ + __u8 num_segments; /*< number of data segments in segment list */ + __u8 status; /*< command execution status */ + __le32 options; /*< misc transfer options */ + __le64 page_list_ptr; /*< physical pointer to data buffer page list */ + __le64 seg_list_ptr; /*< physical pointer to segment list */ +} __attribute__((packed)); + +#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */ +#define ITD_STS_DBE (1 << 5) /* data buffer error */ +#define ITD_STS_BABBLE (1 << 4) /* babble detected */ +#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */ + +#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */ +#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */ + +/** + * Page list entry. + * + * A TD's page list must contain sufficient page list entries for the + * total data length in the TD. 
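Since WHCI_PAGE_SIZE is 4096, that requirement amounts to one entry for every 4 KiB page the buffer touches, and an unaligned buffer can straddle one page more than its length alone suggests. A small sketch of that count (the addresses are invented; this mirrors the stated requirement rather than transcribing qset_fill_page_list()):

#include <stdio.h>

#define WHCI_PAGE_SIZE 4096

static unsigned long pages_touched(unsigned long long dma, unsigned long len)
{
        if (len == 0)
                return 0;
        return (unsigned long)(((dma + len - 1) / WHCI_PAGE_SIZE)
                               - (dma / WHCI_PAGE_SIZE) + 1);
}

int main(void)
{
        /* 8 KiB starting exactly on a page boundary: 2 pages, 2 entries. */
        printf("%lu\n", pages_touched(0x10000000ULL, 8192));
        /* The same 8 KiB starting 0x200 into a page spills into a third page. */
        printf("%lu\n", pages_touched(0x10000200ULL, 8192));
        return 0;
}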
+ * + * [WHCI] section 3.2.4.3 + */ +struct whc_page_list_entry { + __le64 buf_ptr; /*< physical pointer to buffer */ +} __attribute__((packed)); + +/** + * struct whc_seg_list_entry - Segment list entry. + * + * Describes a portion of the data buffer described in the containing + * qTD's page list. + * + * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr + * + qtd->seg_list_ptr[seg].offset; + * + * Segments can't cross page boundries. + * + * [WHCI] section 3.2.5.5 + */ +struct whc_seg_list_entry { + __le16 len; /*< segment length */ + __u8 idx; /*< index into page list */ + __u8 status; /*< segment status */ + __le16 offset; /*< 12 bit offset into page */ +} __attribute__((packed)); + +/** + * struct whc_qhead - endpoint and status information for a qset. + * + * [WHCI] section 3.2.6 + */ +struct whc_qhead { + __le64 link; /*< next qset in list */ + __le32 info1; + __le32 info2; + __le32 info3; + __le16 status; + __le16 err_count; /*< transaction error count */ + __le32 cur_window; + __le32 scratch[3]; /*< h/w scratch area */ + union { + struct whc_qtd qtd; + struct whc_itd itd; + } overlay; +} __attribute__((packed)); + +#define QH_LINK_PTR_MASK (~0x03Full) +#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK) +#define QH_LINK_IQS (1 << 4) /* isochronous queue set */ +#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */ +#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */ + +#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */ +#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */ +#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */ +#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */ +#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */ +#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */ +#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */ +#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */ +#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */ +#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */ +#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */ + +#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */ +#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */ +#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */ +#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */ +#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */ +#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */ +#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */ +#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */ + +#define QH_INFO3_TX_RATE_53_3 (0 << 24) +#define QH_INFO3_TX_RATE_80 (1 << 24) +#define QH_INFO3_TX_RATE_106_7 (2 << 24) +#define QH_INFO3_TX_RATE_160 (3 << 24) +#define QH_INFO3_TX_RATE_200 (4 << 24) +#define QH_INFO3_TX_RATE_320 (5 << 24) +#define QH_INFO3_TX_RATE_400 (6 << 24) +#define QH_INFO3_TX_RATE_480 (7 << 24) +#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */ + +#define QH_STATUS_FLOW_CTRL (1 << 15) +#define QH_STATUS_ICUR(i) ((i) << 5) +#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7) + +/** + * usb_pipe_to_qh_type - USB core pipe type to QH transfer type + * + * Returns the QH type field for a USB core pipe type. 
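+ * + * Illustrative use when building a qset's info1 word (a sketch, not + * a call site quoted from this patch): + * + * info1 |= QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) + * | usb_pipe_to_qh_type(urb->pipe);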
+ */ +static inline unsigned usb_pipe_to_qh_type(unsigned pipe) +{ + static const unsigned type[] = { + [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC, + [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT, + [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL, + [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK, + }; + return type[usb_pipetype(pipe)]; +} + +/** + * Maxiumum number of TDs in a qset. + */ +#define WHCI_QSET_TD_MAX 8 + +/** + * struct whc_qset - WUSB data transfers to a specific endpoint + * @qh: the QHead of this qset + * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt + * transfers) + * @itd: up to 8 iTDs (for qsets for isochronous transfers) + * @qset_dma: DMA address for this qset + * @whc: WHCI HC this qset is for + * @ep: endpoint + * @stds: list of sTDs queued to this qset + * @ntds: number of qTDs queued (not necessarily the same as nTDs + * field in the QH) + * @td_start: index of the first qTD in the list + * @td_end: index of next free qTD in the list (provided + * ntds < WHCI_QSET_TD_MAX) + * + * Queue Sets (qsets) are added to the asynchronous schedule list + * (ASL) or the periodic zone list (PZL). + * + * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate). + * Each TD may refer to at most 1 MiB of data. If a single transfer + * has > 8MiB of data, TDs can be reused as they are completed since + * the TD list is used as a circular buffer. Similarly, several + * (smaller) transfers may be queued in a qset. + * + * WHCI controllers may cache portions of the qsets in the ASL and + * PZL, requiring the WHCD to inform the WHC that the lists have been + * updated (fields changed or qsets inserted or removed). For safe + * insertion and removal of qsets from the lists the schedule must be + * stopped to avoid races in updating the QH link pointers. + * + * Since the HC is free to execute qsets in any order, all transfers + * to an endpoint should use the same qset to ensure transfers are + * executed in the order they're submitted. + * + * [WHCI] section 3.2.3 + */ +struct whc_qset { + struct whc_qhead qh; + union { + struct whc_qtd qtd[WHCI_QSET_TD_MAX]; + struct whc_itd itd[WHCI_QSET_TD_MAX]; + }; + + /* private data for WHCD */ + dma_addr_t qset_dma; + struct whc *whc; + struct usb_host_endpoint *ep; + struct list_head stds; + int ntds; + int td_start; + int td_end; + struct list_head list_node; + unsigned in_sw_list:1; + unsigned in_hw_list:1; + unsigned remove:1; + struct urb *pause_after_urb; + struct completion remove_complete; + int max_burst; + int max_seq; +}; + +static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) +{ + if (target) + *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target); + else + *ptr = QH_LINK_T; +} + +/** + * struct di_buf_entry - Device Information (DI) buffer entry. + * + * There's one of these per connected device. + */ +struct di_buf_entry { + __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */ + __le32 addr_sec_info; /*< addressing and security info */ + __le32 reserved[7]; +} __attribute__((packed)); + +#define WHC_DI_SECURE (1 << 31) +#define WHC_DI_DISABLE (1 << 30) +#define WHC_DI_KEY_IDX(k) ((k) << 8) +#define WHC_DI_KEY_IDX_MASK 0x0000ff00 +#define WHC_DI_DEV_ADDR(a) ((a) << 0) +#define WHC_DI_DEV_ADDR_MASK 0x000000ff + +/** + * struct dn_buf_entry - Device Notification (DN) buffer entry. 
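+ * + * Each entry is 64 octets (1 + 1 + 1 + 1 + 4 + 56), so a 4096 octet + * DN buffer holds 64 of them; see WHC_N_DN_ENTRIES below. This is + * arithmetic derived from the layout, not a number quoted from the + * spec.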
+ * + * [WHCI] section 3.2.8 + */ +struct dn_buf_entry { + __u8 msg_size; /*< number of octets of valid DN data */ + __u8 reserved1; + __u8 src_addr; /*< source address */ + __u8 status; /*< buffer entry status */ + __le32 tkid; /*< TKID for source device, valid if secure bit is set */ + __u8 dn_data[56]; /*< up to 56 octets of DN data */ +} __attribute__((packed)); + +#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */ +#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */ + +#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry)) + +/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of + data. [WHCI] section 2.4.7. */ +#define WHC_GEN_CMD_DATA_LEN 256 + +/* + * HC registers. + * + * [WHCI] section 2.4 + */ + +#define WHCIVERSION 0x00 + +#define WHCSPARAMS 0x04 +# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff) +# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff) +# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f) + +#define WUSBCMD 0x08 +# define WUSBCMD_BCID(b) ((b) << 16) +# define WUSBCMD_BCID_MASK (0xff << 16) +# define WUSBCMD_ASYNC_QSET_RM (1 << 12) +# define WUSBCMD_PERIODIC_QSET_RM (1 << 11) +# define WUSBCMD_WUSBSI(s) ((s) << 8) +# define WUSBCMD_WUSBSI_MASK (0x7 << 8) +# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7) +# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6) +# define WUSBCMD_ASYNC_UPDATED (1 << 5) +# define WUSBCMD_PERIODIC_UPDATED (1 << 4) +# define WUSBCMD_ASYNC_EN (1 << 3) +# define WUSBCMD_PERIODIC_EN (1 << 2) +# define WUSBCMD_WHCRESET (1 << 1) +# define WUSBCMD_RUN (1 << 0) + +#define WUSBSTS 0x0c +# define WUSBSTS_ASYNC_SCHED (1 << 15) +# define WUSBSTS_PERIODIC_SCHED (1 << 14) +# define WUSBSTS_DNTS_SCHED (1 << 13) +# define WUSBSTS_HCHALTED (1 << 12) +# define WUSBSTS_GEN_CMD_DONE (1 << 9) +# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8) +# define WUSBSTS_DNTS_OVERFLOW (1 << 7) +# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6) +# define WUSBSTS_HOST_ERR (1 << 5) +# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4) +# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3) +# define WUSBSTS_DNTS_INT (1 << 2) +# define WUSBSTS_ERR_INT (1 << 1) +# define WUSBSTS_INT (1 << 0) +# define WUSBSTS_INT_MASK 0x3ff + +#define WUSBINTR 0x10 +# define WUSBINTR_GEN_CMD_DONE (1 << 9) +# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8) +# define WUSBINTR_DNTS_OVERFLOW (1 << 7) +# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6) +# define WUSBINTR_HOST_ERR (1 << 5) +# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4) +# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3) +# define WUSBINTR_DNTS_INT (1 << 2) +# define WUSBINTR_ERR_INT (1 << 1) +# define WUSBINTR_INT (1 << 0) +# define WUSBINTR_ALL 0x3ff + +#define WUSBGENCMDSTS 0x14 +# define WUSBGENCMDSTS_ACTIVE (1 << 31) +# define WUSBGENCMDSTS_ERROR (1 << 24) +# define WUSBGENCMDSTS_IOC (1 << 23) +# define WUSBGENCMDSTS_MMCIE_ADD 0x01 +# define WUSBGENCMDSTS_MMCIE_RM 0x02 +# define WUSBGENCMDSTS_SET_MAS 0x03 +# define WUSBGENCMDSTS_CHAN_STOP 0x04 +# define WUSBGENCMDSTS_RWP_EN 0x05 + +#define WUSBGENCMDPARAMS 0x18 +#define WUSBGENADDR 0x20 +#define WUSBASYNCLISTADDR 0x28 +#define WUSBDNTSBUFADDR 0x30 +#define WUSBDEVICEINFOADDR 0x38 + +#define WUSBSETSECKEYCMD 0x40 +# define WUSBSETSECKEYCMD_SET (1 << 31) +# define WUSBSETSECKEYCMD_ERASE (1 << 30) +# define WUSBSETSECKEYCMD_GTK (1 << 8) +# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0) + +#define WUSBTKID 0x44 +#define WUSBSECKEY 0x48 +#define WUSBPERIODICLISTBASE 0x58 +#define WUSBMASINDEX 0x60 + +#define WUSBDNTSCTRL 0x64 +# define 
WUSBDNTSCTRL_ACTIVE (1 << 31) +# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8) +# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0) + +#define WUSBTIME 0x68 +#define WUSBBPST 0x6c +#define WUSBDIBUPDATED 0x70 + +#endif /* #ifndef _WHCI_WHCI_HC_H */ diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c new file mode 100644 index 0000000..66e4ddc --- /dev/null +++ b/drivers/usb/host/whci/wusb.c @@ -0,0 +1,241 @@ +/* + * Wireless Host Controller (WHC) WUSB operations. + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include +#define D_LOCAL 1 +#include + +#include "../../wusbcore/wusbhc.h" + +#include "whcd.h" + +#if D_LOCAL >= 1 +static void dump_di(struct whc *whc, int idx) +{ + struct di_buf_entry *di = &whc->di_buf[idx]; + struct device *dev = &whc->umc->dev; + char buf[128]; + + bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS); + + d_printf(1, dev, "DI[%d]\n", idx); + d_printf(1, dev, " availability: %s\n", buf); + d_printf(1, dev, " %c%c key idx: %d dev addr: %d\n", + (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ', + (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ', + (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8, + (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK)); +} +#else +static inline void dump_di(struct whc *whc, int idx) +{ +} +#endif + +static int whc_update_di(struct whc *whc, int idx) +{ + int offset = idx / 32; + u32 bit = 1 << (idx % 32); + + dump_di(whc, idx); + + le_writel(bit, whc->base + WUSBDIBUPDATED + offset); + + return whci_wait_for(&whc->umc->dev, + whc->base + WUSBDIBUPDATED + offset, bit, 0, + 100, "DI update"); +} + +/* + * WHCI starts and stops MMCs based on there being a valid GTK so + * these need only start/stop the asynchronous and periodic schedules. 
+ */ + +int whc_wusbhc_start(struct wusbhc *wusbhc) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + + asl_start(whc); + pzl_start(whc); + + return 0; +} + +void whc_wusbhc_stop(struct wusbhc *wusbhc) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + + pzl_stop(whc); + asl_stop(whc); +} + +int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, + u8 handle, struct wuie_hdr *wuie) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + u32 params; + + params = (interval << 24) + | (repeat_cnt << 16) + | (wuie->bLength << 8) + | handle; + + return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength); +} + +int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + u32 params; + + params = handle; + + return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0); +} + +int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + + if (stream_index >= 0) + whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index)); + + return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm)); +} + +int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + int idx = wusb_dev->port_idx; + struct di_buf_entry *di = &whc->di_buf[idx]; + int ret; + + mutex_lock(&whc->mutex); + + uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability); + di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK); + di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr); + + ret = whc_update_di(whc, idx); + + mutex_unlock(&whc->mutex); + + return ret; +} + +/* + * Set the number of Device Notification Time Slots (DNTS) and enable + * device notifications. + */ +int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + u32 dntsctrl; + + dntsctrl = WUSBDNTSCTRL_ACTIVE + | WUSBDNTSCTRL_INTERVAL(interval) + | WUSBDNTSCTRL_SLOTS(slots); + + le_writel(dntsctrl, whc->base + WUSBDNTSCTRL); + + return 0; +} + +static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid, + const void *key, size_t key_size, bool is_gtk) +{ + uint32_t setkeycmd; + uint32_t seckey[4]; + int i; + int ret; + + memcpy(seckey, key, key_size); + setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index); + if (is_gtk) + setkeycmd |= WUSBSETSECKEYCMD_GTK; + + le_writel(tkid, whc->base + WUSBTKID); + for (i = 0; i < 4; i++) + le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i); + le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD); + + ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD, + WUSBSETSECKEYCMD_SET, 0, 100, "set key"); + + return ret; +} + +/** + * whc_set_ptk - set the PTK to use for a device. + * + * The index into the key table for this PTK is the same as the + * device's port index. 
+ */ +int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, + const void *ptk, size_t key_size) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + struct di_buf_entry *di = &whc->di_buf[port_idx]; + int ret; + + mutex_lock(&whc->mutex); + + if (ptk) { + ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false); + if (ret) + goto out; + + di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK; + di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx); + } else + di->addr_sec_info &= ~WHC_DI_SECURE; + + ret = whc_update_di(whc, port_idx); +out: + mutex_unlock(&whc->mutex); + return ret; +} + +/** + * whc_set_gtk - set the GTK for subsequent broadcast packets + * + * The GTK is stored in the last entry in the key table (the previous + * N_DEVICES entries are for the per-device PTKs). + */ +int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid, + const void *gtk, size_t key_size) +{ + struct whc *whc = wusbhc_to_whc(wusbhc); + int ret; + + mutex_lock(&whc->mutex); + + ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true); + + mutex_unlock(&whc->mutex); + + return ret; +} + +int whc_set_cluster_id(struct whc *whc, u8 bcid) +{ + whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid)); + return 0; +} -- cgit v0.10.2 From df3654236e31f6cf425ed2ee5a74ceac366a7a9e Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:29 +0100 Subject: wusb: add the Wire Adapter (WA) core Common code for supporting Host Wire Adapters and Device Wire Adapters. Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile index 6504f42..7a4d007 100644 --- a/drivers/usb/wusbcore/Makefile +++ b/drivers/usb/wusbcore/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_USB_WUSB) += wusbcore.o wusb-cbaf.o +obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o wusbcore-objs := \ crypto.o \ @@ -12,3 +13,8 @@ wusbcore-objs := \ wusbhc.o wusb-cbaf-objs := cbaf.o + +wusb-wa-objs := wa-hc.o \ + wa-nep.o \ + wa-rpipe.o \ + wa-xfer.o diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c new file mode 100644 index 0000000..9d04722 --- /dev/null +++ b/drivers/usb/wusbcore/wa-hc.c @@ -0,0 +1,95 @@ +/* + * Wire Adapter Host Controller Driver + * Common items to HWA and DWA based HCDs + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + */ +#include "wusbhc.h" +#include "wa-hc.h" + +/** + * Assumes + * + * wa->usb_dev and wa->usb_iface initialized and refcounted, + * wa->wa_descr initialized. 
+ */ +int wa_create(struct wahc *wa, struct usb_interface *iface) +{ + int result; + struct device *dev = &iface->dev; + + result = wa_rpipes_create(wa); + if (result < 0) + goto error_rpipes_create; + /* Fill up Data Transfer EP pointers */ + wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc; + wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc; + wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize); + wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL); + if (wa->xfer_result == NULL) + goto error_xfer_result_alloc; + result = wa_nep_create(wa, iface); + if (result < 0) { + dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n", + result); + goto error_nep_create; + } + return 0; + +error_nep_create: + kfree(wa->xfer_result); +error_xfer_result_alloc: + wa_rpipes_destroy(wa); +error_rpipes_create: + return result; +} +EXPORT_SYMBOL_GPL(wa_create); + + +void __wa_destroy(struct wahc *wa) +{ + if (wa->dti_urb) { + usb_kill_urb(wa->dti_urb); + usb_put_urb(wa->dti_urb); + usb_kill_urb(wa->buf_in_urb); + usb_put_urb(wa->buf_in_urb); + } + kfree(wa->xfer_result); + wa_nep_destroy(wa); + wa_rpipes_destroy(wa); +} +EXPORT_SYMBOL_GPL(__wa_destroy); + +/** + * wa_reset_all - reset the WA device + * @wa: the WA to be reset + * + * For HWAs the radio controller and all other PALs are also reset. + */ +void wa_reset_all(struct wahc *wa) +{ + /* FIXME: assuming HWA. */ + wusbhc_reset_all(wa->wusb); +} + +MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Wireless USB Wire Adapter core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h new file mode 100644 index 0000000..586d350 --- /dev/null +++ b/drivers/usb/wusbcore/wa-hc.h @@ -0,0 +1,417 @@ +/* + * HWA Host Controller Driver + * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8]) + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * This driver implements a USB Host Controller (struct usb_hcd) for a + * Wireless USB Host Controller based on the Wireless USB 1.0 + * Host-Wire-Adapter specification (in layman terms, a USB-dongle that + * implements a Wireless USB host). + * + * Check out the Design-overview.txt file in the source documentation + * for other details on the implementation. + * + * Main blocks: + * + * driver glue with the driver API, workqueue daemon + * + * lc RC instance life cycle management (create, destroy...) + * + * hcd glue with the USB API Host Controller Interface API. + * + * nep Notification EndPoint managent: collect notifications + * and queue them with the workqueue daemon. + * + * Handle notifications as coming from the NEP. Sends them + * off others to their respective modules (eg: connect, + * disconnect and reset go to devconnect). 
+ * + * rpipe Remote Pipe management; rpipe is what we use to write + * to an endpoint on a WUSB device that is connected to a + * HWA RC. + * + * xfer Transfer management -- this is all the code that gets a + * buffer and pushes it to a device (or vice versa). + * + * Some day a lot of this code will be shared between this driver and + * the drivers for DWA (xfer, rpipe). + * + * It all starts at driver.c:hwahc_probe(), when one of these guys is + * connected. hwahc_disconnect() stops it. + * + * During operation, the main source of activity is devices connecting or + * disconnecting. They cause the HWA RC to send notifications into + * nep.c:hwahc_nep_cb() that will dispatch them to + * notif.c:wa_notif_dispatch(). From there they will fan out to cause + * device connects, disconnects, etc. + * + * Note much of the activity is difficult to follow. For example a + * device connect goes to devconnect, which will cause the "fake" root + * hub port to show a connect and stop there. Then khubd will notice + * and call into the rh.c:hwahc_rc_port_reset() code to authenticate + * the device (and this might require user intervention) and enable + * the port. + * + * We also have a timer workqueue going from devconnect.c that + * schedules in hwahc_devconnect_create(). + * + * The rest of the traffic is in the usual entry points of a USB HCD, + * which are hooked up in driver.c:hwahc_rc_driver, and defined in + * hcd.c. + */ + +#ifndef __HWAHC_INTERNAL_H__ +#define __HWAHC_INTERNAL_H__ + +#include +#include +#include +#include +#include +#include +#include + +struct wusbhc; +struct wahc; +extern void wa_urb_enqueue_run(struct work_struct *ws); + +/** + * RPipe instance + * + * @descr's fields are kept in LE, as we need to send it back and + * forth. + * + * @wa is referenced when set + * + * @segs_available is the number of request segments that still can + * be submitted to the controller without overloading + * it. It is initialized to descr->wRequests when + * aiming. + * + * An rpipe supports a max of descr->wRequests at the same time; before + * submitting seg_lock has to be taken. If segs_available > 0, then we can + * submit; if not, we have to queue them. + */ +struct wa_rpipe { + struct kref refcnt; + struct usb_rpipe_descriptor descr; + struct usb_host_endpoint *ep; + struct wahc *wa; + spinlock_t seg_lock; + struct list_head seg_list; + atomic_t segs_available; + u8 buffer[1]; /* For reads/writes on USB */ +}; + + +/** + * Instance of a HWA Host Controller + * + * Except where a more specific lock/mutex applies or the field is + * atomic, all fields are protected by @mutex. + * + * @wa_descr Can be accessed without locking because it is in + * the same area where the device descriptors were + * read, so it is guaranteed to exist unmodified while + * the device exists. + * + * Endianness has been converted to the CPU's. + * + * @nep_* can be accessed without locking as its processing is + * serialized; we submit a NEP URB and it comes to + * hwahc_nep_cb(), which won't issue another URB until it is + * done processing it. + * + * @xfer_list: + * + * List of active transfers to verify existence from a xfer id + * gotten from the xfer result message. Can't use urb->list because + * it goes by endpoint, and we don't know the endpoint at the time + * when we get the xfer result message. We can't really rely on the + * pointer (will have to change for 64 bits) as the xfer id is 32 bits. + * + * @xfer_delayed_list: List of transfers that need to be started + * (with a workqueue, because they were + * submitted from an atomic context).
+ * + * FIXME: this needs to be layered up: a wusbhc layer (for sharing + * comonalities with WHCI), a wa layer (for sharing + * comonalities with DWA-RC). + */ +struct wahc { + struct usb_device *usb_dev; + struct usb_interface *usb_iface; + + /* HC to deliver notifications */ + union { + struct wusbhc *wusb; + struct dwahc *dwa; + }; + + const struct usb_endpoint_descriptor *dto_epd, *dti_epd; + const struct usb_wa_descriptor *wa_descr; + + struct urb *nep_urb; /* Notification EndPoint [lockless] */ + struct edc nep_edc; + void *nep_buffer; + size_t nep_buffer_size; + + atomic_t notifs_queued; + + u16 rpipes; + unsigned long *rpipe_bm; /* rpipe usage bitmap */ + spinlock_t rpipe_bm_lock; /* protect rpipe_bm */ + struct mutex rpipe_mutex; /* assigning resources to endpoints */ + + struct urb *dti_urb; /* URB for reading xfer results */ + struct urb *buf_in_urb; /* URB for reading data in */ + struct edc dti_edc; /* DTI error density counter */ + struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */ + size_t xfer_result_size; + + s32 status; /* For reading status */ + + struct list_head xfer_list; + struct list_head xfer_delayed_list; + spinlock_t xfer_list_lock; + struct work_struct xfer_work; + atomic_t xfer_id_count; +}; + + +extern int wa_create(struct wahc *wa, struct usb_interface *iface); +extern void __wa_destroy(struct wahc *wa); +void wa_reset_all(struct wahc *wa); + + +/* Miscellaneous constants */ +enum { + /** Max number of EPROTO errors we tolerate on the NEP in a + * period of time */ + HWAHC_EPROTO_MAX = 16, + /** Period of time for EPROTO errors (in jiffies) */ + HWAHC_EPROTO_PERIOD = 4 * HZ, +}; + + +/* Notification endpoint handling */ +extern int wa_nep_create(struct wahc *, struct usb_interface *); +extern void wa_nep_destroy(struct wahc *); + +static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask) +{ + struct urb *urb = wa->nep_urb; + urb->transfer_buffer = wa->nep_buffer; + urb->transfer_buffer_length = wa->nep_buffer_size; + return usb_submit_urb(urb, gfp_mask); +} + +static inline void wa_nep_disarm(struct wahc *wa) +{ + usb_kill_urb(wa->nep_urb); +} + + +/* RPipes */ +static inline void wa_rpipe_init(struct wahc *wa) +{ + spin_lock_init(&wa->rpipe_bm_lock); + mutex_init(&wa->rpipe_mutex); +} + +static inline void wa_init(struct wahc *wa) +{ + edc_init(&wa->nep_edc); + atomic_set(&wa->notifs_queued, 0); + wa_rpipe_init(wa); + edc_init(&wa->dti_edc); + INIT_LIST_HEAD(&wa->xfer_list); + INIT_LIST_HEAD(&wa->xfer_delayed_list); + spin_lock_init(&wa->xfer_list_lock); + INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run); + atomic_set(&wa->xfer_id_count, 1); +} + +/** + * Destroy a pipe (when refcount drops to zero) + * + * Assumes it has been moved to the "QUIESCING" state. + */ +struct wa_xfer; +extern void rpipe_destroy(struct kref *_rpipe); +static inline +void __rpipe_get(struct wa_rpipe *rpipe) +{ + kref_get(&rpipe->refcnt); +} +extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *, + struct urb *, gfp_t); +static inline void rpipe_put(struct wa_rpipe *rpipe) +{ + kref_put(&rpipe->refcnt, rpipe_destroy); + +} +extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *); +extern int wa_rpipes_create(struct wahc *); +extern void wa_rpipes_destroy(struct wahc *); +static inline void rpipe_avail_dec(struct wa_rpipe *rpipe) +{ + atomic_dec(&rpipe->segs_available); +} + +/** + * Returns true if the rpipe is ready to submit more segments. 
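+ * + * Typical use when a segment completes (a sketch; assumes a helper + * like wa-xfer.c's wa_xfer_delayed_run() to kick delayed segments): + * + * if (rpipe_avail_inc(rpipe)) + * wa_xfer_delayed_run(rpipe);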
+ */ +static inline int rpipe_avail_inc(struct wa_rpipe *rpipe) +{ + return atomic_inc_return(&rpipe->segs_available) > 0 + && !list_empty(&rpipe->seg_list); +} + + +/* Transferring data */ +extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *, + struct urb *, gfp_t); +extern int wa_urb_dequeue(struct wahc *, struct urb *); +extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *); + + +/* Misc + * + * FIXME: Refcounting for the actual @hwahc object is not correct; I + * mean, this should be refcounting on the HCD underneath, but + * it is not. In any case, the semantics for HCD refcounting + * are *weird*...on refcount reaching zero it just frees + * it...no RC specific function is called...unless I miss + * something. + * + * FIXME: has to go away in favour of an 'struct' hcd based sollution + */ +static inline struct wahc *wa_get(struct wahc *wa) +{ + usb_get_intf(wa->usb_iface); + return wa; +} + +static inline void wa_put(struct wahc *wa) +{ + usb_put_intf(wa->usb_iface); +} + + +static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature) +{ + return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + feature, + wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, + NULL, 0, 1000 /* FIXME: arbitrary */); +} + + +static inline int __wa_set_feature(struct wahc *wa, u16 feature) +{ + return __wa_feature(wa, 1, feature); +} + + +static inline int __wa_clear_feature(struct wahc *wa, u16 feature) +{ + return __wa_feature(wa, 0, feature); +} + + +/** + * Return the status of a Wire Adapter + * + * @wa: Wire Adapter instance + * @returns < 0 errno code on error, or status bitmap as described + * in WUSB1.0[8.3.1.6]. + * + * NOTE: need malloc, some arches don't take USB from the stack + */ +static inline +s32 __wa_get_status(struct wahc *wa) +{ + s32 result; + result = usb_control_msg( + wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), + USB_REQ_GET_STATUS, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, + &wa->status, sizeof(wa->status), + 1000 /* FIXME: arbitrary */); + if (result >= 0) + result = wa->status; + return result; +} + + +/** + * Waits until the Wire Adapter's status matches @mask/@value + * + * @wa: Wire Adapter instance. + * @returns < 0 errno code on error, otherwise status. + * + * Loop until the WAs status matches the mask and value (status & mask + * == value). Timeout if it doesn't happen. + * + * FIXME: is there an official specification on how long status + * changes can take? 
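+ * + * Example (a sketch mirroring the stop path in __wa_stop() below, + * here waiting for WA_ENABLE to come up after setting it): + * + * result = __wa_wait_status(wa, WA_ENABLE, WA_ENABLE); + * if (result < 0 && result != -ENODEV) + * dev_err(dev, "error waiting for HC to start: %d\n", result);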
+ */ +static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value) +{ + s32 result; + unsigned loops = 10; + do { + msleep(50); + result = __wa_get_status(wa); + if ((result & mask) == value) + break; + if (loops-- == 0) { + result = -ETIMEDOUT; + break; + } + } while (result >= 0); + return result; +} + + +/** Command @hwahc to stop, @returns 0 if ok, < 0 errno code on error */ +static inline int __wa_stop(struct wahc *wa) +{ + int result; + struct device *dev = &wa->usb_iface->dev; + + result = __wa_clear_feature(wa, WA_ENABLE); + if (result < 0 && result != -ENODEV) { + dev_err(dev, "error commanding HC to stop: %d\n", result); + goto out; + } + result = __wa_wait_status(wa, WA_ENABLE, 0); + if (result < 0 && result != -ENODEV) + dev_err(dev, "error waiting for HC to stop: %d\n", result); +out: + return 0; +} + + +#endif /* #ifndef __HWAHC_INTERNAL_H__ */ diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c new file mode 100644 index 0000000..3f54299 --- /dev/null +++ b/drivers/usb/wusbcore/wa-nep.c @@ -0,0 +1,310 @@ +/* + * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) + * Notification EndPoint support + * + * Copyright (C) 2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * This part takes care of getting the notification from the hw + * only and dispatching through wusbwad into + * wa_notif_dispatch. Handling is done there. + * + * WA notifications are limited in size; most of them are three or + * four bytes long, and the longest is the HWA Device Notification, + * which would not exceed 38 bytes (DNs are limited in payload to 32 + * bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA + * header (WUSB1.0[8.5.4.2]). + * + * It is not clear if more than one Device Notification can be packed + * in a HWA Notification, I assume no because of the wording in + * WUSB1.0[8.5.4.2]. In any case, the bigger any notification could + * get is 256 bytes (as the bLength field is a byte). + * + * So what we do is we have this buffer and read into it; when a + * notification arrives we schedule work to a specific, single thread + * workqueue (so notifications are serialized) and copy the + * notification data. After scheduling the work, we rearm the read from + * the notification endpoint. + * + * Entry points here are: + * + * wa_nep_[create|destroy]() To initialize/release this subsystem + * + * wa_nep_cb() Callback for the notification + * endpoint; when data is ready, this + * does the dispatching. 
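+ * + * (Note: the read buffer allocated in wa_nep_create() is 1024 bytes, + * comfortably above the 256 byte worst case per notification noted + * above -- an observation about this code, not a spec requirement.)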
+ */ +#include +#include +#include +#include "wa-hc.h" +#include "wusbhc.h" + +/* Structure for queueing notifications to the workqueue */ +struct wa_notif_work { + struct work_struct work; + struct wahc *wa; + size_t size; + u8 data[]; +}; + +/* + * Process incoming notifications from the WA's Notification EndPoint + * [the wuswad daemon, basically] + * + * @_nw: Pointer to a descriptor which has the pointer to the + * @wa, the size of the buffer and the work queue + * structure (so we can free all when done). + * @returns 0 if ok, < 0 errno code on error. + * + * All notifications follow the same format; they need to start with a + * 'struct wa_notif_hdr' header, so it is easy to parse through + * them. We just break the buffer in individual notifications (the + * standard doesn't say if it can be done or is forbidden, so we are + * cautious) and dispatch each. + * + * So the handling layers are is: + * + * WA specific notification (from NEP) + * Device Notification Received -> wa_handle_notif_dn() + * WUSB Device notification generic handling + * BPST Adjustment -> wa_handle_notif_bpst_adj() + * ... -> ... + * + * @wa has to be referenced + */ +static void wa_notif_dispatch(struct work_struct *ws) +{ + void *itr; + u8 missing = 0; + struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work); + struct wahc *wa = nw->wa; + struct wa_notif_hdr *notif_hdr; + size_t size; + + struct device *dev = &wa->usb_iface->dev; + +#if 0 + /* FIXME: need to check for this??? */ + if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */ + goto out; /* screw it */ +#endif + atomic_dec(&wa->notifs_queued); /* Throttling ctl */ + dev = &wa->usb_iface->dev; + size = nw->size; + itr = nw->data; + + while (size) { + if (size < sizeof(*notif_hdr)) { + missing = sizeof(*notif_hdr) - size; + goto exhausted_buffer; + } + notif_hdr = itr; + if (size < notif_hdr->bLength) + goto exhausted_buffer; + itr += notif_hdr->bLength; + size -= notif_hdr->bLength; + /* Dispatch the notification [don't use itr or size!] */ + switch (notif_hdr->bNotifyType) { + case HWA_NOTIF_DN: { + struct hwa_notif_dn *hwa_dn; + hwa_dn = container_of(notif_hdr, struct hwa_notif_dn, + hdr); + wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr, + hwa_dn->dndata, + notif_hdr->bLength - sizeof(*hwa_dn)); + break; + } + case WA_NOTIF_TRANSFER: + wa_handle_notif_xfer(wa, notif_hdr); + break; + case DWA_NOTIF_RWAKE: + case DWA_NOTIF_PORTSTATUS: + case HWA_NOTIF_BPST_ADJ: + /* FIXME: unimplemented WA NOTIFs */ + /* fallthru */ + default: + if (printk_ratelimit()) { + dev_err(dev, "HWA: unknown notification 0x%x, " + "%zu bytes; discarding\n", + notif_hdr->bNotifyType, + (size_t)notif_hdr->bLength); + dump_bytes(dev, notif_hdr, 16); + } + break; + } + } +out: + wa_put(wa); + kfree(nw); + return; + + /* THIS SHOULD NOT HAPPEN + * + * Buffer exahusted with partial data remaining; just warn and + * discard the data, as this should not happen. + */ +exhausted_buffer: + if (!printk_ratelimit()) + goto out; + dev_warn(dev, "HWA: device sent short notification, " + "%d bytes missing; discarding %d bytes.\n", + missing, (int)size); + dump_bytes(dev, itr, size); + goto out; +} + +/* + * Deliver incoming WA notifications to the wusbwa workqueue + * + * @wa: Pointer the Wire Adapter Controller Data Streaming + * instance (part of an 'struct usb_hcd'). + * @size: Size of the received buffer + * @returns 0 if ok, < 0 errno code on error. 
+ * + * The input buffer is @wa->nep_buffer, with @size bytes + * (guaranteed to fit in the allocated space, + * @wa->nep_buffer_size). + */ +static int wa_nep_queue(struct wahc *wa, size_t size) +{ + int result = 0; + struct device *dev = &wa->usb_iface->dev; + struct wa_notif_work *nw; + + /* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */ + BUG_ON(size > wa->nep_buffer_size); + if (size == 0) + goto out; + if (atomic_read(&wa->notifs_queued) > 200) { + if (printk_ratelimit()) + dev_err(dev, "Too many notifications queued, " + "throttling back\n"); + goto out; + } + nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC); + if (nw == NULL) { + if (printk_ratelimit()) + dev_err(dev, "No memory to queue notification\n"); + goto out; + } + INIT_WORK(&nw->work, wa_notif_dispatch); + nw->wa = wa_get(wa); + nw->size = size; + memcpy(nw->data, wa->nep_buffer, size); + atomic_inc(&wa->notifs_queued); /* Throttling ctl */ + queue_work(wusbd, &nw->work); +out: + /* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */ + return result; +} + +/* + * Callback for the notification event endpoint + * + * Check's that everything is fine and then passes the data to be + * queued to the workqueue. + */ +static void wa_nep_cb(struct urb *urb) +{ + int result; + struct wahc *wa = urb->context; + struct device *dev = &wa->usb_iface->dev; + + switch (result = urb->status) { + case 0: + result = wa_nep_queue(wa, urb->actual_length); + if (result < 0) + dev_err(dev, "NEP: unable to process notification(s): " + "%d\n", result); + break; + case -ECONNRESET: /* Not an error, but a controlled situation; */ + case -ENOENT: /* (we killed the URB)...so, no broadcast */ + case -ESHUTDOWN: + dev_dbg(dev, "NEP: going down %d\n", urb->status); + goto out; + default: /* On general errors, we retry unless it gets ugly */ + if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)) { + dev_err(dev, "NEP: URB max acceptable errors " + "exceeded, resetting device\n"); + wa_reset_all(wa); + goto out; + } + dev_err(dev, "NEP: URB error %d\n", urb->status); + } + result = wa_nep_arm(wa, GFP_ATOMIC); + if (result < 0) { + dev_err(dev, "NEP: cannot submit URB: %d\n", result); + wa_reset_all(wa); + } +out: + return; +} + +/* + * Initialize @wa's notification and event's endpoint stuff + * + * This includes the allocating the read buffer, the context ID + * allocation bitmap, the URB and submitting the URB. 
+ */ +int wa_nep_create(struct wahc *wa, struct usb_interface *iface) +{ + int result; + struct usb_endpoint_descriptor *epd; + struct usb_device *usb_dev = interface_to_usbdev(iface); + struct device *dev = &iface->dev; + + edc_init(&wa->nep_edc); + epd = &iface->cur_altsetting->endpoint[0].desc; + wa->nep_buffer_size = 1024; + wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL); + if (wa->nep_buffer == NULL) { + dev_err(dev, "Unable to allocate notification's read buffer\n"); + goto error_nep_buffer; + } + wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL); + if (wa->nep_urb == NULL) { + dev_err(dev, "Unable to allocate notification URB\n"); + goto error_urb_alloc; + } + usb_fill_int_urb(wa->nep_urb, usb_dev, + usb_rcvintpipe(usb_dev, epd->bEndpointAddress), + wa->nep_buffer, wa->nep_buffer_size, + wa_nep_cb, wa, epd->bInterval); + result = wa_nep_arm(wa, GFP_KERNEL); + if (result < 0) { + dev_err(dev, "Cannot submit notification URB: %d\n", result); + goto error_nep_arm; + } + return 0; + +error_nep_arm: + usb_free_urb(wa->nep_urb); +error_urb_alloc: + kfree(wa->nep_buffer); +error_nep_buffer: + return -ENOMEM; +} + +void wa_nep_destroy(struct wahc *wa) +{ + wa_nep_disarm(wa); + usb_free_urb(wa->nep_urb); + kfree(wa->nep_buffer); +} diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c new file mode 100644 index 0000000..bfe3752 --- /dev/null +++ b/drivers/usb/wusbcore/wa-rpipe.c @@ -0,0 +1,562 @@ +/* + * WUSB Wire Adapter + * rpipe management + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * FIXME: docs + * + * RPIPE + * + * Targetted at different downstream endpoints + * + * Descriptor: use to config the remote pipe. + * + * The number of blocks could be dynamic (wBlocks in descriptor is + * 0)--need to schedule them then. + * + * Each bit in wa->rpipe_bm represents if an rpipe is being used or + * not. Rpipes are represented with a 'struct wa_rpipe' that is + * attached to the hcpriv member of a 'struct usb_host_endpoint'. + * + * When you need to xfer data to an endpoint, you get an rpipe for it + * with wa_ep_rpipe_get(), which gives you a reference to the rpipe + * and keeps a single one (the first one) with the endpoint. When you + * are done transferring, you drop that reference. At the end the + * rpipe is always allocated and bound to the endpoint. There it might + * be recycled when not used. + * + * Addresses: + * + * We use a 1:1 mapping mechanism between port address (0 based + * index, actually) and the address. The USB stack knows about this. + * + * USB Stack port number 4 (1 based) + * WUSB code port index 3 (0 based) + * USB Addresss 5 (2 based -- 0 is for default, 1 for root hub) + * + * Now, because we don't use the concept as default address exactly + * like the (wired) USB code does, we need to kind of skip it. 
So we + * never take addresses from the urb->pipe, but from the + * urb->dev->devnum, to make sure that we always have the right + * destination address. + */ +#include +#include +#include +#include "wusbhc.h" +#include "wa-hc.h" + +#define D_LOCAL 0 +#include + + +static int __rpipe_get_descr(struct wahc *wa, + struct usb_rpipe_descriptor *descr, u16 index) +{ + ssize_t result; + struct device *dev = &wa->usb_iface->dev; + + /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() + * function because the arguments are different. + */ + d_printf(1, dev, "rpipe %u: get descr\n", index); + result = usb_control_msg( + wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), + USB_REQ_GET_DESCRIPTOR, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE, + USB_DT_RPIPE<<8, index, descr, sizeof(*descr), + 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "rpipe %u: get descriptor failed: %d\n", + index, (int)result); + goto error; + } + if (result < sizeof(*descr)) { + dev_err(dev, "rpipe %u: got short descriptor " + "(%zd vs %zd bytes needed)\n", + index, result, sizeof(*descr)); + result = -EINVAL; + goto error; + } + result = 0; + +error: + return result; +} + +/* + * + * The descriptor is assumed to be properly initialized (ie: you got + * it through __rpipe_get_descr()). + */ +static int __rpipe_set_descr(struct wahc *wa, + struct usb_rpipe_descriptor *descr, u16 index) +{ + ssize_t result; + struct device *dev = &wa->usb_iface->dev; + + /* we cannot use the usb_get_descriptor() function because the + * arguments are different. + */ + d_printf(1, dev, "rpipe %u: set descr\n", index); + result = usb_control_msg( + wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + USB_REQ_SET_DESCRIPTOR, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, + USB_DT_RPIPE<<8, index, descr, sizeof(*descr), + HZ / 10); + if (result < 0) { + dev_err(dev, "rpipe %u: set descriptor failed: %d\n", + index, (int)result); + goto error; + } + if (result < sizeof(*descr)) { + dev_err(dev, "rpipe %u: sent short descriptor " + "(%zd vs %zd bytes required)\n", + index, result, sizeof(*descr)); + result = -EINVAL; + goto error; + } + result = 0; + +error: + return result; + +} + +static void rpipe_init(struct wa_rpipe *rpipe) +{ + kref_init(&rpipe->refcnt); + spin_lock_init(&rpipe->seg_lock); + INIT_LIST_HEAD(&rpipe->seg_list); +} + +static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx) +{ + unsigned long flags; + + spin_lock_irqsave(&wa->rpipe_bm_lock, flags); + rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx); + if (rpipe_idx < wa->rpipes) + set_bit(rpipe_idx, wa->rpipe_bm); + spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags); + + return rpipe_idx; +} + +static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx) +{ + unsigned long flags; + + spin_lock_irqsave(&wa->rpipe_bm_lock, flags); + clear_bit(rpipe_idx, wa->rpipe_bm); + spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags); +} + +void rpipe_destroy(struct kref *_rpipe) +{ + struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); + u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); + d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index); + if (rpipe->ep) + rpipe->ep->hcpriv = NULL; + rpipe_put_idx(rpipe->wa, index); + wa_put(rpipe->wa); + kfree(rpipe); + d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index); +} +EXPORT_SYMBOL_GPL(rpipe_destroy); + +/* + * Locate an idle rpipe, create an structure for it and return it + * + * @wa is referenced and unlocked + * @crs enum rpipe_attr, required endpoint characteristics + * 
+ * The rpipe can be used only sequentially (not in parallel). + * + * The rpipe is moved into the "ready" state. + */ +static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs, + gfp_t gfp) +{ + int result; + unsigned rpipe_idx; + struct wa_rpipe *rpipe; + struct device *dev = &wa->usb_iface->dev; + + d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs); + rpipe = kzalloc(sizeof(*rpipe), gfp); + if (rpipe == NULL) + return -ENOMEM; + rpipe_init(rpipe); + + /* Look for an idle pipe */ + for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) { + rpipe_idx = rpipe_get_idx(wa, rpipe_idx); + if (rpipe_idx >= wa->rpipes) /* no more pipes :( */ + break; + result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx); + if (result < 0) + dev_err(dev, "Can't get descriptor for rpipe %u: %d\n", + rpipe_idx, result); + else if ((rpipe->descr.bmCharacteristics & crs) != 0) + goto found; + rpipe_put_idx(wa, rpipe_idx); + } + *prpipe = NULL; + kfree(rpipe); + d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs); + return -ENXIO; + +found: + set_bit(rpipe_idx, wa->rpipe_bm); + rpipe->wa = wa_get(wa); + *prpipe = rpipe; + d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs); + return 0; +} + +static int __rpipe_reset(struct wahc *wa, unsigned index) +{ + int result; + struct device *dev = &wa->usb_iface->dev; + + d_printf(1, dev, "rpipe %u: reset\n", index); + result = usb_control_msg( + wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + USB_REQ_RPIPE_RESET, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, + 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); + if (result < 0) + dev_err(dev, "rpipe %u: reset failed: %d\n", + index, result); + return result; +} + +/* + * Fake companion descriptor for ep0 + * + * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl + */ +static struct usb_wireless_ep_comp_descriptor epc0 = { + .bLength = sizeof(epc0), + .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP, +/* .bMaxBurst = 1, */ + .bMaxSequence = 31, +}; + +/* + * Look for EP companion descriptor + * + * Get there, look for Inara in the endpoint's extra descriptors + */ +static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find( + struct device *dev, struct usb_host_endpoint *ep) +{ + void *itr; + size_t itr_size; + struct usb_descriptor_header *hdr; + struct usb_wireless_ep_comp_descriptor *epcd; + + d_fnstart(3, dev, "(ep %p)\n", ep); + if (ep->desc.bEndpointAddress == 0) { + epcd = &epc0; + goto out; + } + itr = ep->extra; + itr_size = ep->extralen; + epcd = NULL; + while (itr_size > 0) { + if (itr_size < sizeof(*hdr)) { + dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors " + "at offset %zu: only %zu bytes left\n", + ep->desc.bEndpointAddress, + itr - (void *) ep->extra, itr_size); + break; + } + hdr = itr; + if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) { + epcd = itr; + break; + } + if (hdr->bLength > itr_size) { + dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor " + "at offset %zu (type 0x%02x) " + "length %d but only %zu bytes left\n", + ep->desc.bEndpointAddress, + itr - (void *) ep->extra, hdr->bDescriptorType, + hdr->bLength, itr_size); + break; + } + itr += hdr->bLength; + itr_size -= hdr->bDescriptorType; + } +out: + d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd); + return epcd; +} + +/* + * Aim an rpipe to its device & endpoint destination + * + * Make sure we change the address to unauthenticathed if the device + * is WUSB and it is not authenticated. 
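+ * + * In short, what the code below does for the device address (a + * condensed sketch of the real assignments): + * + * unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0; + * rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;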
+ */ +static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa, + struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp) +{ + int result = -ENOMSG; /* better code for lack of companion? */ + struct device *dev = &wa->usb_iface->dev; + struct usb_device *usb_dev = urb->dev; + struct usb_wireless_ep_comp_descriptor *epcd; + u8 unauth; + + d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", + rpipe, wa, ep, urb); + epcd = rpipe_epc_find(dev, ep); + if (epcd == NULL) { + dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", + ep->desc.bEndpointAddress); + goto error; + } + unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0; + __rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex)); + atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests)); + /* FIXME: block allocation system; request with queuing and timeout */ + /* FIXME: compute so seg_size > ep->maxpktsize */ + rpipe->descr.wBlocks = cpu_to_le16(16); /* given */ + /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */ + rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize); + rpipe->descr.bHSHubAddress = 0; /* reserved: zero */ + rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum); + /* FIXME: use maximum speed as supported or recommended by device */ + rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ? + UWB_PHY_RATE_53 : UWB_PHY_RATE_200; + d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", + urb->dev->devnum, urb->dev->devnum | unauth, + le16_to_cpu(rpipe->descr.wRPipeIndex), + usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); + /* see security.c:wusb_update_address() */ + if (unlikely(urb->dev->devnum == 0x80)) + rpipe->descr.bDeviceAddress = 0; + else + rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth; + rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress; + /* FIXME: bDataSequence */ + rpipe->descr.bDataSequence = 0; + /* FIXME: dwCurrentWindow */ + rpipe->descr.dwCurrentWindow = cpu_to_le32(1); + /* FIXME: bMaxDataSequence */ + rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1; + rpipe->descr.bInterval = ep->desc.bInterval; + /* FIXME: bOverTheAirInterval */ + rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */ + /* FIXME: xmit power & preamble blah blah */ + rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03; + /* rpipe->descr.bmCharacteristics RO */ + /* FIXME: bmRetryOptions */ + rpipe->descr.bmRetryOptions = 15; + /* FIXME: use for assessing link quality? */ + rpipe->descr.wNumTransactionErrors = 0; + result = __rpipe_set_descr(wa, &rpipe->descr, + le16_to_cpu(rpipe->descr.wRPipeIndex)); + if (result < 0) { + dev_err(dev, "Cannot aim rpipe: %d\n", result); + goto error; + } + result = 0; +error: + d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n", + rpipe, wa, ep, urb, result); + return result; +} + +/* + * Check an aimed rpipe to make sure it points to where we want + * + * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth + * space; when it is like that, we or 0x80 to make an unauth address. + */ +static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa, + const struct usb_host_endpoint *ep, + const struct urb *urb, gfp_t gfp) +{ + int result = 0; /* better code for lack of companion? */ + struct device *dev = &wa->usb_iface->dev; + struct usb_device *usb_dev = urb->dev; + u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 
0x80 : 0; + u8 portnum = wusb_port_no_to_idx(urb->dev->portnum); + + d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", + rpipe, wa, ep, urb); +#define AIM_CHECK(rdf, val, text) \ + do { \ + if (rpipe->descr.rdf != (val)) { \ + dev_err(dev, \ + "rpipe aim discrepancy: " #rdf " " text "\n", \ + rpipe->descr.rdf, (val)); \ + result = -EINVAL; \ + WARN_ON(1); \ + } \ + } while (0) + AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize), + "(%u vs %u)"); + AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)"); + AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ? + UWB_PHY_RATE_53 : UWB_PHY_RATE_200, + "(%u vs %u)"); + AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)"); + AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)"); + AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)"); + AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)"); +#undef AIM_CHECK + return result; +} + +#ifndef CONFIG_BUG +#define CONFIG_BUG 0 +#endif + +/* + * Make sure there is an rpipe allocated for an endpoint + * + * If already allocated, we just refcount it; if not, we get an + * idle one, aim it to the right location and take it. + * + * Attaches to ep->hcpriv and rpipe->ep to ep. + */ +int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep, + struct urb *urb, gfp_t gfp) +{ + int result = 0; + struct device *dev = &wa->usb_iface->dev; + struct wa_rpipe *rpipe; + u8 eptype; + + d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, + gfp); + mutex_lock(&wa->rpipe_mutex); + rpipe = ep->hcpriv; + if (rpipe != NULL) { + if (CONFIG_BUG == 1) { + result = rpipe_check_aim(rpipe, wa, ep, urb, gfp); + if (result < 0) + goto error; + } + __rpipe_get(rpipe); + d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n", + ep->desc.bEndpointAddress, + le16_to_cpu(rpipe->descr.wRPipeIndex)); + } else { + /* hmm, assign idle rpipe, aim it */ + result = -ENOBUFS; + eptype = ep->desc.bmAttributes & 0x03; + result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp); + if (result < 0) + goto error; + result = rpipe_aim(rpipe, wa, ep, urb, gfp); + if (result < 0) { + rpipe_put(rpipe); + goto error; + } + ep->hcpriv = rpipe; + rpipe->ep = ep; + __rpipe_get(rpipe); /* for caching into ep->hcpriv */ + d_printf(2, dev, "ep 0x%02x: using rpipe %u\n", + ep->desc.bEndpointAddress, + le16_to_cpu(rpipe->descr.wRPipeIndex)); + } + d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr)); +error: + mutex_unlock(&wa->rpipe_mutex); + d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp); + return result; +} + +/* + * Allocate the bitmap for each rpipe. + */ +int wa_rpipes_create(struct wahc *wa) +{ + wa->rpipes = wa->wa_descr->wNumRPipes; + wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long), + GFP_KERNEL); + if (wa->rpipe_bm == NULL) + return -ENOMEM; + return 0; +} + +void wa_rpipes_destroy(struct wahc *wa) +{ + struct device *dev = &wa->usb_iface->dev; + d_fnstart(3, dev, "(wa %p)\n", wa); + if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) { + char buf[256]; + WARN_ON(1); + bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes); + dev_err(dev, "BUG: pipes not released on exit: %s\n", buf); + } + kfree(wa->rpipe_bm); + d_fnend(3, dev, "(wa %p)\n", wa); +} + +/* + * Release resources allocated for an endpoint + * + * If there is an associated rpipe to this endpoint, Abort any pending + * transfers and put it. If the rpipe ends up being destroyed, + * __rpipe_destroy() will cleanup ep->hcpriv. 
+ * + * This is called before calling hcd->stop(), so you don't need to do + * anything else in there. + */ +void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) +{ + struct device *dev = &wa->usb_iface->dev; + struct wa_rpipe *rpipe; + d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep); + mutex_lock(&wa->rpipe_mutex); + rpipe = ep->hcpriv; + if (rpipe != NULL) { + unsigned rc = atomic_read(&rpipe->refcnt.refcount); + int result; + u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); + + if (rc != 1) + d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n", + wa, ep, rpipe, rc); + + d_printf(1, dev, "rpipe %u: abort\n", index); + result = usb_control_msg( + wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), + USB_REQ_RPIPE_ABORT, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE, + 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); + if (result < 0 && result != -ENODEV /* dev is gone */) + d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n", + wa, index, result); + rpipe_put(rpipe); + } + mutex_unlock(&wa->rpipe_mutex); + d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep); + return; +} +EXPORT_SYMBOL_GPL(rpipe_ep_disable); diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c new file mode 100644 index 0000000..7d192f3 --- /dev/null +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -0,0 +1,1709 @@ +/* + * WUSB Wire Adapter + * Data transfer and URB enqueing + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * How transfers work: get a buffer, break it up in segments (segment + * size is a multiple of the maxpacket size). For each segment issue a + * segment request (struct wa_xfer_*), then send the data buffer if + * out or nothing if in (all over the DTO endpoint). + * + * For each submitted segment request, a notification will come over + * the NEP endpoint and a transfer result (struct xfer_result) will + * arrive in the DTI URB. Read it, get the xfer ID, see if there is + * data coming (inbound transfer), schedule a read and handle it. + * + * Sounds simple, it is a pain to implement. + * + * + * ENTRY POINTS + * + * FIXME + * + * LIFE CYCLE / STATE DIAGRAM + * + * FIXME + * + * THIS CODE IS DISGUSTING + * + * Warned you are; it's my second try and still not happy with it. + * + * NOTES: + * + * - No iso + * + * - Supports DMA xfers, control, bulk and maybe interrupt + * + * - Does not recycle unused rpipes + * + * An rpipe is assigned to an endpoint the first time it is used, + * and then it's there, assigned, until the endpoint is disabled + * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the + * rpipe to the endpoint is done under the wa->rpipe_sem semaphore + * (should be a mutex). + * + * Two methods it could be done: + * + * (a) set up a timer everytime an rpipe's use count drops to 1 + * (which means unused) or when a transfer ends. 
Reset the + * timer when a xfer is queued. If the timer expires, release + * the rpipe [see rpipe_ep_disable()]. + * + * (b) when looking for free rpipes to attach [rpipe_get_by_ep()], + * when none are found go over the list, check their endpoint + * and their activity record (if no last-xfer-done-ts in the + * last x seconds) take it + * + * However, due to the fact that we have a set of limited + * resources (max-segments-at-the-same-time per xfer, + * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end + * we are going to have to rebuild all this based on an scheduler, + * to where we have a list of transactions to do and based on the + * availability of the different requried components (blocks, + * rpipes, segment slots, etc), we go scheduling them. Painful. + */ +#include +#include +#include +#include "wa-hc.h" +#include "wusbhc.h" + +#undef D_LOCAL +#define D_LOCAL 0 /* 0 disabled, > 0 different levels... */ +#include + +enum { + WA_SEGS_MAX = 255, +}; + +enum wa_seg_status { + WA_SEG_NOTREADY, + WA_SEG_READY, + WA_SEG_DELAYED, + WA_SEG_SUBMITTED, + WA_SEG_PENDING, + WA_SEG_DTI_PENDING, + WA_SEG_DONE, + WA_SEG_ERROR, + WA_SEG_ABORTED, +}; + +static void wa_xfer_delayed_run(struct wa_rpipe *); + +/* + * Life cycle governed by 'struct urb' (the refcount of the struct is + * that of the 'struct urb' and usb_free_urb() would free the whole + * struct). + */ +struct wa_seg { + struct urb urb; + struct urb *dto_urb; /* for data output? */ + struct list_head list_node; /* for rpipe->req_list */ + struct wa_xfer *xfer; /* out xfer */ + u8 index; /* which segment we are */ + enum wa_seg_status status; + ssize_t result; /* bytes xfered or error */ + struct wa_xfer_hdr xfer_hdr; + u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */ +}; + +static void wa_seg_init(struct wa_seg *seg) +{ + /* usb_init_urb() repeats a lot of work, so we do it here */ + kref_init(&seg->urb.kref); +} + +/* + * Protected by xfer->lock + * + */ +struct wa_xfer { + struct kref refcnt; + struct list_head list_node; + spinlock_t lock; + u32 id; + + struct wahc *wa; /* Wire adapter we are plugged to */ + struct usb_host_endpoint *ep; + struct urb *urb; /* URB we are transfering for */ + struct wa_seg **seg; /* transfer segments */ + u8 segs, segs_submitted, segs_done; + unsigned is_inbound:1; + unsigned is_dma:1; + size_t seg_size; + int result; + + gfp_t gfp; /* allocation mask */ + + struct wusb_dev *wusb_dev; /* for activity timestamps */ +}; + +static inline void wa_xfer_init(struct wa_xfer *xfer) +{ + kref_init(&xfer->refcnt); + INIT_LIST_HEAD(&xfer->list_node); + spin_lock_init(&xfer->lock); +} + +/* + * Destory a transfer structure + * + * Note that the xfer->seg[index] thingies follow the URB life cycle, + * so we need to put them, not free them. 
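+ * Each wa_seg embeds a struct urb as its first member, so the final
+ * usb_put_urb() on that URB is what actually frees the segment (see
+ * __wa_xfer_setup_segs()).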
+ */ +static void wa_xfer_destroy(struct kref *_xfer) +{ + struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt); + if (xfer->seg) { + unsigned cnt; + for (cnt = 0; cnt < xfer->segs; cnt++) { + if (xfer->is_inbound) + usb_put_urb(xfer->seg[cnt]->dto_urb); + usb_put_urb(&xfer->seg[cnt]->urb); + } + } + kfree(xfer); + d_printf(2, NULL, "xfer %p destroyed\n", xfer); +} + +static void wa_xfer_get(struct wa_xfer *xfer) +{ + kref_get(&xfer->refcnt); +} + +static void wa_xfer_put(struct wa_xfer *xfer) +{ + d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n", + xfer, atomic_read(&xfer->refcnt.refcount)); + kref_put(&xfer->refcnt, wa_xfer_destroy); + d_fnend(3, NULL, "(xfer %p) = void\n", xfer); +} + +/* + * xfer is referenced + * + * xfer->lock has to be unlocked + * + * We take xfer->lock for setting the result; this is a barrier + * against drivers/usb/core/hcd.c:unlink1() being called after we call + * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a + * reference to the transfer. + */ +static void wa_xfer_giveback(struct wa_xfer *xfer) +{ + unsigned long flags; + d_fnstart(3, NULL, "(xfer %p)\n", xfer); + spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); + list_del_init(&xfer->list_node); + spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); + /* FIXME: segmentation broken -- kills DWA */ + wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); + wa_put(xfer->wa); + wa_xfer_put(xfer); + d_fnend(3, NULL, "(xfer %p) = void\n", xfer); +} + +/* + * xfer is referenced + * + * xfer->lock has to be unlocked + */ +static void wa_xfer_completion(struct wa_xfer *xfer) +{ + d_fnstart(3, NULL, "(xfer %p)\n", xfer); + if (xfer->wusb_dev) + wusb_dev_put(xfer->wusb_dev); + rpipe_put(xfer->ep->hcpriv); + wa_xfer_giveback(xfer); + d_fnend(3, NULL, "(xfer %p) = void\n", xfer); + return; +} + +/* + * If transfer is done, wrap it up and return true + * + * xfer->lock has to be locked + */ +static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) +{ + unsigned result, cnt; + struct wa_seg *seg; + struct urb *urb = xfer->urb; + unsigned found_short = 0; + + d_fnstart(3, NULL, "(xfer %p)\n", xfer); + result = xfer->segs_done == xfer->segs_submitted; + if (result == 0) + goto out; + urb->actual_length = 0; + for (cnt = 0; cnt < xfer->segs; cnt++) { + seg = xfer->seg[cnt]; + switch (seg->status) { + case WA_SEG_DONE: + if (found_short && seg->result > 0) { + if (printk_ratelimit()) + printk(KERN_ERR "xfer %p#%u: bad short " + "segments (%zu)\n", xfer, cnt, + seg->result); + urb->status = -EINVAL; + goto out; + } + urb->actual_length += seg->result; + if (seg->result < xfer->seg_size + && cnt != xfer->segs-1) + found_short = 1; + d_printf(2, NULL, "xfer %p#%u: DONE short %d " + "result %zu urb->actual_length %d\n", + xfer, seg->index, found_short, seg->result, + urb->actual_length); + break; + case WA_SEG_ERROR: + xfer->result = seg->result; + d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n", + xfer, seg->index, seg->result); + goto out; + case WA_SEG_ABORTED: + WARN_ON(urb->status != -ECONNRESET + && urb->status != -ENOENT); + d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n", + xfer, seg->index, urb->status); + xfer->result = urb->status; + goto out; + default: + /* if (printk_ratelimit()) */ + printk(KERN_ERR "xfer %p#%u: " + "is_done bad state %d\n", + xfer, cnt, seg->status); + xfer->result = -EINVAL; + WARN_ON(1); + goto out; + } + } + xfer->result = 0; +out: + d_fnend(3, NULL, "(xfer %p) = void\n", xfer); + return result; +} + +/* + * Initialize a transfer's ID + * 
+ * We need to use a sequential number; if we use the pointer or the + * hash of the pointer, it can repeat over sequential transfers and + * then it will confuse the HWA....wonder why in hell they put a 32 + * bit handle in there then. + */ +static void wa_xfer_id_init(struct wa_xfer *xfer) +{ + xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); +} + +/* + * Return the xfer's ID associated with xfer + * + * Need to generate a + */ +static u32 wa_xfer_id(struct wa_xfer *xfer) +{ + return xfer->id; +} + +/* + * Search for a transfer list ID on the HCD's URB list + * + * For 32 bit architectures, we use the pointer itself; for 64 bits, a + * 32-bit hash of the pointer. + * + * @returns NULL if not found. + */ +static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id) +{ + unsigned long flags; + struct wa_xfer *xfer_itr; + spin_lock_irqsave(&wa->xfer_list_lock, flags); + list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) { + if (id == xfer_itr->id) { + wa_xfer_get(xfer_itr); + goto out; + } + } + xfer_itr = NULL; +out: + spin_unlock_irqrestore(&wa->xfer_list_lock, flags); + return xfer_itr; +} + +struct wa_xfer_abort_buffer { + struct urb urb; + struct wa_xfer_abort cmd; +}; + +static void __wa_xfer_abort_cb(struct urb *urb) +{ + struct wa_xfer_abort_buffer *b = urb->context; + usb_put_urb(&b->urb); +} + +/* + * Aborts an ongoing transaction + * + * Assumes the transfer is referenced and locked and in a submitted + * state (mainly that there is an endpoint/rpipe assigned). + * + * The callback (see above) does nothing but freeing up the data by + * putting the URB. Because the URB is allocated at the head of the + * struct, the whole space we allocated is kfreed. + * + * We'll get an 'aborted transaction' xfer result on DTI, that'll + * politely ignore because at this point the transaction has been + * marked as aborted already. + */ +static void __wa_xfer_abort(struct wa_xfer *xfer) +{ + int result; + struct device *dev = &xfer->wa->usb_iface->dev; + struct wa_xfer_abort_buffer *b; + struct wa_rpipe *rpipe = xfer->ep->hcpriv; + + b = kmalloc(sizeof(*b), GFP_ATOMIC); + if (b == NULL) + goto error_kmalloc; + b->cmd.bLength = sizeof(b->cmd); + b->cmd.bRequestType = WA_XFER_ABORT; + b->cmd.wRPipe = rpipe->descr.wRPipeIndex; + b->cmd.dwTransferID = wa_xfer_id(xfer); + + usb_init_urb(&b->urb); + usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, + usb_sndbulkpipe(xfer->wa->usb_dev, + xfer->wa->dto_epd->bEndpointAddress), + &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b); + result = usb_submit_urb(&b->urb, GFP_ATOMIC); + if (result < 0) + goto error_submit; + return; /* callback frees! 
*/ + + +error_submit: + if (printk_ratelimit()) + dev_err(dev, "xfer %p: Can't submit abort request: %d\n", + xfer, result); + kfree(b); +error_kmalloc: + return; + +} + +/* + * + * @returns < 0 on error, transfer segment request size if ok + */ +static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, + enum wa_xfer_type *pxfer_type) +{ + ssize_t result; + struct device *dev = &xfer->wa->usb_iface->dev; + size_t maxpktsize; + struct urb *urb = xfer->urb; + struct wa_rpipe *rpipe = xfer->ep->hcpriv; + + d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", + xfer, rpipe, urb); + switch (rpipe->descr.bmAttribute & 0x3) { + case USB_ENDPOINT_XFER_CONTROL: + *pxfer_type = WA_XFER_TYPE_CTL; + result = sizeof(struct wa_xfer_ctl); + break; + case USB_ENDPOINT_XFER_INT: + case USB_ENDPOINT_XFER_BULK: + *pxfer_type = WA_XFER_TYPE_BI; + result = sizeof(struct wa_xfer_bi); + break; + case USB_ENDPOINT_XFER_ISOC: + dev_err(dev, "FIXME: ISOC not implemented\n"); + result = -ENOSYS; + goto error; + default: + /* never happens */ + BUG(); + result = -EINVAL; /* shut gcc up */ + }; + xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0; + xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; + xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks) + * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1); + /* Compute the segment size and make sure it is a multiple of + * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of + * a check (FIXME) */ + maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize); + if (xfer->seg_size < maxpktsize) { + dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize " + "%zu\n", xfer->seg_size, maxpktsize); + result = -EINVAL; + goto error; + } + xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; + xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1) + / xfer->seg_size; + if (xfer->segs >= WA_SEGS_MAX) { + dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n", + (int)(urb->transfer_buffer_length / xfer->seg_size), + WA_SEGS_MAX); + result = -EINVAL; + goto error; + } + if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) + xfer->segs = 1; +error: + d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", + xfer, rpipe, urb, (int)result); + return result; +} + +/** Fill in the common request header and xfer-type specific data. */ +static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, + struct wa_xfer_hdr *xfer_hdr0, + enum wa_xfer_type xfer_type, + size_t xfer_hdr_size) +{ + struct wa_rpipe *rpipe = xfer->ep->hcpriv; + + xfer_hdr0 = &xfer->seg[0]->xfer_hdr; + xfer_hdr0->bLength = xfer_hdr_size; + xfer_hdr0->bRequestType = xfer_type; + xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex; + xfer_hdr0->dwTransferID = wa_xfer_id(xfer); + xfer_hdr0->bTransferSegment = 0; + switch (xfer_type) { + case WA_XFER_TYPE_CTL: { + struct wa_xfer_ctl *xfer_ctl = + container_of(xfer_hdr0, struct wa_xfer_ctl, hdr); + xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0; + BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP + && xfer->urb->setup_packet == NULL); + memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet, + sizeof(xfer_ctl->baSetupData)); + break; + } + case WA_XFER_TYPE_BI: + break; + case WA_XFER_TYPE_ISO: + printk(KERN_ERR "FIXME: ISOC not implemented\n"); + default: + BUG(); + }; +} + +/* + * Callback for the OUT data phase of the segment request + * + * Check wa_seg_cb(); most comments also apply here because this + * function does almost the same thing and they work closely + * together. 
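+ * (wa_seg_cb() covers the segment request URB itself; this callback
+ * only handles the OUT data phase carried by seg->dto_urb.)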
+ * + * If the seg request has failed but this DTO phase has suceeded, + * wa_seg_cb() has already failed the segment and moved the + * status to WA_SEG_ERROR, so this will go through 'case 0' and + * effectively do nothing. + */ +static void wa_seg_dto_cb(struct urb *urb) +{ + struct wa_seg *seg = urb->context; + struct wa_xfer *xfer = seg->xfer; + struct wahc *wa; + struct device *dev; + struct wa_rpipe *rpipe; + unsigned long flags; + unsigned rpipe_ready = 0; + u8 done = 0; + + d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); + switch (urb->status) { + case 0: + spin_lock_irqsave(&xfer->lock, flags); + wa = xfer->wa; + dev = &wa->usb_iface->dev; + d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n", + xfer, seg->index, urb->actual_length); + if (seg->status < WA_SEG_PENDING) + seg->status = WA_SEG_PENDING; + seg->result = urb->actual_length; + spin_unlock_irqrestore(&xfer->lock, flags); + break; + case -ECONNRESET: /* URB unlinked; no need to do anything */ + case -ENOENT: /* as it was done by the who unlinked us */ + break; + default: /* Other errors ... */ + spin_lock_irqsave(&xfer->lock, flags); + wa = xfer->wa; + dev = &wa->usb_iface->dev; + rpipe = xfer->ep->hcpriv; + if (printk_ratelimit()) + dev_err(dev, "xfer %p#%u: data out error %d\n", + xfer, seg->index, urb->status); + if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)){ + dev_err(dev, "DTO: URB max acceptable errors " + "exceeded, resetting device\n"); + wa_reset_all(wa); + } + if (seg->status != WA_SEG_ERROR) { + seg->status = WA_SEG_ERROR; + seg->result = urb->status; + xfer->segs_done++; + __wa_xfer_abort(xfer); + rpipe_ready = rpipe_avail_inc(rpipe); + done = __wa_xfer_is_done(xfer); + } + spin_unlock_irqrestore(&xfer->lock, flags); + if (done) + wa_xfer_completion(xfer); + if (rpipe_ready) + wa_xfer_delayed_run(rpipe); + } + d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); +} + +/* + * Callback for the segment request + * + * If succesful transition state (unless already transitioned or + * outbound transfer); otherwise, take a note of the error, mark this + * segment done and try completion. + * + * Note we don't access until we are sure that the transfer hasn't + * been cancelled (ECONNRESET, ENOENT), which could mean that + * seg->xfer could be already gone. + * + * We have to check before setting the status to WA_SEG_PENDING + * because sometimes the xfer result callback arrives before this + * callback (geeeeeeze), so it might happen that we are already in + * another state. As well, we don't set it if the transfer is inbound, + * as in that case, wa_seg_dto_cb will do it when the OUT data phase + * finishes. + */ +static void wa_seg_cb(struct urb *urb) +{ + struct wa_seg *seg = urb->context; + struct wa_xfer *xfer = seg->xfer; + struct wahc *wa; + struct device *dev; + struct wa_rpipe *rpipe; + unsigned long flags; + unsigned rpipe_ready; + u8 done = 0; + + d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); + switch (urb->status) { + case 0: + spin_lock_irqsave(&xfer->lock, flags); + wa = xfer->wa; + dev = &wa->usb_iface->dev; + d_printf(2, dev, "xfer %p#%u: request done\n", + xfer, seg->index); + if (xfer->is_inbound && seg->status < WA_SEG_PENDING) + seg->status = WA_SEG_PENDING; + spin_unlock_irqrestore(&xfer->lock, flags); + break; + case -ECONNRESET: /* URB unlinked; no need to do anything */ + case -ENOENT: /* as it was done by the who unlinked us */ + break; + default: /* Other errors ... 
*/ + spin_lock_irqsave(&xfer->lock, flags); + wa = xfer->wa; + dev = &wa->usb_iface->dev; + rpipe = xfer->ep->hcpriv; + if (printk_ratelimit()) + dev_err(dev, "xfer %p#%u: request error %d\n", + xfer, seg->index, urb->status); + if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)){ + dev_err(dev, "DTO: URB max acceptable errors " + "exceeded, resetting device\n"); + wa_reset_all(wa); + } + usb_unlink_urb(seg->dto_urb); + seg->status = WA_SEG_ERROR; + seg->result = urb->status; + xfer->segs_done++; + __wa_xfer_abort(xfer); + rpipe_ready = rpipe_avail_inc(rpipe); + done = __wa_xfer_is_done(xfer); + spin_unlock_irqrestore(&xfer->lock, flags); + if (done) + wa_xfer_completion(xfer); + if (rpipe_ready) + wa_xfer_delayed_run(rpipe); + } + d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); +} + +/* + * Allocate the segs array and initialize each of them + * + * The segments are freed by wa_xfer_destroy() when the xfer use count + * drops to zero; however, because each segment is given the same life + * cycle as the USB URB it contains, it is actually freed by + * usb_put_urb() on the contained USB URB (twisted, eh?). + */ +static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) +{ + int result, cnt; + size_t alloc_size = sizeof(*xfer->seg[0]) + - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; + struct usb_device *usb_dev = xfer->wa->usb_dev; + const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; + struct wa_seg *seg; + size_t buf_itr, buf_size, buf_itr_size; + + result = -ENOMEM; + xfer->seg = kzalloc(xfer->segs * sizeof(xfer->seg[0]), GFP_ATOMIC); + if (xfer->seg == NULL) + goto error_segs_kzalloc; + buf_itr = 0; + buf_size = xfer->urb->transfer_buffer_length; + for (cnt = 0; cnt < xfer->segs; cnt++) { + seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC); + if (seg == NULL) + goto error_seg_kzalloc; + wa_seg_init(seg); + seg->xfer = xfer; + seg->index = cnt; + usb_fill_bulk_urb(&seg->urb, usb_dev, + usb_sndbulkpipe(usb_dev, + dto_epd->bEndpointAddress), + &seg->xfer_hdr, xfer_hdr_size, + wa_seg_cb, seg); + buf_itr_size = buf_size > xfer->seg_size ? 
+ xfer->seg_size : buf_size; + if (xfer->is_inbound == 0 && buf_size > 0) { + seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (seg->dto_urb == NULL) + goto error_dto_alloc; + usb_fill_bulk_urb( + seg->dto_urb, usb_dev, + usb_sndbulkpipe(usb_dev, + dto_epd->bEndpointAddress), + NULL, 0, wa_seg_dto_cb, seg); + if (xfer->is_dma) { + seg->dto_urb->transfer_dma = + xfer->urb->transfer_dma + buf_itr; + seg->dto_urb->transfer_flags |= + URB_NO_TRANSFER_DMA_MAP; + } else + seg->dto_urb->transfer_buffer = + xfer->urb->transfer_buffer + buf_itr; + seg->dto_urb->transfer_buffer_length = buf_itr_size; + } + seg->status = WA_SEG_READY; + buf_itr += buf_itr_size; + buf_size -= buf_itr_size; + } + return 0; + +error_dto_alloc: + kfree(xfer->seg[cnt]); + cnt--; +error_seg_kzalloc: + /* use the fact that cnt is left at were it failed */ + for (; cnt > 0; cnt--) { + if (xfer->is_inbound == 0) + kfree(xfer->seg[cnt]->dto_urb); + kfree(xfer->seg[cnt]); + } +error_segs_kzalloc: + return result; +} + +/* + * Allocates all the stuff needed to submit a transfer + * + * Breaks the whole data buffer in a list of segments, each one has a + * structure allocated to it and linked in xfer->seg[index] + * + * FIXME: merge setup_segs() and the last part of this function, no + * need to do two for loops when we could run everything in a + * single one + */ +static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) +{ + int result; + struct device *dev = &xfer->wa->usb_iface->dev; + enum wa_xfer_type xfer_type = 0; /* shut up GCC */ + size_t xfer_hdr_size, cnt, transfer_size; + struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; + + d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", + xfer, xfer->ep->hcpriv, urb); + + result = __wa_xfer_setup_sizes(xfer, &xfer_type); + if (result < 0) + goto error_setup_sizes; + xfer_hdr_size = result; + result = __wa_xfer_setup_segs(xfer, xfer_hdr_size); + if (result < 0) { + dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n", + xfer, xfer->segs, result); + goto error_setup_segs; + } + /* Fill the first header */ + xfer_hdr0 = &xfer->seg[0]->xfer_hdr; + wa_xfer_id_init(xfer); + __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); + + /* Fill remainig headers */ + xfer_hdr = xfer_hdr0; + transfer_size = urb->transfer_buffer_length; + xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? + xfer->seg_size : transfer_size; + transfer_size -= xfer->seg_size; + for (cnt = 1; cnt < xfer->segs; cnt++) { + xfer_hdr = &xfer->seg[cnt]->xfer_hdr; + memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); + xfer_hdr->bTransferSegment = cnt; + xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ? + cpu_to_le32(xfer->seg_size) + : cpu_to_le32(transfer_size); + xfer->seg[cnt]->status = WA_SEG_READY; + transfer_size -= xfer->seg_size; + } + xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ + result = 0; +error_setup_segs: +error_setup_sizes: + d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", + xfer, xfer->ep->hcpriv, urb, result); + return result; +} + +/* + * + * + * rpipe->seg_lock is held! 
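+ *
+ * Submit the segment request URB and, for outbound transfers with
+ * data, its DTO URB; on success the segment moves to WA_SEG_SUBMITTED
+ * and one of the rpipe's request slots is consumed (rpipe_avail_dec()).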
+ */ +static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, + struct wa_seg *seg) +{ + int result; + result = usb_submit_urb(&seg->urb, GFP_ATOMIC); + if (result < 0) { + printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n", + xfer, seg->index, result); + goto error_seg_submit; + } + if (seg->dto_urb) { + result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); + if (result < 0) { + printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n", + xfer, seg->index, result); + goto error_dto_submit; + } + } + seg->status = WA_SEG_SUBMITTED; + rpipe_avail_dec(rpipe); + return 0; + +error_dto_submit: + usb_unlink_urb(&seg->urb); +error_seg_submit: + seg->status = WA_SEG_ERROR; + seg->result = result; + return result; +} + +/* + * Execute more queued request segments until the maximum concurrent allowed + * + * The ugly unlock/lock sequence on the error path is needed as the + * xfer->lock normally nests the seg_lock and not viceversa. + * + */ +static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) +{ + int result; + struct device *dev = &rpipe->wa->usb_iface->dev; + struct wa_seg *seg; + struct wa_xfer *xfer; + unsigned long flags; + + d_fnstart(1, dev, "(rpipe #%d) %d segments available\n", + le16_to_cpu(rpipe->descr.wRPipeIndex), + atomic_read(&rpipe->segs_available)); + spin_lock_irqsave(&rpipe->seg_lock, flags); + while (atomic_read(&rpipe->segs_available) > 0 + && !list_empty(&rpipe->seg_list)) { + seg = list_entry(rpipe->seg_list.next, struct wa_seg, + list_node); + list_del(&seg->list_node); + xfer = seg->xfer; + result = __wa_seg_submit(rpipe, xfer, seg); + d_printf(1, dev, "xfer %p#%u submitted from delayed " + "[%d segments available] %d\n", + xfer, seg->index, + atomic_read(&rpipe->segs_available), result); + if (unlikely(result < 0)) { + spin_unlock_irqrestore(&rpipe->seg_lock, flags); + spin_lock_irqsave(&xfer->lock, flags); + __wa_xfer_abort(xfer); + xfer->segs_done++; + spin_unlock_irqrestore(&xfer->lock, flags); + spin_lock_irqsave(&rpipe->seg_lock, flags); + } + } + spin_unlock_irqrestore(&rpipe->seg_lock, flags); + d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n", + le16_to_cpu(rpipe->descr.wRPipeIndex), + atomic_read(&rpipe->segs_available)); + +} + +/* + * + * xfer->lock is taken + * + * On failure submitting we just stop submitting and return error; + * wa_urb_enqueue_b() will execute the completion path + */ +static int __wa_xfer_submit(struct wa_xfer *xfer) +{ + int result; + struct wahc *wa = xfer->wa; + struct device *dev = &wa->usb_iface->dev; + unsigned cnt; + struct wa_seg *seg; + unsigned long flags; + struct wa_rpipe *rpipe = xfer->ep->hcpriv; + size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests); + u8 available; + u8 empty; + + d_fnstart(3, dev, "(xfer %p [rpipe %p])\n", + xfer, xfer->ep->hcpriv); + + spin_lock_irqsave(&wa->xfer_list_lock, flags); + list_add_tail(&xfer->list_node, &wa->xfer_list); + spin_unlock_irqrestore(&wa->xfer_list_lock, flags); + + BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests); + result = 0; + spin_lock_irqsave(&rpipe->seg_lock, flags); + for (cnt = 0; cnt < xfer->segs; cnt++) { + available = atomic_read(&rpipe->segs_available); + empty = list_empty(&rpipe->seg_list); + seg = xfer->seg[cnt]; + d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n", + xfer, cnt, available, empty, + available == 0 || !empty ? 
"delayed" : "submitted"); + if (available == 0 || !empty) { + d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt); + seg->status = WA_SEG_DELAYED; + list_add_tail(&seg->list_node, &rpipe->seg_list); + } else { + result = __wa_seg_submit(rpipe, xfer, seg); + if (result < 0) + goto error_seg_submit; + } + xfer->segs_submitted++; + } + spin_unlock_irqrestore(&rpipe->seg_lock, flags); + d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, + xfer->ep->hcpriv); + return result; + +error_seg_submit: + __wa_xfer_abort(xfer); + spin_unlock_irqrestore(&rpipe->seg_lock, flags); + d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, + xfer->ep->hcpriv); + return result; +} + +/* + * Second part of a URB/transfer enqueuement + * + * Assumes this comes from wa_urb_enqueue() [maybe through + * wa_urb_enqueue_run()]. At this point: + * + * xfer->wa filled and refcounted + * xfer->ep filled with rpipe refcounted if + * delayed == 0 + * xfer->urb filled and refcounted (this is the case when called + * from wa_urb_enqueue() as we come from usb_submit_urb() + * and when called by wa_urb_enqueue_run(), as we took an + * extra ref dropped by _run() after we return). + * xfer->gfp filled + * + * If we fail at __wa_xfer_submit(), then we just check if we are done + * and if so, we run the completion procedure. However, if we are not + * yet done, we do nothing and wait for the completion handlers from + * the submitted URBs or from the xfer-result path to kick in. If xfer + * result never kicks in, the xfer will timeout from the USB code and + * dequeue() will be called. + */ +static void wa_urb_enqueue_b(struct wa_xfer *xfer) +{ + int result; + unsigned long flags; + struct urb *urb = xfer->urb; + struct wahc *wa = xfer->wa; + struct wusbhc *wusbhc = wa->wusb; + struct device *dev = &wa->usb_iface->dev; + struct wusb_dev *wusb_dev; + unsigned done; + + d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb); + result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); + if (result < 0) + goto error_rpipe_get; + result = -ENODEV; + /* FIXME: segmentation broken -- kills DWA */ + mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ + if (urb->dev == NULL) + goto error_dev_gone; + wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); + if (wusb_dev == NULL) { + mutex_unlock(&wusbhc->mutex); + goto error_dev_gone; + } + mutex_unlock(&wusbhc->mutex); + + spin_lock_irqsave(&xfer->lock, flags); + xfer->wusb_dev = wusb_dev; + result = urb->status; + if (urb->status != -EINPROGRESS) + goto error_dequeued; + + result = __wa_xfer_setup(xfer, urb); + if (result < 0) + goto error_xfer_setup; + result = __wa_xfer_submit(xfer); + if (result < 0) + goto error_xfer_submit; + spin_unlock_irqrestore(&xfer->lock, flags); + d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb); + return; + + /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() + * does a wa_xfer_put() that will call wa_xfer_destroy() and clean + * upundo setup(). 
+ */ +error_xfer_setup: +error_dequeued: + spin_unlock_irqrestore(&xfer->lock, flags); + /* FIXME: segmentation broken, kills DWA */ + if (wusb_dev) + wusb_dev_put(wusb_dev); +error_dev_gone: + rpipe_put(xfer->ep->hcpriv); +error_rpipe_get: + xfer->result = result; + wa_xfer_giveback(xfer); + d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); + return; + +error_xfer_submit: + done = __wa_xfer_is_done(xfer); + xfer->result = result; + spin_unlock_irqrestore(&xfer->lock, flags); + if (done) + wa_xfer_completion(xfer); + d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); + return; +} + +/* + * Execute the delayed transfers in the Wire Adapter @wa + * + * We need to be careful here, as dequeue() could be called in the + * middle. That's why we do the whole thing under the + * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock + * and then checks the list -- so as we would be acquiring in inverse + * order, we just drop the lock once we have the xfer and reacquire it + * later. + */ +void wa_urb_enqueue_run(struct work_struct *ws) +{ + struct wahc *wa = container_of(ws, struct wahc, xfer_work); + struct device *dev = &wa->usb_iface->dev; + struct wa_xfer *xfer, *next; + struct urb *urb; + + d_fnstart(3, dev, "(wa %p)\n", wa); + spin_lock_irq(&wa->xfer_list_lock); + list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list, + list_node) { + list_del_init(&xfer->list_node); + spin_unlock_irq(&wa->xfer_list_lock); + + urb = xfer->urb; + wa_urb_enqueue_b(xfer); + usb_put_urb(urb); /* taken when queuing */ + + spin_lock_irq(&wa->xfer_list_lock); + } + spin_unlock_irq(&wa->xfer_list_lock); + d_fnend(3, dev, "(wa %p) = void\n", wa); +} +EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); + +/* + * Submit a transfer to the Wire Adapter in a delayed way + * + * The process of enqueuing involves possible sleeps() [see + * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are + * in an atomic section, we defer the enqueue_b() call--else we call direct. + * + * @urb: We own a reference to it done by the HCI Linux USB stack that + * will be given up by calling usb_hcd_giveback_urb() or by + * returning error from this function -> ergo we don't have to + * refcount it. + */ +int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, + struct urb *urb, gfp_t gfp) +{ + int result; + struct device *dev = &wa->usb_iface->dev; + struct wa_xfer *xfer; + unsigned long my_flags; + unsigned cant_sleep = irqs_disabled() | in_atomic(); + + d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n", + wa, ep, urb, urb->transfer_buffer_length, gfp); + + if (urb->transfer_buffer == NULL + && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) + && urb->transfer_buffer_length != 0) { + dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb); + dump_stack(); + } + + result = -ENOMEM; + xfer = kzalloc(sizeof(*xfer), gfp); + if (xfer == NULL) + goto error_kmalloc; + + result = -ENOENT; + if (urb->status != -EINPROGRESS) /* cancelled */ + goto error_dequeued; /* before starting? */ + wa_xfer_init(xfer); + xfer->wa = wa_get(wa); + xfer->urb = urb; + xfer->gfp = gfp; + xfer->ep = ep; + urb->hcpriv = xfer; + d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", + xfer, urb, urb->pipe, urb->transfer_buffer_length, + urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", + urb->pipe & USB_DIR_IN ? "inbound" : "outbound", + cant_sleep ? 
"deferred" : "inline"); + if (cant_sleep) { + usb_get_urb(urb); + spin_lock_irqsave(&wa->xfer_list_lock, my_flags); + list_add_tail(&xfer->list_node, &wa->xfer_delayed_list); + spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); + queue_work(wusbd, &wa->xfer_work); + } else { + wa_urb_enqueue_b(xfer); + } + d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n", + wa, ep, urb, urb->transfer_buffer_length, gfp); + return 0; + +error_dequeued: + kfree(xfer); +error_kmalloc: + d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n", + wa, ep, urb, urb->transfer_buffer_length, gfp, result); + return result; +} +EXPORT_SYMBOL_GPL(wa_urb_enqueue); + +/* + * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion + * handler] is called. + * + * Until a transfer goes successfully through wa_urb_enqueue() it + * needs to be dequeued with completion calling; when stuck in delayed + * or before wa_xfer_setup() is called, we need to do completion. + * + * not setup If there is no hcpriv yet, that means that that enqueue + * still had no time to set the xfer up. Because + * urb->status should be other than -EINPROGRESS, + * enqueue() will catch that and bail out. + * + * If the transfer has gone through setup, we just need to clean it + * up. If it has gone through submit(), we have to abort it [with an + * asynch request] and then make sure we cancel each segment. + * + */ +int wa_urb_dequeue(struct wahc *wa, struct urb *urb) +{ + struct device *dev = &wa->usb_iface->dev; + unsigned long flags, flags2; + struct wa_xfer *xfer; + struct wa_seg *seg; + struct wa_rpipe *rpipe; + unsigned cnt; + unsigned rpipe_ready = 0; + + d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb); + + d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb); + xfer = urb->hcpriv; + if (xfer == NULL) { + /* NOthing setup yet enqueue will see urb->status != + * -EINPROGRESS (by hcd layer) and bail out with + * error, no need to do completion + */ + BUG_ON(urb->status == -EINPROGRESS); + goto out; + } + spin_lock_irqsave(&xfer->lock, flags); + rpipe = xfer->ep->hcpriv; + /* Check the delayed list -> if there, release and complete */ + spin_lock_irqsave(&wa->xfer_list_lock, flags2); + if (!list_empty(&xfer->list_node) && xfer->seg == NULL) + goto dequeue_delayed; + spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); + if (xfer->seg == NULL) /* still hasn't reached */ + goto out_unlock; /* setup(), enqueue_b() completes */ + /* Ok, the xfer is in flight already, it's been setup and submitted.*/ + __wa_xfer_abort(xfer); + for (cnt = 0; cnt < xfer->segs; cnt++) { + seg = xfer->seg[cnt]; + switch (seg->status) { + case WA_SEG_NOTREADY: + case WA_SEG_READY: + printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n", + xfer, cnt, seg->status); + WARN_ON(1); + break; + case WA_SEG_DELAYED: + seg->status = WA_SEG_ABORTED; + spin_lock_irqsave(&rpipe->seg_lock, flags2); + list_del(&seg->list_node); + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + spin_unlock_irqrestore(&rpipe->seg_lock, flags2); + break; + case WA_SEG_SUBMITTED: + seg->status = WA_SEG_ABORTED; + usb_unlink_urb(&seg->urb); + if (xfer->is_inbound == 0) + usb_unlink_urb(seg->dto_urb); + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + break; + case WA_SEG_PENDING: + seg->status = WA_SEG_ABORTED; + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + break; + case WA_SEG_DTI_PENDING: + usb_unlink_urb(wa->dti_urb); + seg->status = WA_SEG_ABORTED; + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + break; + case 
WA_SEG_DONE: + case WA_SEG_ERROR: + case WA_SEG_ABORTED: + break; + } + } + xfer->result = urb->status; /* -ENOENT or -ECONNRESET */ + __wa_xfer_is_done(xfer); + spin_unlock_irqrestore(&xfer->lock, flags); + wa_xfer_completion(xfer); + if (rpipe_ready) + wa_xfer_delayed_run(rpipe); + d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); + return 0; + +out_unlock: + spin_unlock_irqrestore(&xfer->lock, flags); +out: + d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); + return 0; + +dequeue_delayed: + list_del_init(&xfer->list_node); + spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); + xfer->result = urb->status; + spin_unlock_irqrestore(&xfer->lock, flags); + wa_xfer_giveback(xfer); + usb_put_urb(urb); /* we got a ref in enqueue() */ + d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); + return 0; +} +EXPORT_SYMBOL_GPL(wa_urb_dequeue); + +/* + * Translation from WA status codes (WUSB1.0 Table 8.15) to errno + * codes + * + * Positive errno values are internal inconsistencies and should be + * flagged louder. Negative are to be passed up to the user in the + * normal way. + * + * @status: USB WA status code -- high two bits are stripped. + */ +static int wa_xfer_status_to_errno(u8 status) +{ + int errno; + u8 real_status = status; + static int xlat[] = { + [WA_XFER_STATUS_SUCCESS] = 0, + [WA_XFER_STATUS_HALTED] = -EPIPE, + [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS, + [WA_XFER_STATUS_BABBLE] = -EOVERFLOW, + [WA_XFER_RESERVED] = EINVAL, + [WA_XFER_STATUS_NOT_FOUND] = 0, + [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM, + [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ, + [WA_XFER_STATUS_ABORTED] = -EINTR, + [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL, + [WA_XFER_INVALID_FORMAT] = EINVAL, + [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL, + [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL, + }; + status &= 0x3f; + + if (status == 0) + return 0; + if (status >= ARRAY_SIZE(xlat)) { + if (printk_ratelimit()) + printk(KERN_ERR "%s(): BUG? " + "Unknown WA transfer status 0x%02x\n", + __func__, real_status); + return -EINVAL; + } + errno = xlat[status]; + if (unlikely(errno > 0)) { + if (printk_ratelimit()) + printk(KERN_ERR "%s(): BUG? 
" + "Inconsistent WA status: 0x%02x\n", + __func__, real_status); + errno = -errno; + } + return errno; +} + +/* + * Process a xfer result completion message + * + * inbound transfers: need to schedule a DTI read + * + * FIXME: this functio needs to be broken up in parts + */ +static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) +{ + int result; + struct device *dev = &wa->usb_iface->dev; + unsigned long flags; + u8 seg_idx; + struct wa_seg *seg; + struct wa_rpipe *rpipe; + struct wa_xfer_result *xfer_result = wa->xfer_result; + u8 done = 0; + u8 usb_status; + unsigned rpipe_ready = 0; + + d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer); + spin_lock_irqsave(&xfer->lock, flags); + seg_idx = xfer_result->bTransferSegment & 0x7f; + if (unlikely(seg_idx >= xfer->segs)) + goto error_bad_seg; + seg = xfer->seg[seg_idx]; + rpipe = xfer->ep->hcpriv; + usb_status = xfer_result->bTransferStatus; + d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", + xfer, seg_idx, usb_status, seg->status); + if (seg->status == WA_SEG_ABORTED + || seg->status == WA_SEG_ERROR) /* already handled */ + goto segment_aborted; + if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */ + seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */ + if (seg->status != WA_SEG_PENDING) { + if (printk_ratelimit()) + dev_err(dev, "xfer %p#%u: Bad segment state %u\n", + xfer, seg_idx, seg->status); + seg->status = WA_SEG_PENDING; /* workaround/"fix" it */ + } + if (usb_status & 0x80) { + seg->result = wa_xfer_status_to_errno(usb_status); + dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n", + xfer, seg->index, usb_status); + goto error_complete; + } + /* FIXME: we ignore warnings, tally them for stats */ + if (usb_status & 0x40) /* Warning?... */ + usb_status = 0; /* ... 
pass */ + if (xfer->is_inbound) { /* IN data phase: read to buffer */ + seg->status = WA_SEG_DTI_PENDING; + BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); + if (xfer->is_dma) { + wa->buf_in_urb->transfer_dma = + xfer->urb->transfer_dma + + seg_idx * xfer->seg_size; + wa->buf_in_urb->transfer_flags + |= URB_NO_TRANSFER_DMA_MAP; + } else { + wa->buf_in_urb->transfer_buffer = + xfer->urb->transfer_buffer + + seg_idx * xfer->seg_size; + wa->buf_in_urb->transfer_flags + &= ~URB_NO_TRANSFER_DMA_MAP; + } + wa->buf_in_urb->transfer_buffer_length = + le32_to_cpu(xfer_result->dwTransferLength); + wa->buf_in_urb->context = seg; + result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); + if (result < 0) + goto error_submit_buf_in; + } else { + /* OUT data phase, complete it -- */ + seg->status = WA_SEG_DONE; + seg->result = le32_to_cpu(xfer_result->dwTransferLength); + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + done = __wa_xfer_is_done(xfer); + } + spin_unlock_irqrestore(&xfer->lock, flags); + if (done) + wa_xfer_completion(xfer); + if (rpipe_ready) + wa_xfer_delayed_run(rpipe); + d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer); + return; + + +error_submit_buf_in: + if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { + dev_err(dev, "DTI: URB max acceptable errors " + "exceeded, resetting device\n"); + wa_reset_all(wa); + } + if (printk_ratelimit()) + dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n", + xfer, seg_idx, result); + seg->result = result; +error_complete: + seg->status = WA_SEG_ERROR; + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + __wa_xfer_abort(xfer); + done = __wa_xfer_is_done(xfer); + spin_unlock_irqrestore(&xfer->lock, flags); + if (done) + wa_xfer_completion(xfer); + if (rpipe_ready) + wa_xfer_delayed_run(rpipe); + d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n", + wa, xfer); + return; + + +error_bad_seg: + spin_unlock_irqrestore(&xfer->lock, flags); + wa_urb_dequeue(wa, xfer->urb); + if (printk_ratelimit()) + dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); + if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { + dev_err(dev, "DTI: URB max acceptable errors " + "exceeded, resetting device\n"); + wa_reset_all(wa); + } + d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer); + return; + + +segment_aborted: + /* nothing to do, as the aborter did the completion */ + spin_unlock_irqrestore(&xfer->lock, flags); + d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n", + wa, xfer); + return; + +} + +/* + * Callback for the IN data phase + * + * If succesful transition state; otherwise, take a note of the + * error, mark this segment done and try completion. + * + * Note we don't access until we are sure that the transfer hasn't + * been cancelled (ECONNRESET, ENOENT), which could mean that + * seg->xfer could be already gone. 
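+ *
+ * On success the segment is marked WA_SEG_DONE with urb->actual_length
+ * as its result, the rpipe slot is given back and any delayed segments
+ * queued on the rpipe get a chance to run.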
+ */ +static void wa_buf_in_cb(struct urb *urb) +{ + struct wa_seg *seg = urb->context; + struct wa_xfer *xfer = seg->xfer; + struct wahc *wa; + struct device *dev; + struct wa_rpipe *rpipe; + unsigned rpipe_ready; + unsigned long flags; + u8 done = 0; + + d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); + switch (urb->status) { + case 0: + spin_lock_irqsave(&xfer->lock, flags); + wa = xfer->wa; + dev = &wa->usb_iface->dev; + rpipe = xfer->ep->hcpriv; + d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n", + xfer, seg->index, (size_t)urb->actual_length); + seg->status = WA_SEG_DONE; + seg->result = urb->actual_length; + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + done = __wa_xfer_is_done(xfer); + spin_unlock_irqrestore(&xfer->lock, flags); + if (done) + wa_xfer_completion(xfer); + if (rpipe_ready) + wa_xfer_delayed_run(rpipe); + break; + case -ECONNRESET: /* URB unlinked; no need to do anything */ + case -ENOENT: /* as it was done by the who unlinked us */ + break; + default: /* Other errors ... */ + spin_lock_irqsave(&xfer->lock, flags); + wa = xfer->wa; + dev = &wa->usb_iface->dev; + rpipe = xfer->ep->hcpriv; + if (printk_ratelimit()) + dev_err(dev, "xfer %p#%u: data in error %d\n", + xfer, seg->index, urb->status); + if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)){ + dev_err(dev, "DTO: URB max acceptable errors " + "exceeded, resetting device\n"); + wa_reset_all(wa); + } + seg->status = WA_SEG_ERROR; + seg->result = urb->status; + xfer->segs_done++; + rpipe_ready = rpipe_avail_inc(rpipe); + __wa_xfer_abort(xfer); + done = __wa_xfer_is_done(xfer); + spin_unlock_irqrestore(&xfer->lock, flags); + if (done) + wa_xfer_completion(xfer); + if (rpipe_ready) + wa_xfer_delayed_run(rpipe); + } + d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); +} + +/* + * Handle an incoming transfer result buffer + * + * Given a transfer result buffer, it completes the transfer (possibly + * scheduling and buffer in read) and then resubmits the DTI URB for a + * new transfer result read. + * + * + * The xfer_result DTI URB state machine + * + * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In) + * + * We start in OFF mode, the first xfer_result notification [through + * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to + * read. + * + * We receive a buffer -- if it is not a xfer_result, we complain and + * repost the DTI-URB. If it is a xfer_result then do the xfer seg + * request accounting. If it is an IN segment, we move to RBI and post + * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will + * repost the DTI-URB and move to RXR state. if there was no IN + * segment, it will repost the DTI-URB. + * + * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many + * errors) in the URBs. 
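+ *
+ * A typical inbound round trip therefore looks like:
+ *
+ *   notification --> RXR (post DTI URB, read the xfer result)
+ *      --> RBI (post BUF-IN URB, read the data)
+ *      --> RXR (repost DTI URB) ... --> OFF on ENOENT/ESHUTDOWN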
+ */ +static void wa_xfer_result_cb(struct urb *urb) +{ + int result; + struct wahc *wa = urb->context; + struct device *dev = &wa->usb_iface->dev; + struct wa_xfer_result *xfer_result; + u32 xfer_id; + struct wa_xfer *xfer; + u8 usb_status; + + d_fnstart(3, dev, "(%p)\n", wa); + BUG_ON(wa->dti_urb != urb); + switch (wa->dti_urb->status) { + case 0: + /* We have a xfer result buffer; check it */ + d_printf(2, dev, "DTI: xfer result %d bytes at %p\n", + urb->actual_length, urb->transfer_buffer); + d_dump(3, dev, urb->transfer_buffer, urb->actual_length); + if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { + dev_err(dev, "DTI Error: xfer result--bad size " + "xfer result (%d bytes vs %zu needed)\n", + urb->actual_length, sizeof(*xfer_result)); + break; + } + xfer_result = wa->xfer_result; + if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { + dev_err(dev, "DTI Error: xfer result--" + "bad header length %u\n", + xfer_result->hdr.bLength); + break; + } + if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { + dev_err(dev, "DTI Error: xfer result--" + "bad header type 0x%02x\n", + xfer_result->hdr.bNotifyType); + break; + } + usb_status = xfer_result->bTransferStatus & 0x3f; + if (usb_status == WA_XFER_STATUS_ABORTED + || usb_status == WA_XFER_STATUS_NOT_FOUND) + /* taken care of already */ + break; + xfer_id = xfer_result->dwTransferID; + xfer = wa_xfer_get_by_id(wa, xfer_id); + if (xfer == NULL) { + /* FIXME: transaction might have been cancelled */ + dev_err(dev, "DTI Error: xfer result--" + "unknown xfer 0x%08x (status 0x%02x)\n", + xfer_id, usb_status); + break; + } + wa_xfer_result_chew(wa, xfer); + wa_xfer_put(xfer); + break; + case -ENOENT: /* (we killed the URB)...so, no broadcast */ + case -ESHUTDOWN: /* going away! */ + dev_dbg(dev, "DTI: going down! %d\n", urb->status); + goto out; + default: + /* Unknown error */ + if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, + EDC_ERROR_TIMEFRAME)) { + dev_err(dev, "DTI: URB max acceptable errors " + "exceeded, resetting device\n"); + wa_reset_all(wa); + goto out; + } + if (printk_ratelimit()) + dev_err(dev, "DTI: URB error %d\n", urb->status); + break; + } + /* Resubmit the DTI URB */ + result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); + if (result < 0) { + dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " + "resetting\n", result); + wa_reset_all(wa); + } +out: + d_fnend(3, dev, "(%p) = void\n", wa); + return; +} + +/* + * Transfer complete notification + * + * Called from the notif.c code. We get a notification on EP2 saying + * that some endpoint has some transfer result data available. We are + * about to read it. + * + * To speed up things, we always have a URB reading the DTI URB; we + * don't really set it up and start it until the first xfer complete + * notification arrives, which is what we do here. + * + * Follow up in wa_xfer_result_cb(), as that's where the whole state + * machine starts. + * + * So here we just initialize the DTI URB for reading transfer result + * notifications and also the buffer-in URB, for reading buffers. Then + * we just submit the DTI URB. 
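+ * This setup is done only once: later notifications find wa->dti_urb
+ * already allocated and return without touching it.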
+ * + * @wa shall be referenced + */ +void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr) +{ + int result; + struct device *dev = &wa->usb_iface->dev; + struct wa_notif_xfer *notif_xfer; + const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; + + d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr); + notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); + BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); + + if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) { + /* FIXME: hardcoded limitation, adapt */ + dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n", + notif_xfer->bEndpoint, dti_epd->bEndpointAddress); + goto error; + } + if (wa->dti_urb != NULL) /* DTI URB already started */ + goto out; + + wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL); + if (wa->dti_urb == NULL) { + dev_err(dev, "Can't allocate DTI URB\n"); + goto error_dti_urb_alloc; + } + usb_fill_bulk_urb( + wa->dti_urb, wa->usb_dev, + usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), + wa->xfer_result, wa->xfer_result_size, + wa_xfer_result_cb, wa); + + wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL); + if (wa->buf_in_urb == NULL) { + dev_err(dev, "Can't allocate BUF-IN URB\n"); + goto error_buf_in_urb_alloc; + } + usb_fill_bulk_urb( + wa->buf_in_urb, wa->usb_dev, + usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), + NULL, 0, wa_buf_in_cb, wa); + result = usb_submit_urb(wa->dti_urb, GFP_KERNEL); + if (result < 0) { + dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " + "resetting\n", result); + goto error_dti_urb_submit; + } +out: + d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); + return; + +error_dti_urb_submit: + usb_put_urb(wa->buf_in_urb); +error_buf_in_urb_alloc: + usb_put_urb(wa->dti_urb); + wa->dti_urb = NULL; +error_dti_urb_alloc: +error: + wa_reset_all(wa); + d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); + return; +} -- cgit v0.10.2 From d09318b8ab2eabb65b6fa0dc04dab1822846eabb Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:30 +0100 Subject: wusb: add HWA host controller driver Add a driver for Wireless USB host controllers connected via USB (a Host Wire Adapter or HWA). Signed-off-by: David Vrabel diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 19ede32..8b7c419 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/ obj-$(CONFIG_USB_SL811_HCD) += host/ obj-$(CONFIG_USB_U132_HCD) += host/ obj-$(CONFIG_USB_R8A66597_HCD) += host/ +obj-$(CONFIG_USB_HWA_HCD) += host/ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index d3ba351..5150441 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -317,3 +317,17 @@ config USB_WHCI_HCD To compile this driver a module, choose M here: the module will be called "whci-hcd". + +config USB_HWA_HCD + tristate "Host Wire Adapter (HWA) driver" + depends on USB + select USB_WUSB + select UWB_HWA + help + This driver enables you to connect Wireless USB devices to + your system using a Host Wire Adaptor USB dongle. This is an + UWB Radio Controller and WUSB Host Controller connected to + your machine via USB (specified in WUSB1.0). + + To compile this driver a module, choose M here: the module + will be called "hwa-hc". 
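The driver added by this patch is mostly glue: struct hwahc (see hwa-hc.c
below) embeds the common WUSB host controller state and the wire adapter
state, and each hwahc_op_*() recovers its hwahc with container_of(). A
minimal standalone sketch of that embedding pattern, in which the struct
members are stand-ins and not the real wusbhc/wahc definitions:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(), equivalent to the kernel macro for this use. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wusbhc { int cluster_id; };	/* stand-in for the common WUSB HC state */
struct wahc { int rpipes; };		/* stand-in for the wire adapter state */

struct hwahc {
	struct wusbhc wusbhc;		/* kept first, as in the driver */
	struct wahc wa;
};

/* Shape of the hwahc_op_*() calls: given the wusbhc, recover the hwahc. */
static void op_taking_wusbhc(struct wusbhc *wusbhc)
{
	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);

	printf("cluster_id 0x%02x, rpipes %d\n",
	       hwahc->wusbhc.cluster_id, hwahc->wa.rpipes);
}

int main(void)
{
	struct hwahc hc = { .wusbhc = { .cluster_id = 0xfe }, .wa = { .rpipes = 4 } };

	op_taking_wusbhc(&hc.wusbhc);
	return 0;
}

The wusbhc member is kept first here because the driver's own struct hwahc
carries a "has to be 1st" note on that field; the example values (0xfe, 4)
are arbitrary.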
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index c90cf86..23be222 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -21,3 +21,4 @@ obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o +obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c new file mode 100644 index 0000000..64be4d8 --- /dev/null +++ b/drivers/usb/host/hwa-hc.c @@ -0,0 +1,925 @@ +/* + * Host Wire Adapter: + * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC + * + * Copyright (C) 2005-2006 Intel Corporation + * Inaky Perez-Gonzalez + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * The HWA driver is a simple layer that forwards requests to the WAHC + * (Wire Adater Host Controller) or WUSBHC (Wireless USB Host + * Controller) layers. + * + * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB + * Host Controller that is connected to your system via USB (a USB + * dongle that implements a USB host...). There is also a Device Wired + * Adaptor, DWA (Wireless USB hub) that uses the same mechanism for + * transferring data (it is after all a USB host connected via + * Wireless USB), we have a common layer called Wire Adapter Host + * Controller that does all the hard work. The WUSBHC (Wireless USB + * Host Controller) is the part common to WUSB Host Controllers, the + * HWA and the PCI-based one, that is implemented following the WHCI + * spec. All these layers are implemented in ../wusbcore. + * + * The main functions are hwahc_op_urb_{en,de}queue(), that pass the + * job of converting a URB to a Wire Adapter + * + * Entry points: + * + * hwahc_driver_*() Driver initialization, registration and + * teardown. + * + * hwahc_probe() New device came up, create an instance for + * it [from device enumeration]. + * + * hwahc_disconnect() Remove device instance [from device + * enumeration]. + * + * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for + * starting/stopping/etc (some might be made also + * DWA). 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "../wusbcore/wa-hc.h" +#include "../wusbcore/wusbhc.h" + +#define D_LOCAL 0 +#include + +struct hwahc { + struct wusbhc wusbhc; /* has to be 1st */ + struct wahc wa; + u8 buffer[16]; /* for misc usb transactions */ +}; + +/** + * FIXME should be wusbhc + * + * NOTE: we need to cache the Cluster ID because later...there is no + * way to get it :) + */ +static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id) +{ + int result; + struct wusbhc *wusbhc = &hwahc->wusbhc; + struct wahc *wa = &hwahc->wa; + struct device *dev = &wa->usb_iface->dev; + + result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + WUSB_REQ_SET_CLUSTER_ID, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + cluster_id, + wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, + NULL, 0, 1000 /* FIXME: arbitrary */); + if (result < 0) + dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n", + cluster_id, result); + else + wusbhc->cluster_id = cluster_id; + dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id); + return result; +} + +static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) +{ + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + + return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + WUSB_REQ_SET_NUM_DNTS, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + interval << 8 | slots, + wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, + NULL, 0, 1000 /* FIXME: arbitrary */); +} + +/* + * Reset a WUSB host controller and wait for it to complete doing it. + * + * @usb_hcd: Pointer to WUSB Host Controller instance. + * + */ +static int hwahc_op_reset(struct usb_hcd *usb_hcd) +{ + int result; + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct device *dev = &hwahc->wa.usb_iface->dev; + + d_fnstart(4, dev, "(hwahc %p)\n", hwahc); + mutex_lock(&wusbhc->mutex); + wa_nep_disarm(&hwahc->wa); + result = __wa_set_feature(&hwahc->wa, WA_RESET); + if (result < 0) { + dev_err(dev, "error commanding HC to reset: %d\n", result); + goto error_unlock; + } + d_printf(3, dev, "reset: waiting for device to change state\n"); + result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); + if (result < 0) { + dev_err(dev, "error waiting for HC to reset: %d\n", result); + goto error_unlock; + } +error_unlock: + mutex_unlock(&wusbhc->mutex); + d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); + return result; +} + +/* + * FIXME: break this function up + */ +static int hwahc_op_start(struct usb_hcd *usb_hcd) +{ + u8 addr; + int result; + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct device *dev = &hwahc->wa.usb_iface->dev; + + /* Set up a Host Info WUSB Information Element */ + d_fnstart(4, dev, "(hwahc %p)\n", hwahc); + result = -ENOSPC; + mutex_lock(&wusbhc->mutex); + /* Start the numbering from the top so that the bottom + * range of the unauth addr space is used for devices, + * the top for HCs; use 0xfe - RC# */ + addr = wusb_cluster_id_get(); + if (addr == 0) + goto error_cluster_id_get; + result = __hwahc_set_cluster_id(hwahc, addr); + if (result < 0) + goto error_set_cluster_id; + + result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); + if (result < 0) { + dev_err(dev, "cannot listen to notifications: %d\n", result); + goto error_stop; + } + 
usb_hcd->uses_new_polling = 1; + usb_hcd->poll_rh = 1; + usb_hcd->state = HC_STATE_RUNNING; + result = 0; +out: + mutex_unlock(&wusbhc->mutex); + d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); + return result; + +error_stop: + __wa_stop(&hwahc->wa); +error_set_cluster_id: + wusb_cluster_id_put(wusbhc->cluster_id); +error_cluster_id_get: + goto out; + +} + +/* + * FIXME: break this function up + */ +static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) +{ + int result; + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct device *dev = &hwahc->wa.usb_iface->dev; + + /* Set up a Host Info WUSB Information Element */ + d_fnstart(4, dev, "(hwahc %p)\n", hwahc); + result = -ENOSPC; + + result = __wa_set_feature(&hwahc->wa, WA_ENABLE); + if (result < 0) { + dev_err(dev, "error commanding HC to start: %d\n", result); + goto error_stop; + } + result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); + if (result < 0) { + dev_err(dev, "error waiting for HC to start: %d\n", result); + goto error_stop; + } + result = 0; +out: + d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); + return result; + +error_stop: + result = __wa_clear_feature(&hwahc->wa, WA_ENABLE); + goto out; +} + +static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__, + usb_hcd, hwahc, *(unsigned long *) &msg); + return -ENOSYS; +} + +static int hwahc_op_resume(struct usb_hcd *usb_hcd) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + + dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, + usb_hcd, hwahc); + return -ENOSYS; +} + +static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc) +{ + int result; + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct device *dev = &hwahc->wa.usb_iface->dev; + + d_fnstart(4, dev, "(hwahc %p)\n", hwahc); + /* Nothing for now */ + d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); + return; +} + +/* + * No need to abort pipes, as when this is called, all the children + * has been disconnected and that has done it [through + * usb_disable_interface() -> usb_disable_endpoint() -> + * hwahc_op_ep_disable() - >rpipe_ep_disable()]. 
+ */ +static void hwahc_op_stop(struct usb_hcd *usb_hcd) +{ + int result; + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + struct device *dev = &wa->usb_iface->dev; + + d_fnstart(4, dev, "(hwahc %p)\n", hwahc); + mutex_lock(&wusbhc->mutex); + wusbhc_stop(wusbhc); + wa_nep_disarm(&hwahc->wa); + result = __wa_stop(&hwahc->wa); + wusb_cluster_id_put(wusbhc->cluster_id); + mutex_unlock(&wusbhc->mutex); + d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); + return; +} + +static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + + dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__, + usb_hcd, hwahc); + return -ENOSYS; +} + +static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, + gfp_t gfp) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + + return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp); +} + +static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, + int status) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + + return wa_urb_dequeue(&hwahc->wa, urb); +} + +/* + * Release resources allocated for an endpoint + * + * If there is an associated rpipe to this endpoint, go ahead and put it. + */ +static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd, + struct usb_host_endpoint *ep) +{ + struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + + rpipe_ep_disable(&hwahc->wa, ep); +} + +/* + * Set the UWB MAS allocation for the WUSB cluster + * + * @stream_index: stream to use (-1 for cancelling the allocation) + * @mas: mas bitmap to use + */ +static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index, + const struct uwb_mas_bm *mas) +{ + int result; + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + struct device *dev = &wa->usb_iface->dev; + u8 mas_le[UWB_NUM_MAS/8]; + + /* Set the stream index */ + result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + WUSB_REQ_SET_STREAM_IDX, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + stream_index, + wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, + NULL, 0, 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "Cannot set WUSB stream index: %d\n", result); + goto out; + } + uwb_mas_bm_copy_le(mas_le, mas); + /* Set the MAS allocation */ + result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + WUSB_REQ_SET_WUSB_MAS, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber, + mas_le, 32, 1000 /* FIXME: arbitrary */); + if (result < 0) + dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result); +out: + return result; +} + +/* + * Add an IE to the host's MMC + * + * @interval: See WUSB1.0[8.5.3.1] + * @repeat_cnt: See WUSB1.0[8.5.3.1] + * @handle: See WUSB1.0[8.5.3.1] + * @wuie: Pointer to the header of the WUSB IE data to add. + * MUST BE allocated in a kmalloc buffer (no stack or + * vmalloc). + * + * NOTE: the format of the WUSB IEs for MMCs are different to the + * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length + + * Id in WUSB IEs). 
Standards...you gotta love'em. + */ +static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval, + u8 repeat_cnt, u8 handle, + struct wuie_hdr *wuie) +{ + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; + + return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + WUSB_REQ_ADD_MMC_IE, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + interval << 8 | repeat_cnt, + handle << 8 | iface_no, + wuie, wuie->bLength, 1000 /* FIXME: arbitrary */); +} + +/* + * Remove an IE to the host's MMC + * + * @handle: See WUSB1.0[8.5.3.1] + */ +static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle) +{ + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; + return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + WUSB_REQ_REMOVE_MMC_IE, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, handle << 8 | iface_no, + NULL, 0, 1000 /* FIXME: arbitrary */); +} + +/* + * Update device information for a given fake port + * + * @port_idx: Fake port to which device is connected (wusbhc index, not + * USB port number). + */ +static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc, + struct wusb_dev *wusb_dev) +{ + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; + struct hwa_dev_info *dev_info; + int ret; + + /* fill out the Device Info buffer and send it */ + dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL); + if (!dev_info) + return -ENOMEM; + uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability, + &wusb_dev->availability); + dev_info->bDeviceAddress = wusb_dev->addr; + + /* + * If the descriptors haven't been read yet, use a default PHY + * rate of 53.3 Mbit/s only. The correct value will be used + * when this will be called again as part of the + * authentication process (which occurs after the descriptors + * have been read). + */ + if (wusb_dev->wusb_cap_descr) + dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates; + else + dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53); + + ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + WUSB_REQ_SET_DEV_INFO, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, wusb_dev->port_idx << 8 | iface_no, + dev_info, sizeof(struct hwa_dev_info), + 1000 /* FIXME: arbitrary */); + kfree(dev_info); + return ret; +} + +/* + * Set host's idea of which encryption (and key) method to use when + * talking to ad evice on a given port. + * + * If key is NULL, it means disable encryption for that "virtual port" + * (used when we disconnect). 
+ */ +static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, + const void *key, size_t key_size, + u8 key_idx) +{ + int result = -ENOMEM; + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_key_descriptor *keyd; + size_t keyd_len; + + keyd_len = sizeof(*keyd) + key_size; + keyd = kzalloc(keyd_len, GFP_KERNEL); + if (keyd == NULL) + return -ENOMEM; + + keyd->bLength = keyd_len; + keyd->bDescriptorType = USB_DT_KEY; + keyd->tTKID[0] = (tkid >> 0) & 0xff; + keyd->tTKID[1] = (tkid >> 8) & 0xff; + keyd->tTKID[2] = (tkid >> 16) & 0xff; + memcpy(keyd->bKeyData, key, key_size); + + result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + USB_REQ_SET_DESCRIPTOR, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + USB_DT_KEY << 8 | key_idx, + port_idx << 8 | iface_no, + keyd, keyd_len, 1000 /* FIXME: arbitrary */); + + memset(keyd, 0, sizeof(*keyd)); /* clear keys etc. */ + kfree(keyd); + return result; +} + +/* + * Set host's idea of which encryption (and key) method to use when + * talking to ad evice on a given port. + * + * If key is NULL, it means disable encryption for that "virtual port" + * (used when we disconnect). + */ +static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid, + const void *key, size_t key_size) +{ + int result = -ENOMEM; + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); + struct wahc *wa = &hwahc->wa; + u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; + u8 encryption_value; + + /* Tell the host which key to use to talk to the device */ + if (key) { + u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK, + WUSB_KEY_INDEX_ORIGINATOR_HOST); + + result = __hwahc_dev_set_key(wusbhc, port_idx, tkid, + key, key_size, key_idx); + if (result < 0) + goto error_set_key; + encryption_value = wusbhc->ccm1_etd->bEncryptionValue; + } else { + /* FIXME: this should come from wusbhc->etd[UNSECURE].value */ + encryption_value = 0; + } + + /* Set the encryption type for commmunicating with the device */ + result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), + USB_REQ_SET_ENCRYPTION, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + encryption_value, port_idx << 8 | iface_no, + NULL, 0, 1000 /* FIXME: arbitrary */); + if (result < 0) + dev_err(wusbhc->dev, "Can't set host's WUSB encryption for " + "port index %u to %s (value %d): %d\n", port_idx, + wusb_et_name(wusbhc->ccm1_etd->bEncryptionType), + wusbhc->ccm1_etd->bEncryptionValue, result); +error_set_key: + return result; +} + +/* + * Set host's GTK key + */ +static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid, + const void *key, size_t key_size) +{ + u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK, + WUSB_KEY_INDEX_ORIGINATOR_HOST); + + return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx); +} + +/* + * Get the Wire Adapter class-specific descriptor + * + * NOTE: this descriptor comes with the big bundled configuration + * descriptor that includes the interfaces' and endpoints', so + * we just look for it in the cached copy kept by the USB stack. + * + * NOTE2: We convert LE fields to CPU order. 
+ */ +static int wa_fill_descr(struct wahc *wa) +{ + int result; + struct device *dev = &wa->usb_iface->dev; + char *itr; + struct usb_device *usb_dev = wa->usb_dev; + struct usb_descriptor_header *hdr; + struct usb_wa_descriptor *wa_descr; + size_t itr_size, actconfig_idx; + + actconfig_idx = (usb_dev->actconfig - usb_dev->config) / + sizeof(usb_dev->config[0]); + itr = usb_dev->rawdescriptors[actconfig_idx]; + itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); + while (itr_size >= sizeof(*hdr)) { + hdr = (struct usb_descriptor_header *) itr; + d_printf(3, dev, "Extra device descriptor: " + "type %02x/%u bytes @ %zu (%zu left)\n", + hdr->bDescriptorType, hdr->bLength, + (itr - usb_dev->rawdescriptors[actconfig_idx]), + itr_size); + if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) + goto found; + itr += hdr->bLength; + itr_size -= hdr->bLength; + } + dev_err(dev, "cannot find Wire Adapter Class descriptor\n"); + return -ENODEV; + +found: + result = -EINVAL; + if (hdr->bLength > itr_size) { /* is it available? */ + dev_err(dev, "incomplete Wire Adapter Class descriptor " + "(%zu bytes left, %u needed)\n", + itr_size, hdr->bLength); + goto error; + } + if (hdr->bLength < sizeof(*wa->wa_descr)) { + dev_err(dev, "short Wire Adapter Class descriptor\n"); + goto error; + } + wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr; + /* Make LE fields CPU order */ + wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion); + wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes); + wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock); + if (wa_descr->bcdWAVersion > 0x0100) + dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n", + wa_descr->bcdWAVersion & 0xff00 >> 8, + wa_descr->bcdWAVersion & 0x00ff); + result = 0; +error: + return result; +} + +static struct hc_driver hwahc_hc_driver = { + .description = "hwa-hcd", + .product_desc = "Wireless USB HWA host controller", + .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd), + .irq = NULL, /* FIXME */ + .flags = HCD_USB2, /* FIXME */ + .reset = hwahc_op_reset, + .start = hwahc_op_start, + .pci_suspend = hwahc_op_suspend, + .pci_resume = hwahc_op_resume, + .stop = hwahc_op_stop, + .get_frame_number = hwahc_op_get_frame_number, + .urb_enqueue = hwahc_op_urb_enqueue, + .urb_dequeue = hwahc_op_urb_dequeue, + .endpoint_disable = hwahc_op_endpoint_disable, + + .hub_status_data = wusbhc_rh_status_data, + .hub_control = wusbhc_rh_control, + .bus_suspend = wusbhc_rh_suspend, + .bus_resume = wusbhc_rh_resume, + .start_port_reset = wusbhc_rh_start_port_reset, +}; + +static int hwahc_security_create(struct hwahc *hwahc) +{ + int result; + struct wusbhc *wusbhc = &hwahc->wusbhc; + struct usb_device *usb_dev = hwahc->wa.usb_dev; + struct device *dev = &usb_dev->dev; + struct usb_security_descriptor *secd; + struct usb_encryption_descriptor *etd; + void *itr, *top; + size_t itr_size, needed, bytes; + u8 index; + char buf[64]; + + /* Find the host's security descriptors in the config descr bundle */ + index = (usb_dev->actconfig - usb_dev->config) / + sizeof(usb_dev->config[0]); + itr = usb_dev->rawdescriptors[index]; + itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); + top = itr + itr_size; + result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index], + le16_to_cpu(usb_dev->actconfig->desc.wTotalLength), + USB_DT_SECURITY, (void **) &secd); + if (result == -1) { + dev_warn(dev, "BUG? 
WUSB host has no security descriptors\n"); + return 0; + } + needed = sizeof(*secd); + if (top - (void *)secd < needed) { + dev_err(dev, "BUG? Not enough data to process security " + "descriptor header (%zu bytes left vs %zu needed)\n", + top - (void *) secd, needed); + return 0; + } + needed = le16_to_cpu(secd->wTotalLength); + if (top - (void *)secd < needed) { + dev_err(dev, "BUG? Not enough data to process security " + "descriptors (%zu bytes left vs %zu needed)\n", + top - (void *) secd, needed); + return 0; + } + /* Walk over the sec descriptors and store CCM1's on wusbhc */ + itr = (void *) secd + sizeof(*secd); + top = (void *) secd + le16_to_cpu(secd->wTotalLength); + index = 0; + bytes = 0; + while (itr < top) { + etd = itr; + if (top - itr < sizeof(*etd)) { + dev_err(dev, "BUG: bad host security descriptor; " + "not enough data (%zu vs %zu left)\n", + top - itr, sizeof(*etd)); + break; + } + if (etd->bLength < sizeof(*etd)) { + dev_err(dev, "BUG: bad host encryption descriptor; " + "descriptor is too short " + "(%zu vs %zu needed)\n", + (size_t)etd->bLength, sizeof(*etd)); + break; + } + itr += etd->bLength; + bytes += snprintf(buf + bytes, sizeof(buf) - bytes, + "%s (0x%02x) ", + wusb_et_name(etd->bEncryptionType), + etd->bEncryptionValue); + wusbhc->ccm1_etd = etd; + } + dev_info(dev, "supported encryption types: %s\n", buf); + if (wusbhc->ccm1_etd == NULL) { + dev_err(dev, "E: host doesn't support CCM-1 crypto\n"); + return 0; + } + /* Pretty print what we support */ + return 0; +} + +static void hwahc_security_release(struct hwahc *hwahc) +{ + /* nothing to do here so far... */ +} + +static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface) +{ + int result; + struct device *dev = &iface->dev; + struct wusbhc *wusbhc = &hwahc->wusbhc; + struct wahc *wa = &hwahc->wa; + struct usb_device *usb_dev = interface_to_usbdev(iface); + + wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */ + wa->usb_iface = usb_get_intf(iface); + wusbhc->dev = dev; + wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent); + if (wusbhc->uwb_rc == NULL) { + result = -ENODEV; + dev_err(dev, "Cannot get associated UWB Host Controller\n"); + goto error_rc_get; + } + result = wa_fill_descr(wa); /* Get the device descriptor */ + if (result < 0) + goto error_fill_descriptor; + if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) { + dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB " + "adapter (%u ports)\n", wa->wa_descr->bNumPorts); + wusbhc->ports_max = USB_MAXCHILDREN; + } else { + wusbhc->ports_max = wa->wa_descr->bNumPorts; + } + wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs; + wusbhc->start = __hwahc_op_wusbhc_start; + wusbhc->stop = __hwahc_op_wusbhc_stop; + wusbhc->mmcie_add = __hwahc_op_mmcie_add; + wusbhc->mmcie_rm = __hwahc_op_mmcie_rm; + wusbhc->dev_info_set = __hwahc_op_dev_info_set; + wusbhc->bwa_set = __hwahc_op_bwa_set; + wusbhc->set_num_dnts = __hwahc_op_set_num_dnts; + wusbhc->set_ptk = __hwahc_op_set_ptk; + wusbhc->set_gtk = __hwahc_op_set_gtk; + result = hwahc_security_create(hwahc); + if (result < 0) { + dev_err(dev, "Can't initialize security: %d\n", result); + goto error_security_create; + } + wa->wusb = wusbhc; /* FIXME: ugly, need to fix */ + result = wusbhc_create(&hwahc->wusbhc); + if (result < 0) { + dev_err(dev, "Can't create WUSB HC structures: %d\n", result); + goto error_wusbhc_create; + } + result = wa_create(&hwahc->wa, iface); + if (result < 0) + goto error_wa_create; + return 0; + +error_wa_create: + wusbhc_destroy(&hwahc->wusbhc); 
+error_wusbhc_create: + /* WA Descr fill allocs no resources */ +error_security_create: +error_fill_descriptor: + uwb_rc_put(wusbhc->uwb_rc); +error_rc_get: + usb_put_intf(iface); + usb_put_dev(usb_dev); + return result; +} + +static void hwahc_destroy(struct hwahc *hwahc) +{ + struct wusbhc *wusbhc = &hwahc->wusbhc; + + d_fnstart(1, NULL, "(hwahc %p)\n", hwahc); + mutex_lock(&wusbhc->mutex); + __wa_destroy(&hwahc->wa); + wusbhc_destroy(&hwahc->wusbhc); + hwahc_security_release(hwahc); + hwahc->wusbhc.dev = NULL; + uwb_rc_put(wusbhc->uwb_rc); + usb_put_intf(hwahc->wa.usb_iface); + usb_put_dev(hwahc->wa.usb_dev); + mutex_unlock(&wusbhc->mutex); + d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc); +} + +static void hwahc_init(struct hwahc *hwahc) +{ + wa_init(&hwahc->wa); +} + +static int hwahc_probe(struct usb_interface *usb_iface, + const struct usb_device_id *id) +{ + int result; + struct usb_hcd *usb_hcd; + struct wusbhc *wusbhc; + struct hwahc *hwahc; + struct device *dev = &usb_iface->dev; + + d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id); + result = -ENOMEM; + usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa"); + if (usb_hcd == NULL) { + dev_err(dev, "unable to allocate instance\n"); + goto error_alloc; + } + usb_hcd->wireless = 1; + usb_hcd->flags |= HCD_FLAG_SAW_IRQ; + wusbhc = usb_hcd_to_wusbhc(usb_hcd); + hwahc = container_of(wusbhc, struct hwahc, wusbhc); + hwahc_init(hwahc); + result = hwahc_create(hwahc, usb_iface); + if (result < 0) { + dev_err(dev, "Cannot initialize internals: %d\n", result); + goto error_hwahc_create; + } + result = usb_add_hcd(usb_hcd, 0, 0); + if (result < 0) { + dev_err(dev, "Cannot add HCD: %d\n", result); + goto error_add_hcd; + } + result = wusbhc_b_create(&hwahc->wusbhc); + if (result < 0) { + dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result); + goto error_wusbhc_b_create; + } + d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id); + return 0; + +error_wusbhc_b_create: + usb_remove_hcd(usb_hcd); +error_add_hcd: + hwahc_destroy(hwahc); +error_hwahc_create: + usb_put_hcd(usb_hcd); +error_alloc: + d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result); + return result; +} + +static void hwahc_disconnect(struct usb_interface *usb_iface) +{ + struct usb_hcd *usb_hcd; + struct wusbhc *wusbhc; + struct hwahc *hwahc; + + usb_hcd = usb_get_intfdata(usb_iface); + wusbhc = usb_hcd_to_wusbhc(usb_hcd); + hwahc = container_of(wusbhc, struct hwahc, wusbhc); + + d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface); + wusbhc_b_destroy(&hwahc->wusbhc); + usb_remove_hcd(usb_hcd); + hwahc_destroy(hwahc); + usb_put_hcd(usb_hcd); + d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc, + usb_iface); +} + +/** USB device ID's that we handle */ +static struct usb_device_id hwahc_id_table[] = { + /* FIXME: use class labels for this */ + { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), }, + {}, +}; +MODULE_DEVICE_TABLE(usb, hwahc_id_table); + +static struct usb_driver hwahc_driver = { + .name = "hwa-hc", + .probe = hwahc_probe, + .disconnect = hwahc_disconnect, + .id_table = hwahc_id_table, +}; + +static int __init hwahc_driver_init(void) +{ + int result; + result = usb_register(&hwahc_driver); + if (result < 0) { + printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n", + result); + goto error_usb_register; + } + return 0; + +error_usb_register: + return result; + +} +module_init(hwahc_driver_init); + +static void __exit hwahc_driver_exit(void) +{ + usb_deregister(&hwahc_driver); +} +module_exit(hwahc_driver_exit); + + 
+MODULE_AUTHOR("Inaky Perez-Gonzalez "); +MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From f51c23baedf191734364d3aae034d8c9c4cd8cf8 Mon Sep 17 00:00:00 2001 From: Anderson Lizardo Date: Wed, 17 Sep 2008 16:34:31 +0100 Subject: wusb: fix error path for wusb_set_dev_addr() Error path for wusb_set_dev_addr() was handled incorrectly. Fix it by considering error only when return value is negative. Signed-off-by: Anderson Lizardo Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index f05f9b4..30d7020 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c @@ -239,8 +239,8 @@ static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc, "port %u\n", dev_addr, port_idx); result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr); - if (result) - return NULL; + if (result < 0) + return NULL; } wusb_dev->entry_ts = jiffies; list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); @@ -1301,7 +1301,7 @@ int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr) wusb_dev->addr = addr; result = wusbhc->dev_info_set(wusbhc, wusb_dev); - if (result) + if (result < 0) dev_err(wusbhc->dev, "device %d: failed to set device " "address\n", wusb_dev->port_idx); else -- cgit v0.10.2 From b0a81328c287052cedf28e06d9b4648ad10b72d7 Mon Sep 17 00:00:00 2001 From: Anderson Lizardo Date: Wed, 17 Sep 2008 16:34:32 +0100 Subject: wusb: fix bmRequestType for Abort RPipe request WUSB 1.0 (Table 8-4) mentions that Abort RPipe requests must have bmRequestType equal to 0x25, although current implementation sets bmRequestType to 0xa5. This patch fixes this typo. Signed-off-by: Anderson Lizardo Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c index bfe3752..f18e4aa 100644 --- a/drivers/usb/wusbcore/wa-rpipe.c +++ b/drivers/usb/wusbcore/wa-rpipe.c @@ -548,7 +548,7 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) result = usb_control_msg( wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), USB_REQ_RPIPE_ABORT, - USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); if (result < 0 && result != -ENODEV /* dev is gone */) d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n", -- cgit v0.10.2 From 9d839477f85438695bd870898ffa77347e61b637 Mon Sep 17 00:00:00 2001 From: Anderson Lizardo Date: Wed, 17 Sep 2008 16:34:33 +0100 Subject: uwb: Fix handling IEs with empty IE data in uwb_est_get_size() A DRP notification may sometimes have empty IE data. This patch fixes uwb_est_get_size() so that this case is handled properly. Signed-off-by: Anderson Lizardo Signed-off-by: David Vrabel diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c index 1667afb..3791cd9 100644 --- a/drivers/uwb/est.c +++ b/drivers/uwb/est.c @@ -395,7 +395,7 @@ ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est, case UWB_EST_8: type_size = sizeof(u8); break; default: BUG(); } - if (offset + type_size >= rceb_size) { + if (offset + type_size > rceb_size) { if (printk_ratelimit()) dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: " "not enough data to read extra size\n", -- cgit v0.10.2 From 9d53b1bebe23a6efe4f0a0ff75df2424e013fe07 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:34 +0100 Subject: uwb: initialize the debug sub-system Call uwb_dbg_init() so the debugfs files are accessible. 
Signed-off-by: David Vrabel diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c index 7eee8e4..521cdeb 100644 --- a/drivers/uwb/driver.c +++ b/drivers/uwb/driver.c @@ -119,6 +119,7 @@ static int __init uwb_subsys_init(void) if (result < 0) goto error_uwb_rc_class_register; uwbd_start(); + uwb_dbg_init(); return 0; error_uwb_rc_class_register: @@ -130,6 +131,7 @@ module_init(uwb_subsys_init); static void __exit uwb_subsys_exit(void) { + uwb_dbg_exit(); uwbd_stop(); class_unregister(&uwb_rc_class); uwb_est_destroy(); -- cgit v0.10.2 From e477a4982ff4e94d7de0bc522817b152f52e6bf9 Mon Sep 17 00:00:00 2001 From: Anderson Lizardo Date: Wed, 17 Sep 2008 16:34:35 +0100 Subject: uwb: disable command/event filtering for D-Link DUB-1210 The D-Link DUB-1210 HWA uses commands and events from the WHCI specification, although reporting itself as WUSB compliant. Therefore, we disable WUSB command/event filtering for it. USB_DEVICE_AND_INTERFACE_INFO is used for matching only the RC interface. Signed-off-by: Anderson Lizardo Signed-off-by: David Vrabel diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index f822a18..9b7e23d 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -62,6 +62,9 @@ #define D_LOCAL 1 #include +/* The device uses commands and events from the WHCI specification, although + * reporting itself as WUSB compliant. */ +#define WUSB_QUIRK_WHCI_CMD_EVT 0x01 /** * Descriptor for an instance of the UWB Radio Control Driver that @@ -835,8 +838,13 @@ static int hwarc_probe(struct usb_interface *iface, uwb_rc->stop = hwarc_neep_release; uwb_rc->cmd = hwarc_cmd; uwb_rc->reset = hwarc_reset; - uwb_rc->filter_cmd = hwarc_filter_cmd; - uwb_rc->filter_event = hwarc_filter_event; + if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) { + uwb_rc->filter_cmd = NULL; + uwb_rc->filter_event = NULL; + } else { + uwb_rc->filter_cmd = hwarc_filter_cmd; + uwb_rc->filter_event = hwarc_filter_event; + } result = uwb_rc_add(uwb_rc, dev, hwarc); if (result < 0) @@ -876,6 +884,10 @@ static void hwarc_disconnect(struct usb_interface *iface) /** USB device ID's that we handle */ static struct usb_device_id hwarc_id_table[] = { + /* D-Link DUB-1210 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02), + .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, + /* Generic match for the Radio Control interface */ { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), }, { }, }; -- cgit v0.10.2 From fa21183326882a886ecf74cfebcfb479d56e8469 Mon Sep 17 00:00:00 2001 From: Anderson Lizardo Date: Wed, 17 Sep 2008 16:34:36 +0100 Subject: uwb: add Intel i1480 HWA to the UWB RC quirk table The Intel i1480 HWA uses WHCI commands/events even though reporting itself as WUSB compliant. This patch fixes this by marking it with the WUSB_QUIRK_WHCI_CMD_EVT flag, which disables WUSB command/event filtering. 
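Both this quirk and the D-Link one above use the same mechanism: a flag stored in the .driver_info field of a usb_device_id entry, which probe() later sees as id->driver_info and uses to skip installing the WUSB command/event filters. The sketch below is not part of either patch; the vendor/product IDs are placeholders rather than real devices, and 0xe0/0x01/0x02 is the Radio Control interface class triplet the driver matches.

/*
 * Sketch only: attach a quirk flag to specific devices while still
 * matching the generic Radio Control interface for everything else.
 * The VID/PID values here are placeholders.
 */
static const struct usb_device_id example_quirk_table[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0xabcd, 0xe0, 0x01, 0x02),
	  .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
	{ USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },	/* generic RC match */
	{ }
};

As in the patches, the more specific entries must appear before the catch-all interface match so that the quirky devices actually pick up their flag.
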
Signed-off-by: Anderson Lizardo Signed-off-by: David Vrabel diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 9b7e23d..3d26fa0 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -887,6 +887,9 @@ static struct usb_device_id hwarc_id_table[] = { /* D-Link DUB-1210 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02), .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, + /* Intel i1480 (using firmware 1.3PA2-20070828) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02), + .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, /* Generic match for the Radio Control interface */ { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), }, { }, -- cgit v0.10.2 From 8c7e8cb85557cc500122f3e489936582b7d11a7c Mon Sep 17 00:00:00 2001 From: Anderson Lizardo Date: Wed, 17 Sep 2008 16:34:37 +0100 Subject: uwb: i1480: remove MAC/PHY information checking function Some hardware/firmware combinations (most notably an IOGear HWA using the i1480 firmware) kill the host controller after issuing a GET_MAC_PHY_INFO command. Removing this check seems harmless otherwise. The patch fixes the issue where the HC is killed, showing the message: ehci_hcd 0000:00:1d.7: HC died; cleaning up After this error, USB comes back only after reloading the ehci_hcd module. Signed-off-by: Anderson Lizardo Signed-off-by: David Vrabel diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c index ebffaf5..aec4146 100644 --- a/drivers/uwb/i1480/dfu/dfu.c +++ b/drivers/uwb/i1480/dfu/dfu.c @@ -127,98 +127,6 @@ error: EXPORT_SYMBOL_GPL(i1480_cmd); -/** - * Get information about the MAC and PHY - * - * @wa: Wired adaptor - * @neh: Notification/event handler - * @reply: Pointer to the reply event buffer - * @returns: 0 if ok, < 0 errno code on error. - */ -static -int i1480_cmd_get_mac_phy_info(struct i1480 *i1480) -{ - int result; - struct uwb_rccb *cmd = i1480->cmd_buf; - struct i1480_evt_confirm_GMPI *reply = i1480->evt_buf; - - cmd->bCommandType = i1480_CET_VS1; - cmd->wCommand = cpu_to_le16(i1480_CMD_GET_MAC_PHY_INFO); - reply->rceb.bEventType = i1480_CET_VS1; - reply->rceb.wEvent = i1480_EVT_GET_MAC_PHY_INFO; - result = i1480_cmd(i1480, "GET_MAC_PHY_INFO", sizeof(*cmd), - sizeof(*reply)); - if (result < 0) - goto out; - if (le16_to_cpu(reply->status) != 0x00) { - dev_err(i1480->dev, - "GET_MAC_PHY_INFO: command execution failed: %d\n", - reply->status); - result = -EIO; - } -out: - return result; -} - - -/** - * Get i1480's info and print it - * - * @wa: Wire Adapter - * @neh: Notification/event handler - * @returns: 0 if ok, < 0 errno code on error. 
- */ -static -int i1480_check_info(struct i1480 *i1480) -{ - struct i1480_evt_confirm_GMPI *reply = i1480->evt_buf; - int result; - unsigned mac_fw_rev; -#if i1480_FW <= 0x00000302 - unsigned phy_fw_rev; -#endif - if (i1480->quirk_no_check_info) { - dev_err(i1480->dev, "firmware info check disabled\n"); - return 0; - } - - result = i1480_cmd_get_mac_phy_info(i1480); - if (result < 0) { - dev_err(i1480->dev, "Cannot get MAC & PHY information: %d\n", - result); - goto out; - } - mac_fw_rev = le16_to_cpu(reply->mac_fw_rev); -#if i1480_FW > 0x00000302 - dev_info(i1480->dev, - "HW v%02hx " - "MAC FW v%02hx.%02hx caps %04hx " - "PHY type %02hx v%02hx caps %02hx %02hx %02hx\n", - reply->hw_rev, mac_fw_rev >> 8, mac_fw_rev & 0xff, - le16_to_cpu(reply->mac_caps), - reply->phy_vendor, reply->phy_rev, - reply->phy_caps[0], reply->phy_caps[1], reply->phy_caps[2]); -#else - phy_fw_rev = le16_to_cpu(reply->phy_fw_rev); - dev_info(i1480->dev, "MAC FW v%02hx.%02hx caps %04hx " - " PHY FW v%02hx.%02hx caps %04hx\n", - mac_fw_rev >> 8, mac_fw_rev & 0xff, - le16_to_cpu(reply->mac_caps), - phy_fw_rev >> 8, phy_fw_rev & 0xff, - le16_to_cpu(reply->phy_caps)); -#endif - dev_dbg(i1480->dev, - "key-stores:%hu mcast-addr-stores:%hu sec-modes:%hu\n", - (unsigned short) reply->key_stores, - le16_to_cpu(reply->mcast_addr_stores), - (unsigned short) reply->sec_mode_supported); - /* FIXME: complain if fw version too low -- pending for - * numbering to stabilize */ -out: - return result; -} - - static int i1480_print_state(struct i1480 *i1480) { @@ -264,12 +172,10 @@ int i1480_fw_upload(struct i1480 *i1480) i1480_print_state(i1480); goto error_rc_release; } - result = i1480_check_info(i1480); - if (result < 0) { - dev_warn(i1480->dev, "Warning! Cannot check firmware info: %d\n", - result); - result = 0; - } + /* + * FIXME: find some reliable way to check whether firmware is running + * properly. Maybe use some standard request that has no side effects? + */ dev_info(i1480->dev, "firmware uploaded successfully\n"); error_rc_release: if (i1480->rc_release) diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h index 4103b28..8035418 100644 --- a/drivers/uwb/i1480/dfu/i1480-dfu.h +++ b/drivers/uwb/i1480/dfu/i1480-dfu.h @@ -56,7 +56,6 @@ * request_firmware() * i1480_mpi_write() * i1480->cmd() [i1480_{usb,pci}_cmd()] - * i1480_check_info() * * Once the probe function enumerates the device and uploads the * firmware, we just exit with -ENODEV, as we don't really want to @@ -130,8 +129,6 @@ struct i1480 { void *evt_buf, *cmd_buf; ssize_t evt_result; struct completion evt_complete; - - u8 quirk_no_check_info:1; }; static inline -- cgit v0.10.2 From b5784f10d559b3c7b25874b21702ad0907b6fcb7 Mon Sep 17 00:00:00 2001 From: Anderson Lizardo Date: Wed, 17 Sep 2008 16:34:38 +0100 Subject: uwb: i1480/GUWA100U: fix firmware download issues IOGear firmware versions >= 1.4.12224 fail to be downloaded because of a spurious (and harmless) RCEB received after the download notification. This patch handles this RCEB and keeps compatibility with future versions that might not emit this RCEB. i1480_rceb_check() is reused to check for the RCEB. It is also refactored with improved comments and reused in another place in mac.c where the checking was being duplicated. This patch was tested on both i1480 and GUWA100U HWAs, with all firmware versions currently available. 
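For readers following the fix: every reply or notification from the device starts with an RCEB header, and i1480_rceb_check() (reworked below) simply compares its three header fields against the expected context, type and event. The spurious IOGear notification is recognized as type 0xfd, event 0x0022, context 0, with the command-name argument passed as NULL so nothing is logged while probing for it. The sketch below is illustrative only; the structure name is invented here and the layout approximate, but the field names are the ones the driver checks.

/*
 * Illustration (not the kernel's definition): the header fields that
 * i1480_rceb_check() compares when matching a reply to a command.
 */
struct rceb_sketch {
	u8	bEventType;	/* e.g. i1480_CET_VS1 for vendor events */
	__le16	wEvent;		/* event code, little endian on the wire */
	u8	bEventContext;	/* ties the event back to a command */
};
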
Signed-off-by: Anderson Lizardo diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c index aec4146..9097b3b 100644 --- a/drivers/uwb/i1480/dfu/dfu.c +++ b/drivers/uwb/i1480/dfu/dfu.c @@ -37,29 +37,45 @@ #define D_LOCAL 0 #include -/** @return 0 if If @evt is a valid reply event; otherwise complain */ +/** + * i1480_rceb_check - Check RCEB for expected field values + * @i1480: pointer to device for which RCEB is being checked + * @rceb: RCEB being checked + * @cmd: which command the RCEB is related to + * @context: expected context + * @expected_type: expected event type + * @expected_event: expected event + * + * If @cmd is NULL, do not print error messages, but still return an error + * code. + * + * Return 0 if @rceb matches the expected values, -EINVAL otherwise. + */ int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, - const char *cmd, u8 context, - unsigned expected_type, unsigned expected_event) + const char *cmd, u8 context, u8 expected_type, + unsigned expected_event) { int result = 0; struct device *dev = i1480->dev; if (rceb->bEventContext != context) { - dev_err(dev, "%s: " - "unexpected context id 0x%02x (expected 0x%02x)\n", - cmd, rceb->bEventContext, context); + if (cmd) + dev_err(dev, "%s: unexpected context id 0x%02x " + "(expected 0x%02x)\n", cmd, + rceb->bEventContext, context); result = -EINVAL; } if (rceb->bEventType != expected_type) { - dev_err(dev, "%s: " - "unexpected event type 0x%02x (expected 0x%02x)\n", - cmd, rceb->bEventType, expected_type); + if (cmd) + dev_err(dev, "%s: unexpected event type 0x%02x " + "(expected 0x%02x)\n", cmd, + rceb->bEventType, expected_type); result = -EINVAL; } if (le16_to_cpu(rceb->wEvent) != expected_event) { - dev_err(dev, "%s: " - "unexpected event 0x%04x (expected 0x%04x)\n", - cmd, le16_to_cpu(rceb->wEvent), expected_event); + if (cmd) + dev_err(dev, "%s: unexpected event 0x%04x " + "(expected 0x%04x)\n", cmd, + le16_to_cpu(rceb->wEvent), expected_event); result = -EINVAL; } return result; @@ -110,6 +126,20 @@ ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, cmd_name, result); goto error; } + /* + * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a + * spurious notification after firmware is downloaded. So check whether + * the receibed RCEB is such notification before assuming that the + * command has failed. + */ + if (i1480_rceb_check(i1480, i1480->evt_buf, NULL, + 0, 0xfd, 0x0022) == 0) { + /* Now wait for the actual RCEB for this command. 
*/ + result = i1480->wait_init_done(i1480); + if (result < 0) + goto error; + result = i1480->evt_result; + } if (result != reply_size) { dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n", cmd_name, result, reply_size); diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h index 8035418..46f45e8 100644 --- a/drivers/uwb/i1480/dfu/i1480-dfu.h +++ b/drivers/uwb/i1480/dfu/i1480-dfu.h @@ -145,7 +145,7 @@ extern int i1480_phy_fw_upload(struct i1480 *); extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t); extern int i1480_rceb_check(const struct i1480 *, const struct uwb_rceb *, const char *, u8, - unsigned, unsigned); + u8, unsigned); enum { /* Vendor specific command type */ diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c index 3d44554..8d06990 100644 --- a/drivers/uwb/i1480/dfu/mac.c +++ b/drivers/uwb/i1480/dfu/mac.c @@ -507,8 +507,8 @@ int i1480_mac_fw_upload(struct i1480 *i1480) goto error_size; } result = -EIO; - if (rcebe->rceb.bEventType != i1480_CET_VS1 - || le16_to_cpu(rcebe->rceb.wEvent) != i1480_EVT_RM_INIT_DONE) { + if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1, + i1480_EVT_RM_INIT_DONE) < 0) { dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x " "received; expected 0x%02x/%04x/00\n", rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent), -- cgit v0.10.2 From b63795fa3a41151040b86119750a7df508d40cda Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:39 +0100 Subject: uwb: dont tranmit identification IEs The current identification IE doesn't include any useful information (the vendor ID is from the EUI-48) and it causes problems with certain hardware/firmware so don't transmit one. Signed-off-by: David Vrabel diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c index d54fe09..cf6f3d1 100644 --- a/drivers/uwb/ie.c +++ b/drivers/uwb/ie.c @@ -539,32 +539,3 @@ int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) return result; } EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); - - -/** - * Create and set new Identification IE - * - * Currently only sets the Vendor ID. The Vendor ID is set from the OUI, - * which is obtained from the first three bytes from the MAC address. 
- */ -int uwb_rc_set_identification_ie(struct uwb_rc *uwb_rc) -{ - struct { - struct uwb_identification_ie id_ie; - struct uwb_dev_info dev_info; - struct uwb_vendor_id vendor_id; - } ie_data; - - ie_data.id_ie.hdr.element_id = UWB_IDENTIFICATION_IE; - ie_data.id_ie.hdr.length = sizeof(struct uwb_dev_info) + - sizeof(struct uwb_vendor_id); - - ie_data.dev_info.type = UWB_DEV_INFO_VENDOR_ID; - ie_data.dev_info.length = sizeof(struct uwb_vendor_id); - - ie_data.vendor_id.data[0] = uwb_rc->uwb_dev.mac_addr.data[0]; - ie_data.vendor_id.data[1] = uwb_rc->uwb_dev.mac_addr.data[1]; - ie_data.vendor_id.data[2] = uwb_rc->uwb_dev.mac_addr.data[2]; - - return uwb_rc_ie_add(uwb_rc, &ie_data.id_ie.hdr, sizeof(ie_data)); -} diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c index a21c96b..ee5772f 100644 --- a/drivers/uwb/lc-rc.c +++ b/drivers/uwb/lc-rc.c @@ -211,12 +211,6 @@ static int uwb_rc_setup(struct uwb_rc *rc) dev_err(dev, "cannot setup IE subsystem: %d\n", result); goto error_ie_setup; } - result = uwb_rc_set_identification_ie(rc); - if (result < 0) { - dev_err(dev, "cannot set Identification IE: %d\n", - result); - goto error_set_id_ie; - } result = uwb_rsv_setup(rc); if (result < 0) { dev_err(dev, "cannot setup reservation subsystem: %d\n", result); diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h index 4f525a8..2ad307d 100644 --- a/drivers/uwb/uwb-internal.h +++ b/drivers/uwb/uwb-internal.h @@ -74,7 +74,6 @@ extern void uwb_rc_ie_release(struct uwb_rc *); extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t); extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); -extern int uwb_rc_set_identification_ie(struct uwb_rc *); extern const char *uwb_rc_strerror(unsigned code); -- cgit v0.10.2 From b60066c141997ac2e4ef08459b75638ae86ae781 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:40 +0100 Subject: uwb: add symlinks in sysfs between radio controllers and PALs Add a facility for PALs to have symlinks to their radio controller (and vice-versa) and make WUSB host controllers use this. Signed-off-by: David Vrabel diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c index cc126b4..7cc51e9 100644 --- a/drivers/usb/wusbcore/pal.c +++ b/drivers/usb/wusbcore/pal.c @@ -26,6 +26,9 @@ int wusbhc_pal_register(struct wusbhc *wusbhc) { uwb_pal_init(&wusbhc->pal); + wusbhc->pal.name = "wusbhc"; + wusbhc->pal.device = wusbhc->usb_hcd.self.controller; + return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal); } diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c index 1149b1e..07c63a3 100644 --- a/drivers/usb/wusbcore/wusbhc.c +++ b/drivers/usb/wusbcore/wusbhc.c @@ -192,13 +192,8 @@ int wusbhc_create(struct wusbhc *wusbhc) result = wusbhc_sec_create(wusbhc); if (result < 0) goto error_sec_create; - result = wusbhc_pal_register(wusbhc); - if (result < 0) - goto error_pal_register; return 0; -error_pal_register: - wusbhc_sec_destroy(wusbhc); error_sec_create: wusbhc_rh_destroy(wusbhc); error_rh_create: @@ -235,7 +230,14 @@ int wusbhc_b_create(struct wusbhc *wusbhc) dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result); goto error_create_attr_group; } - /* Yep, I plan to add stuff here... 
*/ + + result = wusbhc_pal_register(wusbhc); + if (result < 0) + goto error_pal_register; + return 0; + +error_pal_register: + sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); error_create_attr_group: return result; } @@ -243,13 +245,13 @@ EXPORT_SYMBOL_GPL(wusbhc_b_create); void wusbhc_b_destroy(struct wusbhc *wusbhc) { + wusbhc_pal_unregister(wusbhc); sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group); } EXPORT_SYMBOL_GPL(wusbhc_b_destroy); void wusbhc_destroy(struct wusbhc *wusbhc) { - wusbhc_pal_unregister(wusbhc); wusbhc_sec_destroy(wusbhc); wusbhc_rh_destroy(wusbhc); wusbhc_devconnect_destroy(wusbhc); diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c index 5508993..1afb38e 100644 --- a/drivers/uwb/pal.c +++ b/drivers/uwb/pal.c @@ -39,6 +39,21 @@ EXPORT_SYMBOL_GPL(uwb_pal_init); */ int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) { + int ret; + + if (pal->device) { + ret = sysfs_create_link(&pal->device->kobj, + &rc->uwb_dev.dev.kobj, "uwb_rc"); + if (ret < 0) + return ret; + ret = sysfs_create_link(&rc->uwb_dev.dev.kobj, + &pal->device->kobj, pal->name); + if (ret < 0) { + sysfs_remove_link(&pal->device->kobj, "uwb_rc"); + return ret; + } + } + spin_lock(&rc->pal_lock); list_add(&pal->node, &rc->pals); spin_unlock(&rc->pal_lock); @@ -57,6 +72,11 @@ void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal) spin_lock(&rc->pal_lock); list_del(&pal->node); spin_unlock(&rc->pal_lock); + + if (pal->device) { + sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); + sysfs_remove_link(&pal->device->kobj, "uwb_rc"); + } } EXPORT_SYMBOL_GPL(uwb_pal_unregister); diff --git a/include/linux/uwb.h b/include/linux/uwb.h index 0cd3593..f9ccbd9 100644 --- a/include/linux/uwb.h +++ b/include/linux/uwb.h @@ -361,6 +361,9 @@ struct uwb_rc { /** * struct uwb_pal - a UWB PAL + * @name: descriptive name for this PAL (wushc, wlp, etc.). + * @device: a device for the PAL. Used to link the PAL and the radio + * controller in sysfs. * @new_rsv: called when a peer requests a reservation (may be NULL if * the PAL cannot accept reservation requests). * @@ -379,7 +382,8 @@ struct uwb_rc { */ struct uwb_pal { struct list_head node; - + const char *name; + struct device *device; void (*new_rsv)(struct uwb_rsv *rsv); }; -- cgit v0.10.2 From c8cf2465fcfc16877f4f9e8dccc6b89b543fa2c5 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:41 +0100 Subject: uwb: document UWB and WUSB sysfs files Add some brief documentation on the UWB and WUSB related sysfs files. Signed-off-by: David Vrabel diff --git a/Documentation/ABI/testing/sysfs-bus-umc b/Documentation/ABI/testing/sysfs-bus-umc new file mode 100644 index 0000000..948fec4 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-umc @@ -0,0 +1,28 @@ +What: /sys/bus/umc/ +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + The Wireless Host Controller Interface (WHCI) + specification describes a PCI-based device with + multiple capabilities; the UWB Multi-interface + Controller (UMC). + + The umc bus presents each of the individual + capabilties as a device. + +What: /sys/bus/umc/devices/.../capability_id +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + The ID of this capability, with 0 being the radio + controller capability. + +What: /sys/bus/umc/devices/.../version +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + The specification version this capability's hardware + interface complies with. 
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index 11a3c16..273a7f0 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -85,3 +85,46 @@ Description: Users: PowerTOP http://www.lesswatts.org/projects/powertop/ + +What: /sys/bus/usb/device/.../authorized +Date: July 2008 +KernelVersion: 2.6.26 +Contact: David Vrabel +Description: + Authorized devices are available for use by device + drivers, non-authorized one are not. By default, wired + USB devices are authorized. + + Certified Wireless USB devices are not authorized + initially and should be (by writing 1) after the + device has been authenticated. + +What: /sys/bus/usb/device/.../wusb_cdid +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + For Certified Wireless USB devices only. + + A devices's CDID, as 16 space-separated hex octets. + +What: /sys/bus/usb/device/.../wusb_ck +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + For Certified Wireless USB devices only. + + Write the device's connection key (CK) to start the + authentication of the device. The CK is 16 + space-separated hex octets. + +What: /sys/bus/usb/device/.../wusb_disconnect +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + For Certified Wireless USB devices only. + + Write a 1 to force the device to disconnect + (equivalent to unplugging a wired USB device). diff --git a/Documentation/ABI/testing/sysfs-class-usb_host b/Documentation/ABI/testing/sysfs-class-usb_host new file mode 100644 index 0000000..46b66ad1 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-usb_host @@ -0,0 +1,25 @@ +What: /sys/class/usb_host/usb_hostN/wusb_chid +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + Write the CHID (16 space-separated hex octets) for this host controller. + This starts the host controller, allowing it to accept connection from + WUSB devices. + + Set an all zero CHID to stop the host controller. + +What: /sys/class/usb_host/usb_hostN/wusb_trust_timeout +Date: July 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + Devices that haven't sent a WUSB packet to the host + within 'wusb_trust_timeout' ms are considered to have + disconnected and are removed. The default value of + 4000 ms is the value required by the WUSB + specification. + + Since this relates to security (specifically, the + lifetime of PTKs and GTKs) it should not be changed + from the default. diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc b/Documentation/ABI/testing/sysfs-class-uwb_rc new file mode 100644 index 0000000..a0d18db --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-uwb_rc @@ -0,0 +1,144 @@ +What: /sys/class/uwb_rc +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + Interfaces for WiMedia Ultra Wideband Common Radio + Platform (UWB) radio controllers. + + Familiarity with the ECMA-368 'High Rate Ultra + Wideband MAC and PHY Specification' is assumed. + +What: /sys/class/uwb_rc/beacon_timeout_ms +Date: July 2008 +KernelVersion: 2.6.27 +Description: + If no beacons are received from a device for at least + this time, the device will be considered to have gone + and it will be removed. The default is 3 superframes + (~197 ms) as required by the specification. 
+ +What: /sys/class/uwb_rc/uwbN/ +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + An individual UWB radio controller. + +What: /sys/class/uwb_rc/uwbN/beacon +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + Write: + + [] + + to start beaconing on a specific channel, or stop + beaconing if is -1. Valid channels depends + on the radio controller's supported band groups. + + may be used to try and join a specific + beacon group if more than one was found during a scan. + +What: /sys/class/uwb_rc/uwbN/scan +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + Write: + + [] + + to start (or stop) scanning on a channel. is one of: + 0 - scan + 1 - scan outside BP + 2 - scan while inactive + 3 - scanning disabled + 4 - scan (with start time of ) + +What: /sys/class/uwb_rc/uwbN/mac_address +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + The EUI-48, in colon-separated hex octets, for this + radio controller. A write will change the radio + controller's EUI-48 but only do so while the device is + not beaconing or scanning. + +What: /sys/class/uwb_rc/uwbN/wusbhc +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + A symlink to the device (if any) of the WUSB Host + Controller PAL using this radio controller. + +What: /sys/class/uwb_rc/uwbN// +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + A neighbour UWB device that has either been detected + as part of a scan or is a member of the radio + controllers beacon group. + +What: /sys/class/uwb_rc/uwbN//BPST +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + The time (using the radio controllers internal 1 ms + interval superframe timer) of the last beacon from + this device was received. + +What: /sys/class/uwb_rc/uwbN//DevAddr +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + The current DevAddr of this device in colon separated + hex octets. + +What: /sys/class/uwb_rc/uwbN//EUI_48 +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + + The EUI-48 of this device in colon separated hex + octets. + +What: /sys/class/uwb_rc/uwbN//BPST +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + +What: /sys/class/uwb_rc/uwbN//IEs +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + The latest IEs included in this device's beacon, in + space separated hex octets with one IE per line. + +What: /sys/class/uwb_rc/uwbN//LQE +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + Link Quality Estimate - the Signal to Noise Ratio + (SNR) of all packets received from this device in dB. + This gives an estimate on a suitable PHY rate. Refer + to [ECMA-368] section 13.3 for more details. + +What: /sys/class/uwb_rc/uwbN//RSSI +Date: July 2008 +KernelVersion: 2.6.27 +Contact: linux-usb@vger.kernel.org +Description: + Received Signal Strength Indication - the strength of + the received signal in dB. LQE is a more useful + measure of the radio link quality. 
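A minimal user-space sketch of driving the ABI documented above: start beaconing through a radio controller's beacon file, then write a CHID to the WUSB host controller's wusb_chid file so it accepts connections. This is not part of the patch; uwb0 and usb_host0 are placeholder device names, and the CHID is the same example value used by the wusb-cbaf script later in this series.

#include <stdio.h>

int main(void)
{
	FILE *f;

	/* Start beaconing on channel 13 (second value as in wusb-cbaf). */
	f = fopen("/sys/class/uwb_rc/uwb0/beacon", "w");
	if (f == NULL)
		return 1;
	fprintf(f, "13 0\n");
	fclose(f);

	/* Setting a CHID starts the WUSB host controller. */
	f = fopen("/sys/class/usb_host/usb_host0/wusb_chid", "w");
	if (f == NULL)
		return 1;
	fprintf(f, "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff\n");
	fclose(f);

	return 0;
}
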
diff --git a/Documentation/ABI/testing/sysfs-wusb_cbaf b/Documentation/ABI/testing/sysfs-wusb_cbaf new file mode 100644 index 0000000..a99c5f8 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-wusb_cbaf @@ -0,0 +1,100 @@ +What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_* +Date: August 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + Various files for managing Cable Based Association of + (wireless) USB devices. + + The sequence of operations should be: + + 1. Device is plugged in. + + 2. The connection manager (CM) sees a device with CBA capability. + (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE). + + 3. The CM writes the host name, supported band groups, + and the CHID (host ID) into the wusb_host_name, + wusb_host_band_groups and wusb_chid files. These + get sent to the device and the CDID (if any) for + this host is requested. + + 4. The CM can verify that the device's supported band + groups (wusb_device_band_groups) are compatible + with the host. + + 5. The CM reads the wusb_cdid file. + + 6. The CM looks it up its database. + + - If it has a matching CHID,CDID entry, the device + has been authorized before and nothing further + needs to be done. + + - If the CDID is zero (or the CM doesn't find a + matching CDID in its database), the device is + assumed to be not known. The CM may associate + the host with device by: writing a randomly + generated CDID to wusb_cdid and then a random CK + to wusb_ck (this uploads the new CC to the + device). + + CMD may choose to prompt the user before + associating with a new device. + + 7. Device is unplugged. + + References: + [WUSB-AM] Association Models Supplement to the + Certified Wireless Universal Serial Bus + Specification, version 1.0. + +What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_chid +Date: August 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + The CHID of the host formatted as 16 space-separated + hex octets. + + Writes fetches device's supported band groups and the + the CDID for any existing association with this host. + +What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_name +Date: August 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + A friendly name for the host as a UTF-8 encoded string. + +What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_band_groups +Date: August 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + The band groups supported by the host, in the format + defined in [WUSB-AM]. + +What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_device_band_groups +Date: August 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + The band groups supported by the device, in the format + defined in [WUSB-AM]. + +What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_cdid +Date: August 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + The device's CDID formatted as 16 space-separated hex + octets. + +What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_ck +Date: August 2008 +KernelVersion: 2.6.27 +Contact: David Vrabel +Description: + Write 16 space-separated random, hex octets to + associate with the device. -- cgit v0.10.2 From f1fa035f2bcb2b03cb90249e05ec2ae6927a7302 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 17 Sep 2008 16:34:42 +0100 Subject: wusb: wusb-cbaf (CBA driver) sysfs ABI simplification Simplify the sysfs ABI of the wusb-cbaf (Cable Based Association) driver: use one value per file and cause the write of the CHID to fetch the CDID (instead of requiring a separate read). 
Update the example wusb-cbaf script to work with this revised ABI. Signed-off-by: Felipe Zimmerle Signed-off-by: David Vrabel diff --git a/Documentation/usb/wusb-cbaf b/Documentation/usb/wusb-cbaf index a385478..2e78b70 100644 --- a/Documentation/usb/wusb-cbaf +++ b/Documentation/usb/wusb-cbaf @@ -70,32 +70,42 @@ EOF # FIXME: CHID should come from a database :), band group from the host host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" host_band_group="0001" -host_name="Linux-WUSB" +host_name=$(hostname) devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)" -hdevs="$(find /sys -name wusb_chid -printf "%h\n")" +hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)" result=0 case $1 in start) for dev in ${2:-$hdevs} do - uwb_rc=$(find $(dirname $(dirname $dev)) -iname uwb_rc:uwb*) - if cat $uwb_rc/uwb_rc/beacon | grep -q "channel: -1" + uwb_rc=$(readlink -f $dev/uwb_rc) + if cat $uwb_rc/beacon | grep -q -- "-1" then - echo 13 0 | cat > $uwb_rc/uwb_rc/beacon - echo I: started beaconing on ch 13 in host $(basename $uwb_rc) + echo 13 0 > $uwb_rc/beacon + echo I: started beaconing on ch 13 on $(basename $uwb_rc) >&2 fi - echo $host_CHID | cat > $dev/wusb_chid - echo I: started host $(basename $dev) + echo $host_CHID > $dev/wusb_chid + echo I: started host $(basename $dev) >&2 + done + ;; + stop) + for dev in ${2:-$hdevs} + do + echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid + echo I: stopped host $(basename $dev) >&2 + uwb_rc=$(readlink -f $dev/uwb_rc) + echo -1 | cat > $uwb_rc/beacon + echo I: stopped beaconing on $(basename $uwb_rc) >&2 done ;; set-chid) shift - for dev in ${2:-$devs} - do - echo "${2:-$host_CHID}" "${3:-$host_band_group}" "${4:-$host_name}" \ - | cat > $dev/wusb_host_info + for dev in ${2:-$devs}; do + echo "${4:-$host_name}" > $dev/wusb_host_name + echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups + echo ${2:-$host_CHID} > $dev/wusb_chid done ;; get-cdid) @@ -105,21 +115,17 @@ case $1 in done ;; set-cc) - for dev in ${2:-$devs} - do - shift - CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)" - CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)" - cat > $dev/wusb_cc < $dev/wusb_cdid + echo "$CK" > $dev/wusb_ck + + echo I: CC set >&2 + echo "CHID: $(cat $dev/wusb_chid)" + echo "CDID:$CDID" + echo "CK: $CK" done ;; help|h|--help|-h) diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig index add077e..4ea7669 100644 --- a/drivers/usb/wusbcore/Kconfig +++ b/drivers/usb/wusbcore/Kconfig @@ -15,3 +15,26 @@ config USB_WUSB To compile this support select Y (built in). It is safe to select even if you don't have the hardware. + +config USB_WUSB_CBAF + tristate "Support WUSB Cable Based Association (CBA)" + depends on USB + help + Some WUSB devices support Cable Based Association. It's used to + enable the secure communication between the host and the + device. + + Enable this option if your WUSB device must to be connected + via wired USB before establishing a wireless link. + + It is safe to select even if you don't have a compatible + hardware. + +config USB_WUSB_CBAF_DEBUG + bool "Enable CBA debug messages" + depends on USB_WUSB_CBAF + help + Say Y here if you want the CBA to produce a bunch of debug messages + to the system log. Select this if you are having a problem with + CBA support and want to see more of what is going on. 
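For reference, a typical run of the updated wusb-cbaf script above might look like the following sketch; when no sysfs path is given, each command falls back to every matching device or host it finds:

    # Illustrative wusb-cbaf session (optional path/CHID/band-group/name
    # arguments are omitted here, so the script's built-in defaults apply).
    sh wusb-cbaf start       # start beaconing and program the host CHID
    sh wusb-cbaf set-chid    # send host name, band groups and CHID to devices
    sh wusb-cbaf get-cdid    # print the CDID each device reports
    sh wusb-cbaf set-cc      # generate a random CDID/CK pair and upload it
    sh wusb-cbaf stop        # clear the CHID and stop beaconing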
+ diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile index 7a4d007..75f1ade 100644 --- a/drivers/usb/wusbcore/Makefile +++ b/drivers/usb/wusbcore/Makefile @@ -1,5 +1,7 @@ -obj-$(CONFIG_USB_WUSB) += wusbcore.o wusb-cbaf.o +obj-$(CONFIG_USB_WUSB) += wusbcore.o obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o +obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o + wusbcore-objs := \ crypto.o \ @@ -18,3 +20,7 @@ wusb-wa-objs := wa-hc.o \ wa-nep.o \ wa-rpipe.o \ wa-xfer.o + +ifeq ($(CONFIG_USB_WUSB_CBAF_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG +endif diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c index 584eabe..ab4788d 100644 --- a/drivers/usb/wusbcore/cbaf.c +++ b/drivers/usb/wusbcore/cbaf.c @@ -4,6 +4,7 @@ * * Copyright (C) 2006 Intel Corporation * Inaky Perez-Gonzalez + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version @@ -20,14 +21,13 @@ * 02110-1301, USA. * * - * WUSB devices have to be paired (authenticated in WUSB lingo) so + * WUSB devices have to be paired (associated in WUSB lingo) so * that they can connect to the system. * - * One way of pairing is using CBA-Cable Based Authentication, devices - * that can connect via wired or wireless USB. First time you plug - * them with a cable, pairing is done between host and device and - * subsequent times, you can connect wirelessly without having to - * pair. That's the idea. + * One way of pairing is using CBA-Cable Based Association. First + * time you plug the device with a cable, association is done between + * host and device and subsequent times, you can connect wirelessly + * without having to associate again. That's the idea. * * This driver does nothing Earth shattering. It just provides an * interface to chat with the wire-connected device so we can get a @@ -42,56 +42,49 @@ * * The process goes like this: * - * 1. device plugs, cbaf is loaded, notifications happen + * 1. Device plugs, cbaf is loaded, notifications happen. * - * 2. the connection manager sees a device with CBAF capability (the - * wusb_{host_info,cdid,cc} files are in /sys/device/blah/OURDEVICE). + * 2. The connection manager (CM) sees a device with CBAF capability + * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE). * - * 3. CM (connection manager) writes the CHID (host ID) and a host - * name into the wusb_host_info file. This gets sent to the device. + * 3. The CM writes the host name, supported band groups, and the CHID + * (host ID) into the wusb_host_name, wusb_host_band_groups and + * wusb_chid files. These get sent to the device and the CDID (if + * any) for this host is requested. * - * 4. CM cats the wusb_cdid file; this asks the device if it has any - * CDID associated to the CHDI we just wrote before. If it does, it - * is printed, along with the device 'friendly name' and the band - * groups the device supports. + * 4. The CM can verify that the device's supported band groups + * (wusb_device_band_groups) are compatible with the host. * - * 5. CM looks up its database + * 5. The CM reads the wusb_cdid file. * - * 5.1 If it has a matching CHID,CDID entry, the device has been - * authorized before (paired). Now we can optionally ask the user - * if he wants to allow the device to connect. Then we generate a - * new CDID and CK, send it to the device and update the database - * (writing to the wusb_cc file so they are uploaded to the device). + * 6. 
The CM looks up its database * - * 5.2 If the CDID is zero (or we didn't find a matching CDID in our - * database), we assume the device is not known. We ask the user - * if s/he wants to allow the device to be connected wirelessly - * to the system. If nope, nothing else is done (FIXME: maybe - * send a zero CDID to clean up our CHID?). If yes, we generate - * random CDID and CKs (and write them to the wusb_cc file so - * they are uploaded to the device). + * 6.1 If it has a matching CHID,CDID entry, the device has been + * authorized before (paired) and nothing further needs to be + * done. * - * 6. device is unplugged + * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in + * its database), the device is assumed to be not known. The CM + * may associate the host with device by: writing a randomly + * generated CDID to wusb_cdid and then a random CK to wusb_ck + * (this uploads the new CC to the device). * - * When the device tries to connect wirelessly, it will present it's - * CDID to the WUSB host controller with ID CHID, which will query the - * database. If found, the host will (with a 4way handshake) challenge - * the device to demonstrate it has the CK secret key (from our - * database) without actually exchanging it. Once satisfied, crypto - * keys are derived from the CK, the device is connected and all - * communication is crypted. + * CMD may choose to prompt the user before associating with a new + * device. * + * 7. Device is unplugged. * - * NOTES ABOUT THE IMPLEMENTATION + * When the device tries to connect wirelessly, it will present its + * CDID to the WUSB host controller. The CM will query the + * database. If the CHID/CDID pair found, it will (with a 4-way + * handshake) challenge the device to demonstrate it has the CK secret + * key (from our database) without actually exchanging it. Once + * satisfied, crypto keys are derived from the CK, the device is + * connected and all communication is encrypted. * - * The descriptors sent back and forth use this horrible format from - * hell on which each field is actually a field ID, field length and - * then the field itself. How stupid can that get, taking into account - * the structures are defined by the spec?? oh well. - * - * - * FIXME: we don't provide a way to tell the device the pairing failed - * (ie: send a CC_DATA_FAIL). Should add some day. + * References: + * [WUSB-AM] Association Models Supplement to the Certified Wireless + * Universal Serial Bus Specification, version 1.0. 
*/ #include #include @@ -105,9 +98,7 @@ #include #include -#undef D_LOCAL -#define D_LOCAL 6 -#include +#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */ /* An instance of a Cable-Based-Association-Framework device */ struct cbaf { @@ -116,24 +107,27 @@ struct cbaf { void *buffer; size_t buffer_size; - struct wusb_ckhdid chid;/* Host Information */ - char host_name[65]; /* max length: - Assoc Models Suplement 1.0[T4-7] */ + struct wusb_ckhdid chid; + char host_name[CBA_NAME_LEN]; u16 host_band_groups; - struct wusb_ckhdid cdid;/* Device Information */ - char device_name[65]; /* max length: - Assoc Models Suplement 1.0[T4-7] */ + struct wusb_ckhdid cdid; + char device_name[CBA_NAME_LEN]; u16 device_band_groups; - struct wusb_ckhdid ck; /* Connection Key */ + + struct wusb_ckhdid ck; }; /* * Verify that a CBAF USB-interface has what we need * - * (like we care, we are going to fail the enumeration if not :) + * According to [WUSB-AM], CBA devices should provide at least two + * interfaces: + * - RETRIEVE_HOST_INFO + * - ASSOCIATE * - * FIXME: ugly function, need to split + * If the device doesn't provide these interfaces, we do not know how + * to deal with it. */ static int cbaf_check(struct cbaf *cbaf) { @@ -143,8 +137,7 @@ static int cbaf_check(struct cbaf *cbaf) struct wusb_cbaf_assoc_request *assoc_request; size_t assoc_size; void *itr, *top; - unsigned ar_index; - int ar_rhi_idx = -1, ar_assoc_idx = -1; + int ar_rhi = 0, ar_assoc = 0; result = usb_control_msg( cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), @@ -153,93 +146,91 @@ static int cbaf_check(struct cbaf *cbaf) 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); if (result < 0) { - dev_err(dev, "cannot get available association types: %d\n", + dev_err(dev, "Cannot get available association types: %d\n", result); - goto error_get_assoc_types; + return result; } + assoc_info = cbaf->buffer; if (result < sizeof(*assoc_info)) { - dev_err(dev, "not enough data to decode association info " + dev_err(dev, "Not enough data to decode association info " "header (%zu vs %zu bytes required)\n", (size_t)result, sizeof(*assoc_info)); - goto error_bad_header; + return result; } + assoc_size = le16_to_cpu(assoc_info->Length); if (result < assoc_size) { - dev_err(dev, "not enough data to decode association info " + dev_err(dev, "Not enough data to decode association info " "(%zu vs %zu bytes required)\n", (size_t)assoc_size, sizeof(*assoc_info)); - goto error_bad_data; + return result; } /* * From now on, we just verify, but won't error out unless we * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE} * types. 
*/ - ar_index = 0; itr = cbaf->buffer + sizeof(*assoc_info); top = cbaf->buffer + assoc_size; - d_printf(1, dev, "Found %u association requests (%zu bytes)\n", + dev_dbg(dev, "Found %u association requests (%zu bytes)\n", assoc_info->NumAssociationRequests, assoc_size); + while (itr < top) { u16 ar_type, ar_subtype; u32 ar_size; const char *ar_name; assoc_request = itr; + if (top - itr < sizeof(*assoc_request)) { - dev_err(dev, "not enough data to decode associaton " + dev_err(dev, "Not enough data to decode associaton " "request (%zu vs %zu bytes needed)\n", top - itr, sizeof(*assoc_request)); break; } + ar_type = le16_to_cpu(assoc_request->AssociationTypeId); ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId); ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize); + ar_name = "unknown"; + switch (ar_type) { case AR_TYPE_WUSB: - /* Verify we have what is mandated by AMS1.0 */ + /* Verify we have what is mandated by [WUSB-AM]. */ switch (ar_subtype) { case AR_TYPE_WUSB_RETRIEVE_HOST_INFO: - ar_name = "retrieve_host_info"; - ar_rhi_idx = ar_index; + ar_name = "RETRIEVE_HOST_INFO"; + ar_rhi = 1; break; case AR_TYPE_WUSB_ASSOCIATE: /* send assoc data */ - ar_name = "associate"; - ar_assoc_idx = ar_index; + ar_name = "ASSOCIATE"; + ar_assoc = 1; break; - default: - ar_name = "unknown"; }; break; - default: - ar_name = "unknown"; }; - d_printf(1, dev, "association request #%02u: 0x%04x/%04x " + + dev_dbg(dev, "Association request #%02u: 0x%04x/%04x " "(%zu bytes): %s\n", assoc_request->AssociationDataIndex, ar_type, ar_subtype, (size_t)ar_size, ar_name); itr += sizeof(*assoc_request); - ar_index++; } - if (ar_rhi_idx == -1) { + + if (!ar_rhi) { dev_err(dev, "Missing RETRIEVE_HOST_INFO association " "request\n"); - goto error_bad_reqs; + return -EINVAL; } - if (ar_assoc_idx == -1) { + if (!ar_assoc) { dev_err(dev, "Missing ASSOCIATE association request\n"); - goto error_bad_reqs; + return -EINVAL; } - return 0; -error_bad_header: -error_bad_data: -error_bad_reqs: -error_get_assoc_types: - return -EINVAL; + return 0; } static const struct wusb_cbaf_host_info cbaf_host_info_defaults = { @@ -256,6 +247,7 @@ static const struct wusb_cbaf_host_info cbaf_host_info_defaults = { static int cbaf_send_host_info(struct cbaf *cbaf) { struct wusb_cbaf_host_info *hi; + size_t name_len; size_t hi_size; hi = cbaf->buffer; @@ -263,11 +255,11 @@ static int cbaf_send_host_info(struct cbaf *cbaf) *hi = cbaf_host_info_defaults; hi->CHID = cbaf->chid; hi->LangID = 0; /* FIXME: I guess... 
*/ - strncpy(hi->HostFriendlyName, cbaf->host_name, - hi->HostFriendlyName_hdr.len); - hi->HostFriendlyName_hdr.len = - cpu_to_le16(strlen(hi->HostFriendlyName)); - hi_size = sizeof(*hi) + strlen(hi->HostFriendlyName); + strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN); + name_len = strlen(cbaf->host_name); + hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len); + hi_size = sizeof(*hi) + name_len; + return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), CBAF_REQ_SET_ASSOCIATION_RESPONSE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, @@ -276,8 +268,47 @@ static int cbaf_send_host_info(struct cbaf *cbaf) hi, hi_size, 1000 /* FIXME: arbitrary */); } -/* Show current CHID info we have set from user space */ -static ssize_t cbaf_wusb_host_info_show(struct device *dev, +/* + * Get device's information (CDID) associated to CHID + * + * The device will return it's information (CDID, name, bandgroups) + * associated to the CHID we have set before, or 0 CDID and default + * name and bandgroup if no CHID set or unknown. + */ +static int cbaf_cdid_get(struct cbaf *cbaf) +{ + int result; + struct device *dev = &cbaf->usb_iface->dev; + struct wusb_cbaf_device_info *di; + size_t needed; + + di = cbaf->buffer; + result = usb_control_msg( + cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), + CBAF_REQ_GET_ASSOCIATION_REQUEST, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, + di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); + if (result < 0) { + dev_err(dev, "Cannot request device information: %d\n", result); + return result; + } + + needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length); + if (result < needed) { + dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs " + "%zu bytes needed)\n", (size_t)result, needed); + return result; + } + + strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN); + cbaf->cdid = di->CDID; + cbaf->device_band_groups = le16_to_cpu(di->BandGroups); + + return 0; +} + +static ssize_t cbaf_wusb_chid_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -286,17 +317,10 @@ static ssize_t cbaf_wusb_host_info_show(struct device *dev, char pr_chid[WUSB_CKHDID_STRSIZE]; ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid); - return scnprintf(buf, PAGE_SIZE, "CHID: %s\nName: %s\n", - pr_chid, cbaf->host_name); + return scnprintf(buf, PAGE_SIZE, "%s\n", pr_chid); } -/* - * Get a host info CHID from user space and send it to the device. - * - * The user can recover a CC from the device associated to that CHID - * by cat'ing wusb_connection_context. 
- */ -static ssize_t cbaf_wusb_host_info_store(struct device *dev, +static ssize_t cbaf_wusb_chid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { @@ -308,8 +332,7 @@ static ssize_t cbaf_wusb_host_info_store(struct device *dev, "%02hhx %02hhx %02hhx %02hhx " "%02hhx %02hhx %02hhx %02hhx " "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " - "%04hx %64s\n", + "%02hhx %02hhx %02hhx %02hhx", &cbaf->chid.data[0] , &cbaf->chid.data[1], &cbaf->chid.data[2] , &cbaf->chid.data[3], &cbaf->chid.data[4] , &cbaf->chid.data[5], @@ -317,24 +340,79 @@ static ssize_t cbaf_wusb_host_info_store(struct device *dev, &cbaf->chid.data[8] , &cbaf->chid.data[9], &cbaf->chid.data[10], &cbaf->chid.data[11], &cbaf->chid.data[12], &cbaf->chid.data[13], - &cbaf->chid.data[14], &cbaf->chid.data[15], - &cbaf->host_band_groups, cbaf->host_name); - if (result != 18) { - dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits, " - "a 16 bit hex band group mask " - "and a host name, got only %d)\n", (int)result); + &cbaf->chid.data[14], &cbaf->chid.data[15]); + + if (result != 16) return -EINVAL; - } + result = cbaf_send_host_info(cbaf); if (result < 0) - dev_err(dev, "Couldn't send host information to device: %d\n", - (int)result); - else - d_printf(1, dev, "HI sent, wusb_cc can be read now\n"); - return result < 0 ? result : size; + return result; + result = cbaf_cdid_get(cbaf); + if (result < 0) + return -result; + return size; +} +static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store); + +static ssize_t cbaf_wusb_host_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + + return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name); +} + +static ssize_t cbaf_wusb_host_name_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + ssize_t result; + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + + result = sscanf(buf, "%63s", cbaf->host_name); + if (result != 1) + return -EINVAL; + + return size; +} +static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show, + cbaf_wusb_host_name_store); + +static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + + return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups); +} + +static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + ssize_t result; + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + u16 band_groups = 0; + + result = sscanf(buf, "%04hx", &band_groups); + if (result != 1) + return -EINVAL; + + cbaf->host_band_groups = band_groups; + + return size; } -static DEVICE_ATTR(wusb_host_info, 0600, cbaf_wusb_host_info_show, - cbaf_wusb_host_info_store); + +static DEVICE_ATTR(wusb_host_band_groups, 0600, + cbaf_wusb_host_band_groups_show, + cbaf_wusb_host_band_groups_store); static const struct wusb_cbaf_device_info cbaf_device_info_defaults = { .Length_hdr = WUSB_AR_Length, @@ -344,77 +422,72 @@ static const struct wusb_cbaf_device_info cbaf_device_info_defaults = { .DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName, }; -/* - * Get device's information (CDID) associated to 
CHID - * - * The device will return it's information (CDID, name, bandgroups) - * associated to the CHID we have set before, or 0 CDID and default - * name and bandgroup if no CHID set or unknown. - */ -static int cbaf_cdid_get(struct cbaf *cbaf) +static ssize_t cbaf_wusb_cdid_show(struct device *dev, + struct device_attribute *attr, char *buf) { - int result; - struct device *dev = &cbaf->usb_iface->dev; - struct wusb_cbaf_device_info *di; - size_t needed, dev_name_size; + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + char pr_cdid[WUSB_CKHDID_STRSIZE]; - di = cbaf->buffer; - result = usb_control_msg( - cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0), - CBAF_REQ_GET_ASSOCIATION_REQUEST, - USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, - di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */); - if (result < 0) { - dev_err(dev, "Cannot request device information: %d\n", result); - goto error_req_di; - } - needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length); - if (result < needed) { - dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs " - "%zu bytes needed)\n", (size_t)result, needed); - goto error_bad_di; - } - cbaf->cdid = di->CDID; - dev_name_size = le16_to_cpu(di->DeviceFriendlyName_hdr.len); - dev_name_size = dev_name_size > 65 - 1 ? 65 - 1 : dev_name_size; - memcpy(cbaf->device_name, di->DeviceFriendlyName, dev_name_size); - cbaf->device_name[dev_name_size] = 0; - cbaf->device_band_groups = le16_to_cpu(di->BandGroups); - result = 0; -error_req_di: -error_bad_di: - return result; + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid); + return scnprintf(buf, PAGE_SIZE, "%s\n", pr_cdid); } -/* - * Get device information and print it to sysfs - * - * See cbaf_cdid_get() - */ -static ssize_t cbaf_wusb_cdid_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t cbaf_wusb_cdid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) { ssize_t result; struct usb_interface *iface = to_usb_interface(dev); struct cbaf *cbaf = usb_get_intfdata(iface); - char pr_cdid[WUSB_CKHDID_STRSIZE]; + struct wusb_ckhdid cdid; - result = cbaf_cdid_get(cbaf); - if (result < 0) { - dev_err(dev, "Cannot read device information: %d\n", - (int)result); - goto error_get_di; - } - ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid); - result = scnprintf(buf, PAGE_SIZE, - "CDID: %s\nName: %s\nBand_groups: 0x%04x\n", - pr_cdid, cbaf->device_name, - cbaf->device_band_groups); -error_get_di: - return result; + result = sscanf(buf, + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx " + "%02hhx %02hhx %02hhx %02hhx", + &cdid.data[0] , &cdid.data[1], + &cdid.data[2] , &cdid.data[3], + &cdid.data[4] , &cdid.data[5], + &cdid.data[6] , &cdid.data[7], + &cdid.data[8] , &cdid.data[9], + &cdid.data[10], &cdid.data[11], + &cdid.data[12], &cdid.data[13], + &cdid.data[14], &cdid.data[15]); + if (result != 16) + return -EINVAL; + + cbaf->cdid = cdid; + + return size; +} +static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store); + +static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + + return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups); +} + +static 
DEVICE_ATTR(wusb_device_band_groups, 0600, + cbaf_wusb_device_band_groups_show, + NULL); + +static ssize_t cbaf_wusb_device_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_interface *iface = to_usb_interface(dev); + struct cbaf *cbaf = usb_get_intfdata(iface); + + return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name); } -static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, NULL); +static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL); static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = { .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId, @@ -435,9 +508,7 @@ static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = { }; /* - * Send a new CC to the device - * - * So we update the CK and send the whole thing to the device + * Send a new CC to the device. */ static int cbaf_cc_upload(struct cbaf *cbaf) { @@ -452,30 +523,25 @@ static int cbaf_cc_upload(struct cbaf *cbaf) ccd->CDID = cbaf->cdid; ccd->CK = cbaf->ck; ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups); + + dev_dbg(dev, "Trying to upload CC:\n"); + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID); + dev_dbg(dev, " CHID %s\n", pr_cdid); + ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID); + dev_dbg(dev, " CDID %s\n", pr_cdid); + dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups); + result = usb_control_msg( cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0), CBAF_REQ_SET_ASSOCIATION_RESPONSE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber, ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */); - d_printf(1, dev, "Uploaded CC:\n"); - ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID); - d_printf(1, dev, " CHID %s\n", pr_cdid); - ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID); - d_printf(1, dev, " CDID %s\n", pr_cdid); - ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CK); - d_printf(1, dev, " CK %s\n", pr_cdid); - d_printf(1, dev, " bandgroups 0x%04x\n", cbaf->host_band_groups); + return result; } -/* - * Send a new CC to the device - * - * We take the CDID and CK from user space, the rest from the info we - * set with host_info. 
- */ -static ssize_t cbaf_wusb_cc_store(struct device *dev, +static ssize_t cbaf_wusb_ck_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { @@ -484,23 +550,10 @@ static ssize_t cbaf_wusb_cc_store(struct device *dev, struct cbaf *cbaf = usb_get_intfdata(iface); result = sscanf(buf, - "CDID: %02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx " "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx\n" - "CK: %02hhx %02hhx %02hhx %02hhx " "%02hhx %02hhx %02hhx %02hhx " "%02hhx %02hhx %02hhx %02hhx " - "%02hhx %02hhx %02hhx %02hhx\n", - &cbaf->cdid.data[0] , &cbaf->cdid.data[1], - &cbaf->cdid.data[2] , &cbaf->cdid.data[3], - &cbaf->cdid.data[4] , &cbaf->cdid.data[5], - &cbaf->cdid.data[6] , &cbaf->cdid.data[7], - &cbaf->cdid.data[8] , &cbaf->cdid.data[9], - &cbaf->cdid.data[10], &cbaf->cdid.data[11], - &cbaf->cdid.data[12], &cbaf->cdid.data[13], - &cbaf->cdid.data[14], &cbaf->cdid.data[15], - + "%02hhx %02hhx %02hhx %02hhx", &cbaf->ck.data[0] , &cbaf->ck.data[1], &cbaf->ck.data[2] , &cbaf->ck.data[3], &cbaf->ck.data[4] , &cbaf->ck.data[5], @@ -509,25 +562,25 @@ static ssize_t cbaf_wusb_cc_store(struct device *dev, &cbaf->ck.data[10], &cbaf->ck.data[11], &cbaf->ck.data[12], &cbaf->ck.data[13], &cbaf->ck.data[14], &cbaf->ck.data[15]); - if (result != 32) { - dev_err(dev, "Unrecognized CHID/CK (need 32 8-bit " - "hex digits, got only %d)\n", (int)result); + if (result != 16) return -EINVAL; - } + result = cbaf_cc_upload(cbaf); if (result < 0) - dev_err(dev, "Couldn't upload connection context: %d\n", - (int)result); - else - d_printf(1, dev, "Connection context uploaded\n"); - return result < 0 ? result : size; + return result; + + return size; } -static DEVICE_ATTR(wusb_cc, 0600, NULL, cbaf_wusb_cc_store); +static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store); static struct attribute *cbaf_dev_attrs[] = { - &dev_attr_wusb_host_info.attr, + &dev_attr_wusb_host_name.attr, + &dev_attr_wusb_host_band_groups.attr, + &dev_attr_wusb_chid.attr, &dev_attr_wusb_cdid.attr, - &dev_attr_wusb_cc.attr, + &dev_attr_wusb_device_name.attr, + &dev_attr_wusb_device_band_groups.attr, + &dev_attr_wusb_ck.attr, NULL, }; @@ -539,32 +592,33 @@ static struct attribute_group cbaf_dev_attr_group = { static int cbaf_probe(struct usb_interface *iface, const struct usb_device_id *id) { - int result; struct cbaf *cbaf; struct device *dev = &iface->dev; + int result = -ENOMEM; - result = -ENOMEM; cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL); - if (cbaf == NULL) { - dev_err(dev, "Unable to allocate instance\n"); + if (cbaf == NULL) goto error_kzalloc; - } cbaf->buffer = kmalloc(512, GFP_KERNEL); if (cbaf->buffer == NULL) goto error_kmalloc_buffer; + cbaf->buffer_size = 512; cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface)); cbaf->usb_iface = usb_get_intf(iface); result = cbaf_check(cbaf); - if (result < 0) + if (result < 0) { + dev_err(dev, "This device is not WUSB-CBAF compliant" + "and is not supported yet.\n"); goto error_check; + } + result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group); if (result < 0) { dev_err(dev, "Can't register sysfs attr group: %d\n", result); goto error_create_group; } usb_set_intfdata(iface, cbaf); - d_printf(2, dev, "CBA attached\n"); return 0; error_create_group: @@ -587,7 +641,6 @@ static void cbaf_disconnect(struct usb_interface *iface) /* paranoia: clean up crypto keys */ memset(cbaf, 0, sizeof(*cbaf)); kfree(cbaf); - d_printf(1, dev, "CBA detached\n"); } static struct usb_device_id cbaf_id_table[] = { -- cgit v0.10.2 From 
6a7c3e464eb75310d011a6f2ea2953e6f5d91d55 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:43 +0100 Subject: uwb: depend on EXPERIMENTAL The UWB stack has some sysfs APIs that will change thus it's best marked as EXPERIMENTAL until these APIs are finalized. Signed-off-by: David Vrabel diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 5150441..72fb655 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -307,7 +307,8 @@ config SUPERH_ON_CHIP_R8A66597 SH7366 and SH7723 processors. config USB_WHCI_HCD - tristate "Wireless USB Host Controller Interface (WHCI) driver" + tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" + depends on EXPERIMENTAL depends on PCI && USB select USB_WUSB select UWB_WHCI @@ -319,7 +320,8 @@ config USB_WHCI_HCD will be called "whci-hcd". config USB_HWA_HCD - tristate "Host Wire Adapter (HWA) driver" + tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)" + depends on EXPERIMENTAL depends on USB select USB_WUSB select UWB_HWA diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig index 4ea7669..eb09a0a 100644 --- a/drivers/usb/wusbcore/Kconfig +++ b/drivers/usb/wusbcore/Kconfig @@ -2,7 +2,8 @@ # Wireless USB Core configuration # config USB_WUSB - tristate "Enable Wireless USB extensions" + tristate "Enable Wireless USB extensions (EXPERIMENTAL)" + depends on EXPERIMENTAL depends on USB select UWB select CRYPTO diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig index 317e9fe..ca78312 100644 --- a/drivers/uwb/Kconfig +++ b/drivers/uwb/Kconfig @@ -3,7 +3,8 @@ # menuconfig UWB - tristate "Ultra Wide Band devices" + tristate "Ultra Wideband devices (EXPERIMENTAL)" + depends on EXPERIMENTAL depends on PCI default n help -- cgit v0.10.2 From 18332a80c3e2000336ed2cb7ef9a1c6a3991ab61 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 17 Sep 2008 16:34:44 +0100 Subject: uwb: add entries in the MAINTAINERS file Add entries for the UWB, WUSB, and WLP subsystems to the MAINTAINERS file. 
Signed-off-by: David Vrabel diff --git a/MAINTAINERS b/MAINTAINERS index 0a613cb..63458274 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1048,6 +1048,12 @@ L: cbe-oss-dev@ozlabs.org W: http://www.ibm.com/developerworks/power/cell/ S: Supported +CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: +P: David Vrabel +M: david.vrabel@csr.com +L: linux-usb@vger.kernel.org +S: Supported + CFAG12864B LCD DRIVER P: Miguel Ojeda Sandonis M: miguel.ojeda.sandonis@gmail.com @@ -4137,6 +4143,12 @@ L: sparclinux@vger.kernel.org T: git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git S: Maintained +ULTRA-WIDEBAND (UWB) SUBSYSTEM: +P: David Vrabel +M: david.vrabel@csr.com +L: linux-usb@vger.kernel.org +S: Supported + UNIFORM CDROM DRIVER P: Jens Axboe M: axboe@kernel.dk @@ -4532,6 +4544,11 @@ M: zaga@fly.cc.fer.hr L: linux-scsi@vger.kernel.org S: Maintained +WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM +P: David Vrabel +M: david.vrabel@csr.com +S: Maintained + WISTRON LAPTOP BUTTON DRIVER P: Miloslav Trmac M: mitr@volny.cz -- cgit v0.10.2 From edfa042c880f62848aa55b3e71e538fe383929da Mon Sep 17 00:00:00 2001 From: Inaky Perez-Gonzalez Date: Wed, 17 Sep 2008 16:34:45 +0100 Subject: uwb: add credits for the original developers of the UWB/WUSB/WLP subsystems Signed-off-by: Inaky Perez-Gonzalez Signed-off-by: David Vrabel diff --git a/CREDITS b/CREDITS index c62dcb3..4fad671 100644 --- a/CREDITS +++ b/CREDITS @@ -598,6 +598,11 @@ S: Tamsui town, Taipei county, S: Taiwan 251 S: Republic of China +N: Reinette Chatre +E: reinette.chatre@intel.com +D: WiMedia Link Protocol implementation +D: UWB stack bits and pieces + N: Michael Elizabeth Chastain E: mec@shout.net D: Configure, Menuconfig, xconfig @@ -2695,6 +2700,12 @@ S: Demonstratsii 8-382 S: Tula 300000 S: Russia +N: Inaky Perez-Gonzalez +E: inaky.perez-gonzalez@intel.com +D: UWB stack, HWA-RC driver and HWA-HC drivers +D: Wireless USB additions to the USB stack +D: WiMedia Link Protocol bits and pieces + N: Gordon Peters E: GordPeters@smarttech.com D: Isochronous receive for IEEE 1394 driver (OHCI module). -- cgit v0.10.2 From 6b3141962dc82cfe1c30afdf91d564b309859cbe Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 19 Sep 2008 04:16:19 -0700 Subject: dmatest: properly handle duplicate DMA channels Update the the dmatest driver so that it handles duplicate DMA channels properly. When a DMA client is notified of an available DMA channel, it must check if it has already allocated resources for that channel. If so, it should return DMA_DUP. This can happen, for example, if a DMA driver calls dma_async_device_register() more than once. Acked-by: Haavard Skinnemoen Signed-off-by: Timur Tabi Signed-off-by: Dan Williams diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a08d197..422500c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -325,6 +325,11 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) struct dmatest_thread *thread; unsigned int i; + /* Have we already been told about this channel? */ + list_for_each_entry(dtc, &dmatest_channels, node) + if (dtc->chan == chan) + return DMA_DUP; + dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC); if (!dtc) { pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id); -- cgit v0.10.2 From 6fdb8bd47111d3f94be221082b725ec2dec1d5c7 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 19 Sep 2008 04:16:23 -0700 Subject: drivers/dma/dmatest.c: switch a GFP_ATOMIC to GFP_KERNEL It was needlessly using the unreliable GFP_ATOMIC. 
Cc: Timur Tabi Acked-by: Haavard Skinnemoen Signed-off-by: Andrew Morton Signed-off-by: Dan Williams diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 422500c..d1e381e 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -330,7 +330,7 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) if (dtc->chan == chan) return DMA_DUP; - dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC); + dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id); return DMA_NAK; -- cgit v0.10.2 From b817f7e020958c8f79842076c137daa6f72eb366 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 20 Sep 2008 20:16:35 +0900 Subject: sh: Disable 4kB stacks when using PAGE_SIZE_64KB. This combination triggers a divide by zero in kernel/fork.c when calculating the initial max_threads value: max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE); Simply disable 4K stacks on 64kB PAGE_SIZE to work around this, as it's not a terribly useful combination to begin with. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 4d2d102..e6d2c8b 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -82,7 +82,7 @@ config DEBUG_STACK_USAGE config 4KSTACKS bool "Use 4Kb for kernel stacks instead of 8Kb" - depends on DEBUG_KERNEL && (MMU || BROKEN) + depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB help If you say Y here the kernel will use a 4Kb stacksize for the kernel stack attached to each process/thread. This facilitates -- cgit v0.10.2 From c15c5f8c2bf0b00d036c5c6b67264764a6e5dffc Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 20 Sep 2008 20:21:33 +0900 Subject: sh: Support kernel stacks smaller than a page. This follows the powerpc commit f6a616800e68b61807d0f7bb0d5dc70665ef8046 '[POWERPC] Fix kernel stack allocation alignment'. SH has traditionally forced the thread order to be relative to the page size, so there were never any situations where the same bug was triggered by slub. Regardless, the usage of > 8kB stacks for the larger page sizes is overkill, so we switch to using slab allocations there, as per the powerpc change. 
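The divide-by-zero that motivated the CONFIG_4KSTACKS restriction above is plain C integer division: with THREAD_SIZE = 4096 and a 64 kB PAGE_SIZE, the max_threads divisor 8 * THREAD_SIZE / PAGE_SIZE truncates to zero. A quick shell check of the same expression:

    # 8 * THREAD_SIZE / PAGE_SIZE, the divisor used for max_threads
    echo $(( 8 * 4096 / 65536 ))   # 4 kB stacks on 64 kB pages -> 0 (divide by zero)
    echo $(( 8 * 8192 / 4096 ))    # 8 kB stacks on 4 kB pages  -> 16 (fine)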
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 0a894ca..f09ac48 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -33,20 +33,12 @@ struct thread_info { #define PREEMPT_ACTIVE 0x10000000 #if defined(CONFIG_4KSTACKS) -#define THREAD_SIZE_ORDER (0) -#elif defined(CONFIG_PAGE_SIZE_4KB) -#define THREAD_SIZE_ORDER (1) -#elif defined(CONFIG_PAGE_SIZE_8KB) -#define THREAD_SIZE_ORDER (1) -#elif defined(CONFIG_PAGE_SIZE_16KB) -#define THREAD_SIZE_ORDER (0) -#elif defined(CONFIG_PAGE_SIZE_64KB) -#define THREAD_SIZE_ORDER (0) +#define THREAD_SHIFT 12 #else -#error "Unknown thread size" +#define THREAD_SHIFT 13 #endif -#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_SIZE (1 << THREAD_SHIFT) #define STACK_WARN (THREAD_SIZE >> 3) /* @@ -94,15 +86,19 @@ static inline struct thread_info *current_thread_info(void) return ti; } +/* thread information allocation */ +#if THREAD_SHIFT >= PAGE_SHIFT + +#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) + +#else /* THREAD_SHIFT < PAGE_SHIFT */ + #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR -/* thread information allocation */ -#ifdef CONFIG_DEBUG_STACK_USAGE -#define alloc_thread_info(ti) kzalloc(THREAD_SIZE, GFP_KERNEL) -#else -#define alloc_thread_info(ti) kmalloc(THREAD_SIZE, GFP_KERNEL) -#endif -#define free_thread_info(ti) kfree(ti) +extern struct thread_info *alloc_thread_info(struct task_struct *tsk); +extern void free_thread_info(struct thread_info *ti); + +#endif /* THREAD_SHIFT < PAGE_SHIFT */ #endif /* __ASSEMBLY__ */ diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 31211bf..2a53943 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -265,6 +265,35 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif +#if THREAD_SHIFT < PAGE_SHIFT +static struct kmem_cache *thread_info_cache; + +struct thread_info *alloc_thread_info(struct task_struct *tsk) +{ + struct thread_info *ti; + + ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); + if (unlikely(ti == NULL)) + return NULL; +#ifdef CONFIG_DEBUG_STACK_USAGE + memset(ti, 0, THREAD_SIZE); +#endif + return ti; +} + +void free_thread_info(struct thread_info *ti) +{ + kmem_cache_free(thread_info_cache, ti); +} + +void thread_info_cache_init(void) +{ + thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, + THREAD_SIZE, 0, NULL); + BUG_ON(thread_info_cache == NULL); +} +#endif /* THREAD_SHIFT < PAGE_SHIFT */ + #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { -- cgit v0.10.2 From 837c946aad480d4773619707f115ed4c15738f77 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 10:24:18 +0900 Subject: sh: Copy in asm/sizes.h helper from ARM. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h new file mode 100644 index 0000000..503843d --- /dev/null +++ b/arch/sh/include/asm/sizes.h @@ -0,0 +1,56 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* DO NOT EDIT!! - this file automatically generated + * from .s file by awk -f s2h.awk + */ +/* Size definitions + * Copyright (C) ARM Limited 1998. All rights reserved. + */ + +#ifndef __sizes_h +#define __sizes_h 1 + +/* handy sizes */ +#define SZ_16 0x00000010 +#define SZ_256 0x00000100 +#define SZ_512 0x00000200 + +#define SZ_1K 0x00000400 +#define SZ_4K 0x00001000 +#define SZ_8K 0x00002000 +#define SZ_16K 0x00004000 +#define SZ_64K 0x00010000 +#define SZ_128K 0x00020000 +#define SZ_256K 0x00040000 +#define SZ_512K 0x00080000 + +#define SZ_1M 0x00100000 +#define SZ_2M 0x00200000 +#define SZ_4M 0x00400000 +#define SZ_8M 0x00800000 +#define SZ_16M 0x01000000 +#define SZ_32M 0x02000000 +#define SZ_64M 0x04000000 +#define SZ_128M 0x08000000 +#define SZ_256M 0x10000000 +#define SZ_512M 0x20000000 + +#define SZ_1G 0x40000000 +#define SZ_2G 0x80000000 + +#endif + +/* END */ -- cgit v0.10.2 From d3ea00a36da2e715217f0e31944dd220deefa38c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 10:31:57 +0900 Subject: sh: Add a few more definitions to asm/sizes.h. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h index 503843d..3a1fb97 100644 --- a/arch/sh/include/asm/sizes.h +++ b/arch/sh/include/asm/sizes.h @@ -25,6 +25,9 @@ /* handy sizes */ #define SZ_16 0x00000010 +#define SZ_32 0x00000020 +#define SZ_64 0x00000040 +#define SZ_128 0x00000080 #define SZ_256 0x00000100 #define SZ_512 0x00000200 @@ -32,6 +35,7 @@ #define SZ_4K 0x00001000 #define SZ_8K 0x00002000 #define SZ_16K 0x00004000 +#define SZ_32K 0x00008000 #define SZ_64K 0x00010000 #define SZ_128K 0x00020000 #define SZ_256K 0x00040000 @@ -42,6 +46,7 @@ #define SZ_4M 0x00400000 #define SZ_8M 0x00800000 #define SZ_16M 0x01000000 +#define SZ_26M 0x01a00000 #define SZ_32M 0x02000000 #define SZ_64M 0x04000000 #define SZ_128M 0x08000000 -- cgit v0.10.2 From 347cd34f4b32be30d2a6d92fe4d6eac04b00a637 Mon Sep 17 00:00:00 2001 From: Luca Santini Date: Sun, 21 Sep 2008 10:32:29 +0900 Subject: sh: edosk7760: Correct size of bootloader flash partition. This is 256K instead of 1M. [ Converted to use asm/sizes.h. 
-- PFM ] Signed-off-by: Luca Santini Signed-off-by: Paul Mundt diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c index 4890ba7..35dc099 100644 --- a/arch/sh/boards/board-edosk7760.c +++ b/arch/sh/boards/board-edosk7760.c @@ -30,6 +30,7 @@ #include #include #include +#include /* Bus state controller registers for CS4 area */ #define BSC_CS4BCR 0xA4FD0010 @@ -46,16 +47,16 @@ static struct mtd_partition edosk7760_nor_flash_partitions[] = { { .name = "bootloader", .offset = 0, - .size = (1 * 1024 * 1024), /*1MB*/ + .size = SZ_256K, .mask_flags = MTD_WRITEABLE, /* Read-only */ }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, - .size = (2 * 1024 * 1024), /*2MB*/ + .size = SZ_2M, }, { .name = "fs", .offset = MTDPART_OFS_APPEND, - .size = (26 * 1024 * 1024), + .size = SZ_26M, }, { .name = "other", .offset = MTDPART_OFS_APPEND, @@ -73,7 +74,7 @@ static struct resource edosk7760_nor_flash_resources[] = { [0] = { .name = "NOR Flash", .start = 0x00000000, - .end = (32 * 1024 * 1024) -1, /* 32MB*/ + .end = 0x00000000 + SZ_32M - 1, .flags = IORESOURCE_MEM, } }; @@ -145,7 +146,7 @@ static struct smc91x_platdata smc91x_info = { static struct resource smc91x_res[] = { [0] = { .start = SMC_IOADDR, - .end = SMC_IOADDR + 0x1f, + .end = SMC_IOADDR + SZ_32 - 1, .flags = IORESOURCE_MEM, }, [1] = { -- cgit v0.10.2 From 4c59e2942e92d2d776bcd038604a5c3c1d56d3ac Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 12:00:23 +0900 Subject: sh: Move lookup_exception_vector() out to asm/system_32.h. There are other places where we want to have access to the trap/exception number, so move out the lookup_exception_vector() helper. While we're at it, refactor it slightly to return the vector instead. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index f7f1056..a726d5d 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -97,6 +97,31 @@ do { \ : "=&r" (__dummy)); \ } while (0) +#ifdef CONFIG_CPU_HAS_SR_RB +#define lookup_exception_vector() \ +({ \ + unsigned long _vec; \ + \ + __asm__ __volatile__ ( \ + "stc r2_bank, %0\n\t" \ + : "=r" (_vec) \ + ); \ + \ + _vec; \ +}) +#else +#define lookup_exception_vector() \ +({ \ + unsigned long _vec; \ + __asm__ __volatile__ ( \ + "mov r4, %0\n\t" \ + : "=r" (_vec) \ + ); \ + \ + _vec; \ +}) +#endif + int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, struct mem_access *ma); diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 35b901e..b359b08 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -514,14 +514,6 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, return ret; } -#ifdef CONFIG_CPU_HAS_SR_RB -#define lookup_exception_vector(x) \ - __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x))) -#else -#define lookup_exception_vector(x) \ - __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x))) -#endif - /* * Handle various address error exceptions: * - instruction address error: @@ -545,7 +537,7 @@ asmlinkage void do_address_error(struct pt_regs *regs, /* Intentional ifdef */ #ifdef CONFIG_CPU_HAS_SR_RB - lookup_exception_vector(error_code); + error_code = lookup_exception_vector(); #endif oldfs = get_fs(); @@ -686,7 +678,7 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, } #endif - lookup_exception_vector(error_code); + error_code = lookup_exception_vector(); local_irq_enable(); CHK_REMOTE_DEBUG(regs); @@ -759,7 +751,7 @@ asmlinkage void 
do_illegal_slot_inst(unsigned long r4, unsigned long r5, /* not a FPU inst. */ #endif - lookup_exception_vector(inst); + inst = lookup_exception_vector(); local_irq_enable(); CHK_REMOTE_DEBUG(regs); @@ -774,7 +766,7 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, struct pt_regs *regs = RELOC_HIDE(&__regs, 0); long ex; - lookup_exception_vector(ex); + ex = lookup_exception_vector(); die_if_kernel("exception", regs, ex); } -- cgit v0.10.2 From 887f1ae3bc1701604a7b5ef145e1021072675444 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 12:06:43 +0900 Subject: sh: Look up the trap vector for the page fault notifier. Signed-off-by: Paul Mundt diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 659811c..ef01f45 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -21,26 +21,21 @@ #include #include -#ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs, int trap) { int ret = 0; +#ifdef CONFIG_KPROBES if (!user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, trap)) ret = 1; preempt_enable(); } +#endif return ret; } -#else -static inline int notify_page_fault(struct pt_regs *regs, int trap) -{ - return 0; -} -#endif /* * This routine handles page faults. It determines the address, @@ -58,7 +53,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, int fault; siginfo_t info; - if (notify_page_fault(regs, writeaccess)) + if (notify_page_fault(regs, lookup_exception_vector())) return; #ifdef CONFIG_SH_KGDB @@ -293,7 +288,7 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, pte_t *pte; pte_t entry; - if (notify_page_fault(regs, writeaccess)) + if (notify_page_fault(regs, lookup_exception_vector())) return 0; #ifdef CONFIG_SH_KGDB -- cgit v0.10.2 From 8f2baee28093ea77c7cc8da45049fd94cc76998e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 12:11:25 +0900 Subject: sh: Kill off duplicate page fault notifiers in slow path. We already have hooks in place in the __do_page_fault() fast-path, so kill them off in the slow path. Signed-off-by: Paul Mundt diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index ef01f45..08a08ea 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -53,13 +53,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, int fault; siginfo_t info; - if (notify_page_fault(regs, lookup_exception_vector())) - return; - -#ifdef CONFIG_SH_KGDB - if (kgdb_nofault && kgdb_bus_err_hook) - kgdb_bus_err_hook(); -#endif + /* + * We don't bother with any notifier callbacks here, as they are + * all handled through the __do_page_fault() fast-path. + */ tsk = current; si_code = SEGV_MAPERR; -- cgit v0.10.2 From 3d58695edbfac785161bf282dc11fd42a483d6c9 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 13:56:39 +0900 Subject: sh: Trivial trace_mark() instrumentation for core events. This implements a few trace points across events that are deemed interesting. This implements a number of trace points: - The page fault handler / TLB miss - IPC calls - Kernel thread creation The original LTTng patch had the slow-path instrumented, which fails to account for the vast majority of events. In general placing this in the fast-path is not a huge performance hit, as we don't take page faults for kernel addresses. The other bits of interest are some of the other trap handlers, as well as the syscall entry/exit (which is better off being handled through the tracehook API). 
Most of the other trap handlers are corner cases where alternate means of notification exist, so there is little value in placing extra trace points in these locations. Based on top of the points provided both by the LTTng instrumentation patch as well as the patch shipping in the ST-Linux tree, albeit in a stripped down form. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 914e543..7b013aa 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -169,6 +169,7 @@ __asm__(".align 5\n" int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { struct pt_regs regs; + int pid; memset(®s, 0, sizeof(regs)); regs.regs[4] = (unsigned long)arg; @@ -178,8 +179,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) regs.sr = (1 << 30); /* Ok, create the new process.. */ - return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, - ®s, 0, NULL, NULL); + pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, + ®s, 0, NULL, NULL); + + trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn); + + return pid; } /* diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index d0dddc4..b7aa092 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -396,6 +396,7 @@ ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *)) int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { struct pt_regs regs; + int pid; memset(®s, 0, sizeof(regs)); regs.regs[2] = (unsigned long)arg; @@ -404,8 +405,13 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) regs.pc = (unsigned long)kernel_thread_helper; regs.sr = (1 << 30); - return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, - ®s, 0, NULL, NULL); + /* Ok, create the new process.. */ + pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, + ®s, 0, NULL, NULL); + + trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn); + + return pid; } /* diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 0dfb889..38f098c 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c @@ -171,6 +171,8 @@ asmlinkage int sys_ipc(uint call, int first, int second, version = call >> 16; /* hack for backward compatibility */ call &= 0xffff; + trace_mark(kernel_arch_ipc_call, "call %u first %d", call, first); + if (call <= SEMTIMEDOP) switch (call) { case SEMOP: diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 08a08ea..898d477 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -15,28 +15,13 @@ #include #include #include +#include #include #include #include #include #include -static inline int notify_page_fault(struct pt_regs *regs, int trap) -{ - int ret = 0; - -#ifdef CONFIG_KPROBES - if (!user_mode(regs)) { - preempt_disable(); - if (kprobe_running() && kprobe_fault_handler(regs, trap)) - ret = 1; - preempt_enable(); - } -#endif - - return ret; -} - /* * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -261,6 +246,25 @@ do_sigbus: goto no_context; } +static inline int notify_page_fault(struct pt_regs *regs, int trap) +{ + int ret = 0; + + trace_mark(kernel_arch_trap_entry, "trap_id %d ip #p%ld", + trap >> 5, instruction_pointer(regs)); + +#ifdef CONFIG_KPROBES + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, trap)) + ret = 1; + preempt_enable(); + } +#endif + + return ret; +} + #ifdef CONFIG_SH_STORE_QUEUES /* * This is a special case for the SH-4 store queues, as pages for this @@ -284,15 +288,18 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, pmd_t *pmd; pte_t *pte; pte_t entry; + int ret = 0; if (notify_page_fault(regs, lookup_exception_vector())) - return 0; + goto out; #ifdef CONFIG_SH_KGDB if (kgdb_nofault && kgdb_bus_err_hook) kgdb_bus_err_hook(); #endif + ret = 1; + /* * We don't take page faults for P1, P2, and parts of P4, these * are always mapped, whether it be due to legacy behaviour in @@ -302,24 +309,23 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, pgd = pgd_offset_k(address); } else { if (unlikely(address >= TASK_SIZE || !current->mm)) - return 1; + goto out; pgd = pgd_offset(current->mm, address); } pud = pud_offset(pgd, address); if (pud_none_or_clear_bad(pud)) - return 1; + goto out; pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) - return 1; - + goto out; pte = pte_offset_kernel(pmd, address); entry = *pte; if (unlikely(pte_none(entry) || pte_not_present(entry))) - return 1; + goto out; if (unlikely(writeaccess && !pte_write(entry))) - return 1; + goto out; if (writeaccess) entry = pte_mkdirty(entry); @@ -336,5 +342,8 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, set_pte(pte, entry); update_mmu_cache(NULL, address, entry); - return 0; + ret = 0; +out: + trace_mark(kernel_arch_trap_exit, MARK_NOARGS); + return ret; } -- cgit v0.10.2 From 9d2b1f81dd93b198e12bca8120afec4a7b609b06 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 16:43:45 +0900 Subject: sh: ftrace support. This adds support for ftrace to SH. This only includes CONFIG_FTRACE, and does not handle dynamic ftrace presently. 
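Once this option is enabled, the tracer is driven from debugfs in the usual way; the mount point, directory layout and tracer names have shifted between kernel versions, so treat the following only as a rough sketch:

    # Rough sketch: enable the ftrace function tracer from userspace.
    mount -t debugfs nodev /sys/kernel/debug
    cd /sys/kernel/debug/tracing
    cat available_tracers           # the function tracer should be listed
    echo ftrace > current_tracer    # named "function" in later kernels
    head trace                      # one traced call (mcount hit) per line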
Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index adef42c..38a5a9e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -24,6 +24,7 @@ config SUPERH32 select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_ARCH_TRACEHOOK if !SH_FPU + select HAVE_FTRACE config SUPERH64 def_bool y if CPU_SH5 diff --git a/arch/sh/boot/compressed/Makefile_32 b/arch/sh/boot/compressed/Makefile_32 index 47685f6..301e6d5 100644 --- a/arch/sh/boot/compressed/Makefile_32 +++ b/arch/sh/boot/compressed/Makefile_32 @@ -23,6 +23,11 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) +ifeq ($(CONFIG_FTRACE),y) +ORIG_CFLAGS := $(KBUILD_CFLAGS) +KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) +endif + LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index efbb426..1a5cf9d 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -371,3 +371,47 @@ syscall_exit: #endif 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave + +#ifdef CONFIG_FTRACE + .align 2 + .globl _mcount + .type _mcount,@function + .globl mcount + .type mcount,@function +_mcount: +mcount: + mov.l r4, @-r15 + mov.l r5, @-r15 + mov.l r6, @-r15 + mov.l r7, @-r15 + sts.l pr, @-r15 + + mov.l @(20,r15),r4 + sts pr, r5 + + mov.l 1f, r6 + mov.l ftrace_stub, r7 + cmp/eq r6, r7 + bt skip_trace + + mov.l @r6, r6 + jsr @r6 + nop + +skip_trace: + + lds.l @r15+, pr + mov.l @r15+, r7 + mov.l @r15+, r6 + mov.l @r15+, r5 + rts + mov.l @r15+, r4 + + .align 2 +1: .long ftrace_trace_function + + .globl ftrace_stub +ftrace_stub: + rts + nop +#endif /* CONFIG_FTRACE */ diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 6e1b1c2..d917b7b 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -16,6 +16,7 @@ #include #include #include +#include extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); extern struct hw_interrupt_type no_irq_type; @@ -133,6 +134,9 @@ EXPORT_SYMBOL(__flush_purge_region); EXPORT_SYMBOL(clear_user_page); #endif +#ifdef CONFIG_FTRACE +EXPORT_SYMBOL(mcount); +#endif EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); #ifdef CONFIG_IPV6 -- cgit v0.10.2 From 6902aa84f565153ce05f3438ecb8e445d4f468d8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 17:14:42 +0900 Subject: doc: Add remaining SH parameters to kernel-parameters.txt. Signed-off-by: Paul Mundt diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 1150444..abeff96 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -796,6 +796,8 @@ and is between 256 and 4096 characters. It is defined in the file Defaults to the default architecture's huge page size if not specified. + hlt [BUGS=ARM,SH] + i8042.direct [HW] Put keyboard port into non-translated mode i8042.dumbkbd [HW] Pretend that controller can only read data from keyboard and cannot control its state @@ -1206,6 +1208,10 @@ and is between 256 and 4096 characters. It is defined in the file mem=nopentium [BUGS=X86-32] Disable usage of 4MB pages for kernel memory. + memchunk=nn[KMG] + [KNL,SH] Allow user to override the default size for + per-device physically contiguous DMA buffers. 
+ memmap=exactmap [KNL,X86-32,X86_64] Enable setting of an exact E820 memory map, as specified by the user. Such memmap=exactmap lines can be constructed based on @@ -1365,6 +1371,8 @@ and is between 256 and 4096 characters. It is defined in the file nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects. + nodsp [SH] Disable hardware DSP at boot time. + noefi [X86-32,X86-64] Disable EFI runtime services support. noexec [IA-64] @@ -1381,13 +1389,15 @@ and is between 256 and 4096 characters. It is defined in the file noexec32=off: disable non-executable mappings read implies executable mappings + nofpu [SH] Disable hardware FPU at boot time. + nofxsr [BUGS=X86-32] Disables x86 floating point extended register save and restore. The kernel will only save legacy floating-point registers on task switch. noclflush [BUGS=X86] Don't use the CLFLUSH instruction - nohlt [BUGS=ARM] + nohlt [BUGS=ARM,SH] no-hlt [BUGS=X86-32] Tells the kernel that the hlt instruction doesn't work correctly and not to -- cgit v0.10.2 From 4b4cf7595a8bce9b4dd64c241a8cb7336ecb9489 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 17:17:53 +0900 Subject: sh: Add missing asm/ftrace.h. This was missed with the ftrace support commit.. check it in now. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h new file mode 100644 index 0000000..3aed362 --- /dev/null +++ b/arch/sh/include/asm/ftrace.h @@ -0,0 +1,8 @@ +#ifndef __ASM_SH_FTRACE_H +#define __ASM_SH_FTRACE_H + +#ifndef __ASSEMBLY__ +extern void mcount(void); +#endif + +#endif /* __ASM_SH_FTRACE_H */ -- cgit v0.10.2 From e7ab3cd251926d57ee11d7d320e8fb42c882ad22 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 21 Sep 2008 19:04:55 +0900 Subject: sh: Add FPU registers to regset interface. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 38a5a9e..71be7ff 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -23,7 +23,7 @@ config SUPERH32 def_bool !SUPERH64 select HAVE_KPROBES select HAVE_KRETPROBES - select HAVE_ARCH_TRACEHOOK if !SH_FPU + select HAVE_ARCH_TRACEHOOK select HAVE_FTRACE config SUPERH64 diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 6b2cec8..4da3a0b 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -108,10 +108,10 @@ typedef struct user_fpu_struct elf_fpregset_t; #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_SH_FPU) +#ifdef CONFIG_SUPERH32 /* - * Enable dump using regset for general purpose registers, use this as - * the default once the FPU registers are moved over also. + * Enable dump using regset. + * This covers all of general/DSP/FPU regs. 
*/ #define CORE_DUMP_USE_REGSET #endif diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 91462fe..1d3aee0 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h @@ -30,8 +30,15 @@ static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs) } #endif +struct user_regset; + extern int do_fpu_inst(unsigned short, struct pt_regs *); +extern int fpregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf); + static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) { preempt_disable(); @@ -50,6 +57,18 @@ static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs) preempt_enable(); } +static inline int init_fpu(struct task_struct *tsk) +{ + if (tsk_used_math(tsk)) { + if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current) + unlazy_fpu(tsk, task_pt_regs(tsk)); + return 0; + } + + set_stopped_child_used_math(tsk); + return 0; +} + #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_FPU_H */ diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 7b013aa..b965f02 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -7,7 +7,11 @@ * * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2008 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. */ #include #include @@ -222,10 +226,10 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) struct task_struct *tsk = current; fpvalid = !!tsk_used_math(tsk); - if (fpvalid) { - unlazy_fpu(tsk, regs); - memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); - } + if (fpvalid) + fpvalid = !fpregs_get(tsk, NULL, 0, + sizeof(struct user_fpu_struct), + fpu, NULL); #endif return fpvalid; diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 0f44f2b..29ca09d 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -32,6 +32,7 @@ #include #include #include +#include /* * This routine will get a word off of the process kernel stack. @@ -145,6 +146,54 @@ static int genregs_set(struct task_struct *target, return ret; } +#ifdef CONFIG_SH_FPU +int fpregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + + ret = init_fpu(target); + if (ret) + return ret; + + if ((boot_cpu_data.flags & CPU_HAS_FPU)) + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.hard, 0, -1); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.soft, 0, -1); +} + +static int fpregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + ret = init_fpu(target); + if (ret) + return ret; + + set_stopped_child_used_math(target); + + if ((boot_cpu_data.flags & CPU_HAS_FPU)) + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.hard, 0, -1); + + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.soft, 0, -1); +} + +static int fpregs_active(struct task_struct *target, + const struct user_regset *regset) +{ + return tsk_used_math(target) ? 
regset->n : 0; +} +#endif + #ifdef CONFIG_SH_DSP static int dspregs_get(struct task_struct *target, const struct user_regset *regset, @@ -194,6 +243,9 @@ static int dspregs_active(struct task_struct *target, */ enum sh_regset { REGSET_GENERAL, +#ifdef CONFIG_SH_FPU + REGSET_FPU, +#endif #ifdef CONFIG_SH_DSP REGSET_DSP, #endif @@ -214,6 +266,18 @@ static const struct user_regset sh_regsets[] = { .set = genregs_set, }, +#ifdef CONFIG_SH_FPU + [REGSET_FPU] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpu_struct) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .get = fpregs_get, + .set = fpregs_set, + .active = fpregs_active, + }, +#endif + #ifdef CONFIG_SH_DSP [REGSET_DSP] = { .n = sizeof(struct pt_dspregs) / sizeof(long), @@ -304,6 +368,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) REGSET_GENERAL, 0, sizeof(struct pt_regs), (const void __user *)data); +#ifdef CONFIG_SH_FPU + case PTRACE_GETFPREGS: + return copy_regset_to_user(child, &user_sh_native_view, + REGSET_FPU, + 0, sizeof(struct user_fpu_struct), + (void __user *)data); + case PTRACE_SETFPREGS: + return copy_regset_from_user(child, &user_sh_native_view, + REGSET_FPU, + 0, sizeof(struct user_fpu_struct), + (const void __user *)data); +#endif #ifdef CONFIG_SH_DSP case PTRACE_GETDSPREGS: return copy_regset_to_user(child, &user_sh_native_view, -- cgit v0.10.2 From 45f197ade73ba95681b9803680c75352fc0a1c0a Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 20 Sep 2008 12:58:40 +0200 Subject: x86, oprofile: BUG: using smp_processor_id() in preemptible code Add __raw access before setting per cpu variable switch_index, to avoid the following BUG: [ 449.166827] BUG: using smp_processor_id() in preemptible [00000000] code: modprobe/6998 [ 449.166848] caller is op_nmi_init+0xf0/0x2b0 [oprofile] [ 449.166855] Pid: 6998, comm: modprobe Not tainted 2.6.27-rc5-mm1 #29 [ 449.166860] Call Trace: [ 449.166872] [] debug_smp_processor_id+0xd7/0xe0 [ 449.166887] [] op_nmi_init+0xf0/0x2b0 [oprofile] [ 449.166902] [] oprofile_init+0x0/0x60 [oprofile] [ 449.166915] [] oprofile_arch_init+0x9/0x30 [oprofile] [ 449.166928] [] oprofile_init+0x1e/0x60 [oprofile] [ 449.166937] [] _stext+0x3b/0x160 [ 449.166946] [] __mutex_unlock_slowpath+0xe5/0x190 [ 449.166955] [] trace_hardirqs_on_caller+0xca/0x140 [ 449.166965] [] sys_init_module+0xdc/0x210 [ 449.166972] [] system_call_fastpath+0x16/0x1b Signed-off-by: Andrea Righi Acked-by: Robert Richter Signed-off-by: Ingo Molnar diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index fb4902b..4108d02 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -551,7 +551,7 @@ int __init op_nmi_init(struct oprofile_operations *ops) } /* default values, can be overwritten by model */ - __get_cpu_var(switch_index) = 0; + __raw_get_cpu_var(switch_index) = 0; ops->create_files = nmi_create_files; ops->setup = nmi_setup; ops->shutdown = nmi_shutdown; -- cgit v0.10.2 From d7cfb60c5cf904ecf1e0ae23ec178175b86f0d4a Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Fri, 19 Sep 2008 13:13:44 +0100 Subject: hrtimer: remove hrtimer_clock_base::get_softirq_time() Peter Zijlstra noticed this 8 months ago and I just noticed it again. hrtimer_clock_base::get_softirq_time() is currently unused in the entire tree. In fact, looking at the logs, it appears as if it was never used. Remove it. 
Signed-off-by: Mark McLoughlin Signed-off-by: Ingo Molnar diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6d93dce..1b079bd 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -145,7 +145,6 @@ struct hrtimer_sleeper { * @first: pointer to the timer node which expires first * @resolution: the resolution of the clock, in nanoseconds * @get_time: function to retrieve the current time of the clock - * @get_softirq_time: function to retrieve the current time from the softirq * @softirq_time: the time when running the hrtimer queue in the softirq * @offset: offset of this clock to the monotonic base * @reprogram: function to reprogram the timer event @@ -157,7 +156,6 @@ struct hrtimer_clock_base { struct rb_node *first; ktime_t resolution; ktime_t (*get_time)(void); - ktime_t (*get_softirq_time)(void); ktime_t softirq_time; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t offset; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 03ea137..4d761d5 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1401,9 +1401,7 @@ void hrtimer_run_queues(void) if (!base->first) continue; - if (base->get_softirq_time) - base->softirq_time = base->get_softirq_time(); - else if (gettime) { + if (gettime) { hrtimer_get_softirq_time(cpu_base); gettime = 0; } -- cgit v0.10.2 From b91c4996df56fcd201f85c392a1de7bc3f6641f5 Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Fri, 19 Sep 2008 13:13:48 +0100 Subject: hrtimer: remove hrtimer_clock_base::reprogram() hrtimer_clock_base::reprogram() also appears to never have been used, so remove it. Signed-off-by: Mark McLoughlin Signed-off-by: Ingo Molnar diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 1b079bd..68b0196 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -147,7 +147,6 @@ struct hrtimer_sleeper { * @get_time: function to retrieve the current time of the clock * @softirq_time: the time when running the hrtimer queue in the softirq * @offset: offset of this clock to the monotonic base - * @reprogram: function to reprogram the timer event */ struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; @@ -159,9 +158,6 @@ struct hrtimer_clock_base { ktime_t softirq_time; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t offset; - int (*reprogram)(struct hrtimer *t, - struct hrtimer_clock_base *b, - ktime_t n); #endif }; -- cgit v0.10.2 From a0ad05c75aa362c91f4d9cd91ff375a739574dd8 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Mon, 1 Sep 2008 14:27:02 +0200 Subject: Introduce FW_BUG, FW_WARN and FW_INFO to consistenly tell users about BIOS bugs The idea is to add this to printk after the severity: printk(KERN_ERR FW_BUG "This is not our fault, BIOS developer: fix it by simply add ...\n"); If a Firmware issue should be hidden, because it is work-arounded, but you still want to see something popping up e.g. for info only: printk(KERN_INFO FW_INFO "This is done stupid, we can handle it, but it should better be avoided in future\n"); or on the Linuxfirmwarekit to tell vendors that they did something stupid or wrong without bothering the user: printk(KERN_INFO FW_BUG "This is done stupid, we can handle it, but it should better be avoided in future\n"); Some use cases: - If a user sees a [Firmware Bug] message in the kernel he should first update the BIOS before wasting time with debugging and submiting on old firmware code to mailing lists. - The linuxfirmwarekit (http://www.linuxfirmwarekit.org) tries to detect firmware bugs. 
It currently does that in userspace, which results in:
- huge test scripts that could be a one-liner in the kernel
- a lot of BIOS bugs that are already absorbed by the kernel

What do we need such a stupid linuxfirmwarekit for?
- Vendors: can test their BIOSes for Linux compatibility. The time will come when vendors realize that the test utilities on Linux are stricter, and that using them increases the quality and stability of their products.
- Vendors: can easily fix up their BIOSes and be more Linux compatible by running: dmesg |grep "Firmware Bug" and sending the result to their BIOS developer colleagues, who should know what the messages are about and how to fix them without having to study kernel code.
- Distributions: can do a first automated HW/BIOS check. This can then be done without asking kernel developers, who would otherwise need to dig down into the code and explain the details. Certification can/will simply be rejected until dmesg |grep "Firmware Bug" is empty.
- Thus this can be used as an instrument to enforce cleaner BIOS code. Currently every stupid Windows ACPI bug is re-implemented in Linux, which is a rather unfortunate situation. We already have the power to avoid this in e.g. memory or CPU hot-plug ACPI implementations, because Linux certification is a must for most vendors in the server area. The goal is to work towards being able to do the same in the laptop area (vendors are starting to look at Linux here as well and will use this tool). At the very least, it gives these people a tool that makes things as easy as possible (e.g. no need to browse kernel code).
- The ordinary Linux user: can go into the next shop, boot the firmwarekit on the machines he prefers, and choose one without BIOS bugs. Unsupported hardware is fine (he likes to try out the latest projects that might support it, or to dig into it on his own), but he hates working around broken BIOSes.

I double-checked against the firmwarekit's own result categories. The mapping generally is (also depending on how likely the BIOS is to blame, which can sometimes be hard to judge):
FW_INFO = INFO
FW_WARN = WARN
FW_BUG = FAIL

More information about the linuxfirmwarekit, and why this is needed, can be found here: http://www.linuxfirmwarekit.org

While the severity mapping matches the firmwarekit, it might be tricky to hide messages from the user. For example, we recently found out that HP BIOSes return negative temperatures, which seems to indicate that the thermal zone is invalid. We can work around that gracefully by ignoring the thermal zone, and we do not want to bother the ordinary user with a frightening message like: Firmware Bug: thermal management absolutely broken so we want to hide it from the user. In the linuxfirmwarekit, however, this should be shown as a real show stopper (the temperatures could really be wrong, broken thermal management is one of the worst things that can happen, and the BIOS developers for the machine must implement it properly). The intention is to do that (hide it from the user with a KERN_INFO message, but still report it as a BIOS bug) with: printk(KERN_INFO FW_BUG "Negative temperature values detected. Trying to work around it, BIOS must get fixed\n"); Hope that works out... there is no obvious better way to hide it, as printk is the only way to easily provide this functionality.
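To make the mechanism concrete: the FW_* strings are plain prefixes that the compiler concatenates with the log-level prefix at build time, so callers pay nothing beyond a longer string literal. A small standalone illustration follows (printf stands in for printk; the "<3>" value of KERN_ERR and the FW_BUG text match the kernel headers of this era, while the example message itself is made up):

#include <stdio.h>

#define KERN_ERR "<3>"			/* log-level prefix, as in the kernel */
#define FW_BUG   "[Firmware Bug]: "	/* matches the kernel.h hunk below */

int main(void)
{
	/* adjacent string literals are concatenated at compile time, so the
	 * firmware annotation adds no runtime cost */
	printf(KERN_ERR FW_BUG "example: _PSS is missing, BIOS needs an update\n");
	return 0;
}

Grepping the log for "[Firmware Bug]" then finds such lines regardless of which severity was chosen.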
Signed-off-by: Thomas Renninger Signed-off-by: Andi Kleen Signed-off-by: Len Brown diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2651f80..0b19848 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -190,6 +190,30 @@ extern int kernel_text_address(unsigned long addr); struct pid; extern struct pid *session_of_pgrp(struct pid *pgrp); +/* + * FW_BUG + * Add this to a message where you are sure the firmware is buggy or behaves + * really stupid or out of spec. Be aware that the responsible BIOS developer + * should be able to fix this issue or at least get a concrete idea of the + * problem by reading your message without the need of looking at the kernel + * code. + * + * Use it for definite and high priority BIOS bugs. + * + * FW_WARN + * Use it for not that clear (e.g. could the kernel messed up things already?) + * and medium priority BIOS bugs. + * + * FW_INFO + * Use this one if you want to tell the user or vendor about something + * suspicious, but generally harmless related to the firmware. + * + * Use it for information or very low priority BIOS bugs. + */ +#define FW_BUG "[Firmware Bug]: " +#define FW_WARN "[Firmware Warn]: " +#define FW_INFO "[Firmware Info]: " + #ifdef CONFIG_PRINTK asmlinkage int vprintk(const char *fmt, va_list args) __attribute__ ((format (printf, 1, 0))); -- cgit v0.10.2 From 910dfae298f7b3dae0e9a52736182a3b0559ca35 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Mon, 1 Sep 2008 14:27:04 +0200 Subject: ACPI: cpufreq, processor: Detect old BIOS, not supporting CPU freq on a recent CPU. On Intel CPUs it is rather common and a good hint that BIOSes which do provide _PPC func, but not the frequencies itself in _PSS function, are old and need to be updated for CPU freq support. Tell the user/vendor he has a BIOS/firmware problem. Make use of FW_BUG interface to give vendors and users the ability to automatically check with (or let linuxfirmwarekit do that): dmesg |grep "Firmware Bug" Signed-off-by: Thomas Renninger Signed-off-by: Andi Kleen Signed-off-by: Len Brown diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 80c251e..242f814 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -38,6 +38,7 @@ #include #endif +#include #include #include @@ -334,7 +335,6 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr) acpi_status status = AE_OK; acpi_handle handle = NULL; - if (!pr || !pr->performance || !pr->handle) return -EINVAL; @@ -347,13 +347,25 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr) result = acpi_processor_get_performance_control(pr); if (result) - return result; + goto update_bios; result = acpi_processor_get_performance_states(pr); if (result) - return result; + goto update_bios; return 0; + + /* + * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that + * the BIOS is older than the CPU and does not know its frequencies + */ + update_bios: + if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))){ + if(boot_cpu_has(X86_FEATURE_EST)) + printk(KERN_WARNING FW_BUG "BIOS needs update for CPU " + "frequency support\n"); + } + return result; } int acpi_processor_notify_smm(struct module *calling_module) -- cgit v0.10.2 From 2fd47094f92fa2bdbf99be33294a7b6b97785a70 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Mon, 1 Sep 2008 14:27:03 +0200 Subject: CPUFREQ: powernow-k8: Try to detect old BIOS, not supporting CPU freq on a recent AMD CPUs. 
Make use of FW_BUG interface to give vendors and users the ability to automatically check for powernow-k8 related BIOS bugs by: dmesg |grep "Firmware Bug" Signed-off-by: Thomas Renninger Signed-off-by: Andi Kleen Signed-off-by: Len Brown diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 84bb395..4e0c6ab 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -45,7 +45,6 @@ #endif #define PFX "powernow-k8: " -#define BFX PFX "BIOS error: " #define VERSION "version 2.20.00" #include "powernow-k8.h" @@ -536,35 +535,40 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 for (j = 0; j < data->numps; j++) { if (pst[j].vid > LEAST_VID) { - printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid); + printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n", + j, pst[j].vid); return -EINVAL; } if (pst[j].vid < data->rvo) { /* vid + rvo >= 0 */ - printk(KERN_ERR BFX "0 vid exceeded with pstate %d\n", j); + printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" + " %d\n", j); return -ENODEV; } if (pst[j].vid < maxvid + data->rvo) { /* vid + rvo >= maxvid */ - printk(KERN_ERR BFX "maxvid exceeded with pstate %d\n", j); + printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" + " %d\n", j); return -ENODEV; } if (pst[j].fid > MAX_FID) { - printk(KERN_ERR BFX "maxfid exceeded with pstate %d\n", j); + printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate" + " %d\n", j); return -ENODEV; } if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { /* Only first fid is allowed to be in "low" range */ - printk(KERN_ERR BFX "two low fids - %d : 0x%x\n", j, pst[j].fid); + printk(KERN_ERR FW_BUG PFX "two low fids - %d : " + "0x%x\n", j, pst[j].fid); return -EINVAL; } if (pst[j].fid < lastfid) lastfid = pst[j].fid; } if (lastfid & 1) { - printk(KERN_ERR BFX "lastfid invalid\n"); + printk(KERN_ERR FW_BUG PFX "lastfid invalid\n"); return -EINVAL; } if (lastfid > LO_FID_TABLE_TOP) - printk(KERN_INFO BFX "first fid not from lo freq table\n"); + printk(KERN_INFO FW_BUG PFX "first fid not from lo freq table\n"); return 0; } @@ -672,13 +676,13 @@ static int find_psb_table(struct powernow_k8_data *data) dprintk("table vers: 0x%x\n", psb->tableversion); if (psb->tableversion != PSB_VERSION_1_4) { - printk(KERN_ERR BFX "PSB table is not v1.4\n"); + printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); return -ENODEV; } dprintk("flags: 0x%x\n", psb->flags1); if (psb->flags1) { - printk(KERN_ERR BFX "unknown flags\n"); + printk(KERN_ERR FW_BUG PFX "unknown flags\n"); return -ENODEV; } @@ -705,7 +709,7 @@ static int find_psb_table(struct powernow_k8_data *data) } } if (cpst != 1) { - printk(KERN_ERR BFX "numpst must be 1\n"); + printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); return -ENODEV; } @@ -1130,17 +1134,19 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) "ACPI Processor module before starting this " "driver.\n"); #else - printk(KERN_ERR PFX "Your BIOS does not provide ACPI " - "_PSS objects in a way that Linux understands. " - "Please report this to the Linux ACPI maintainers" - " and complain to your BIOS vendor.\n"); + printk(KERN_ERR FW_BUG PFX "Your BIOS does not provide" + " ACPI _PSS objects in a way that Linux " + "understands. Please report this to the Linux " + "ACPI maintainers and complain to your BIOS " + "vendor.\n"); #endif kfree(data); return -ENODEV; } if (pol->cpu != 0) { - printk(KERN_ERR PFX "No ACPI _PSS objects for CPU other than " - "CPU0. 
Complain to your BIOS vendor.\n"); + printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " + "CPU other than CPU0. Complain to your BIOS " + "vendor.\n"); kfree(data); return -ENODEV; } @@ -1193,7 +1199,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) /* min/max the cpu is capable of */ if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { - printk(KERN_ERR PFX "invalid powernow_table\n"); + printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); powernow_k8_cpu_exit_acpi(data); kfree(data->powernow_table); kfree(data); -- cgit v0.10.2 From bb34d92f643086d546b49cef680f6f305ed84414 Mon Sep 17 00:00:00 2001 From: Frank Mayhar Date: Fri, 12 Sep 2008 09:54:39 -0700 Subject: timers: fix itimer/many thread hang, v2 This is the second resubmission of the posix timer rework patch, posted a few days ago. This includes the changes from the previous resubmittion, which addressed Oleg Nesterov's comments, removing the RCU stuff from the patch and un-inlining the thread_group_cputime() function for SMP. In addition, per Ingo Molnar it simplifies the UP code, consolidating much of it with the SMP version and depending on lower-level SMP/UP handling to take care of the differences. It also cleans up some UP compile errors, moves the scheduler stats-related macros into kernel/sched_stats.h, cleans up a merge error in kernel/fork.c and has a few other minor fixes and cleanups as suggested by Oleg and Ingo. Thanks for the review, guys. Signed-off-by: Frank Mayhar Cc: Roland McGrath Cc: Alexey Dobriyan Cc: Andrew Morton Signed-off-by: Ingo Molnar diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index cf9f40a..cac3750 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -52,6 +52,7 @@ static inline int kstat_irqs(int irq) return sum; } +extern unsigned long long task_delta_exec(struct task_struct *); extern void account_user_time(struct task_struct *, cputime_t); extern void account_user_time_scaled(struct task_struct *, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t); diff --git a/include/linux/sched.h b/include/linux/sched.h index 7ce8d4e..b982fb4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -454,15 +454,9 @@ struct task_cputime { * This structure contains the version of task_cputime, above, that is * used for thread group CPU clock calculations. */ -#ifdef CONFIG_SMP struct thread_group_cputime { struct task_cputime *totals; }; -#else -struct thread_group_cputime { - struct task_cputime totals; -}; -#endif /* * NOTE! "signal_struct" does not have it's own @@ -2124,193 +2118,26 @@ static inline int spin_needbreak(spinlock_t *lock) /* * Thread group CPU time accounting. 
*/ -#ifdef CONFIG_SMP -extern int thread_group_cputime_alloc_smp(struct task_struct *); -extern void thread_group_cputime_smp(struct task_struct *, struct task_cputime *); +extern int thread_group_cputime_alloc(struct task_struct *); +extern void thread_group_cputime(struct task_struct *, struct task_cputime *); static inline void thread_group_cputime_init(struct signal_struct *sig) { sig->cputime.totals = NULL; } -static inline int thread_group_cputime_clone_thread(struct task_struct *curr, - struct task_struct *new) +static inline int thread_group_cputime_clone_thread(struct task_struct *curr) { if (curr->signal->cputime.totals) return 0; - return thread_group_cputime_alloc_smp(curr); + return thread_group_cputime_alloc(curr); } -static inline void thread_group_cputime_free(struct signal_struct *sig) -{ - free_percpu(sig->cputime.totals); -} - -/** - * thread_group_cputime - Sum the thread group time fields across all CPUs. - * - * This is a wrapper for the real routine, thread_group_cputime_smp(). See - * that routine for details. - */ -static inline void thread_group_cputime( - struct task_struct *tsk, - struct task_cputime *times) -{ - thread_group_cputime_smp(tsk, times); -} - -/** - * thread_group_cputime_account_user - Maintain utime for a thread group. - * - * @tgtimes: Pointer to thread_group_cputime structure. - * @cputime: Time value by which to increment the utime field of that - * structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the utime field there. - */ -static inline void thread_group_cputime_account_user( - struct thread_group_cputime *tgtimes, - cputime_t cputime) -{ - if (tgtimes->totals) { - struct task_cputime *times; - - times = per_cpu_ptr(tgtimes->totals, get_cpu()); - times->utime = cputime_add(times->utime, cputime); - put_cpu_no_resched(); - } -} - -/** - * thread_group_cputime_account_system - Maintain stime for a thread group. - * - * @tgtimes: Pointer to thread_group_cputime structure. - * @cputime: Time value by which to increment the stime field of that - * structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the stime field there. - */ -static inline void thread_group_cputime_account_system( - struct thread_group_cputime *tgtimes, - cputime_t cputime) -{ - if (tgtimes->totals) { - struct task_cputime *times; - - times = per_cpu_ptr(tgtimes->totals, get_cpu()); - times->stime = cputime_add(times->stime, cputime); - put_cpu_no_resched(); - } -} - -/** - * thread_group_cputime_account_exec_runtime - Maintain exec runtime for a - * thread group. - * - * @tgtimes: Pointer to thread_group_cputime structure. - * @ns: Time value by which to increment the sum_exec_runtime field - * of that structure. - * - * If thread group time is being maintained, get the structure for the - * running CPU and update the sum_exec_runtime field there. 
- */ -static inline void thread_group_cputime_account_exec_runtime( - struct thread_group_cputime *tgtimes, - unsigned long long ns) -{ - if (tgtimes->totals) { - struct task_cputime *times; - - times = per_cpu_ptr(tgtimes->totals, get_cpu()); - times->sum_exec_runtime += ns; - put_cpu_no_resched(); - } -} - -#else /* CONFIG_SMP */ - -static inline void thread_group_cputime_init(struct signal_struct *sig) -{ - sig->cputime.totals.utime = cputime_zero; - sig->cputime.totals.stime = cputime_zero; - sig->cputime.totals.sum_exec_runtime = 0; -} - -static inline int thread_group_cputime_alloc(struct task_struct *tsk) -{ - return 0; -} static inline void thread_group_cputime_free(struct signal_struct *sig) { -} - -static inline int thread_group_cputime_clone_thread(struct task_struct *curr, - struct task_struct *tsk) -{ - return 0; -} - -static inline void thread_group_cputime(struct task_struct *tsk, - struct task_cputime *cputime) -{ - *cputime = tsk->signal->cputime.totals; -} - -static inline void thread_group_cputime_account_user( - struct thread_group_cputime *tgtimes, - cputime_t cputime) -{ - tgtimes->totals.utime = cputime_add(tgtimes->totals.utime, cputime); -} - -static inline void thread_group_cputime_account_system( - struct thread_group_cputime *tgtimes, - cputime_t cputime) -{ - tgtimes->totals.stime = cputime_add(tgtimes->totals.stime, cputime); -} - -static inline void thread_group_cputime_account_exec_runtime( - struct thread_group_cputime *tgtimes, - unsigned long long ns) -{ - tgtimes->totals.sum_exec_runtime += ns; -} - -#endif /* CONFIG_SMP */ - -static inline void account_group_user_time(struct task_struct *tsk, - cputime_t cputime) -{ - struct signal_struct *sig; - - sig = tsk->signal; - if (likely(sig)) - thread_group_cputime_account_user(&sig->cputime, cputime); -} - -static inline void account_group_system_time(struct task_struct *tsk, - cputime_t cputime) -{ - struct signal_struct *sig; - - sig = tsk->signal; - if (likely(sig)) - thread_group_cputime_account_system(&sig->cputime, cputime); -} - -static inline void account_group_exec_runtime(struct task_struct *tsk, - unsigned long long ns) -{ - struct signal_struct *sig; - - sig = tsk->signal; - if (likely(sig)) - thread_group_cputime_account_exec_runtime(&sig->cputime, ns); + free_percpu(sig->cputime.totals); } /* diff --git a/kernel/fork.c b/kernel/fork.c index 1181b9a..021ae01 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -791,7 +791,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) int ret; if (clone_flags & CLONE_THREAD) { - ret = thread_group_cputime_clone_thread(current, tsk); + ret = thread_group_cputime_clone_thread(current); if (likely(!ret)) { atomic_inc(¤t->signal->count); atomic_inc(¤t->signal->live); @@ -834,9 +834,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; task_io_accounting_init(&sig->ioac); - INIT_LIST_HEAD(&sig->cpu_timers[0]); - INIT_LIST_HEAD(&sig->cpu_timers[1]); - INIT_LIST_HEAD(&sig->cpu_timers[2]); taskstats_tgid_init(sig); task_lock(current->group_leader); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 9a7ea04..153dcb2 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -7,50 +7,46 @@ #include #include #include +#include -#ifdef CONFIG_SMP /* - * Allocate the thread_group_cputime structure appropriately for SMP kernels - * and fill in the current 
values of the fields. Called from copy_signal() - * via thread_group_cputime_clone_thread() when adding a second or subsequent + * Allocate the thread_group_cputime structure appropriately and fill in the + * current values of the fields. Called from copy_signal() via + * thread_group_cputime_clone_thread() when adding a second or subsequent * thread to a thread group. Assumes interrupts are enabled when called. */ -int thread_group_cputime_alloc_smp(struct task_struct *tsk) +int thread_group_cputime_alloc(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; struct task_cputime *cputime; /* * If we have multiple threads and we don't already have a - * per-CPU task_cputime struct, allocate one and fill it in with - * the times accumulated so far. + * per-CPU task_cputime struct (checked in the caller), allocate + * one and fill it in with the times accumulated so far. We may + * race with another thread so recheck after we pick up the sighand + * lock. */ - if (sig->cputime.totals) - return 0; cputime = alloc_percpu(struct task_cputime); if (cputime == NULL) return -ENOMEM; - read_lock(&tasklist_lock); spin_lock_irq(&tsk->sighand->siglock); if (sig->cputime.totals) { spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); free_percpu(cputime); return 0; } sig->cputime.totals = cputime; - cputime = per_cpu_ptr(sig->cputime.totals, get_cpu()); + cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id()); cputime->utime = tsk->utime; cputime->stime = tsk->stime; cputime->sum_exec_runtime = tsk->se.sum_exec_runtime; - put_cpu_no_resched(); spin_unlock_irq(&tsk->sighand->siglock); - read_unlock(&tasklist_lock); return 0; } /** - * thread_group_cputime_smp - Sum the thread group time fields across all CPUs. + * thread_group_cputime - Sum the thread group time fields across all CPUs. * * @tsk: The task we use to identify the thread group. * @times: task_cputime structure in which we return the summed fields. @@ -58,7 +54,7 @@ int thread_group_cputime_alloc_smp(struct task_struct *tsk) * Walk the list of CPUs to sum the per-CPU time fields in the thread group * time structure. */ -void thread_group_cputime_smp( +void thread_group_cputime( struct task_struct *tsk, struct task_cputime *times) { @@ -83,8 +79,6 @@ void thread_group_cputime_smp( } } -#endif /* CONFIG_SMP */ - /* * Called after updating RLIMIT_CPU to set timer expiration if necessary. */ @@ -300,7 +294,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, cpu->cpu = virt_ticks(p); break; case CPUCLOCK_SCHED: - cpu->sched = task_sched_runtime(p); + cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p); break; } return 0; @@ -309,16 +303,15 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, /* * Sample a process (thread group) clock for the given group_leader task. * Must be called with tasklist_lock held for reading. - * Must be called with tasklist_lock held for reading, and p->sighand->siglock. 
*/ -static int cpu_clock_sample_group_locked(unsigned int clock_idx, - struct task_struct *p, - union cpu_time_count *cpu) +static int cpu_clock_sample_group(const clockid_t which_clock, + struct task_struct *p, + union cpu_time_count *cpu) { struct task_cputime cputime; thread_group_cputime(p, &cputime); - switch (clock_idx) { + switch (which_clock) { default: return -EINVAL; case CPUCLOCK_PROF: @@ -328,29 +321,12 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx, cpu->cpu = cputime.utime; break; case CPUCLOCK_SCHED: - cpu->sched = thread_group_sched_runtime(p); + cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p); break; } return 0; } -/* - * Sample a process (thread group) clock for the given group_leader task. - * Must be called with tasklist_lock held for reading. - */ -static int cpu_clock_sample_group(const clockid_t which_clock, - struct task_struct *p, - union cpu_time_count *cpu) -{ - int ret; - unsigned long flags; - spin_lock_irqsave(&p->sighand->siglock, flags); - ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p, - cpu); - spin_unlock_irqrestore(&p->sighand->siglock, flags); - return ret; -} - int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp) { @@ -1324,29 +1300,37 @@ static inline int task_cputime_expired(const struct task_cputime *sample, * fastpath_timer_check - POSIX CPU timers fast path. * * @tsk: The task (thread) being checked. - * @sig: The signal pointer for that task. * - * If there are no timers set return false. Otherwise snapshot the task and - * thread group timers, then compare them with the corresponding expiration - # times. Returns true if a timer has expired, else returns false. + * Check the task and thread group timers. If both are zero (there are no + * timers set) return false. Otherwise snapshot the task and thread group + * timers and compare them with the corresponding expiration times. Return + * true if a timer has expired, else return false. */ -static inline int fastpath_timer_check(struct task_struct *tsk, - struct signal_struct *sig) +static inline int fastpath_timer_check(struct task_struct *tsk) { - struct task_cputime task_sample = { - .utime = tsk->utime, - .stime = tsk->stime, - .sum_exec_runtime = tsk->se.sum_exec_runtime - }; - struct task_cputime group_sample; + struct signal_struct *sig = tsk->signal; - if (task_cputime_zero(&tsk->cputime_expires) && - task_cputime_zero(&sig->cputime_expires)) + if (unlikely(!sig)) return 0; - if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) - return 1; - thread_group_cputime(tsk, &group_sample); - return task_cputime_expired(&group_sample, &sig->cputime_expires); + + if (!task_cputime_zero(&tsk->cputime_expires)) { + struct task_cputime task_sample = { + .utime = tsk->utime, + .stime = tsk->stime, + .sum_exec_runtime = tsk->se.sum_exec_runtime + }; + + if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) + return 1; + } + if (!task_cputime_zero(&sig->cputime_expires)) { + struct task_cputime group_sample; + + thread_group_cputime(tsk, &group_sample); + if (task_cputime_expired(&group_sample, &sig->cputime_expires)) + return 1; + } + return 0; } /* @@ -1358,43 +1342,34 @@ void run_posix_cpu_timers(struct task_struct *tsk) { LIST_HEAD(firing); struct k_itimer *timer, *next; - struct signal_struct *sig; - struct sighand_struct *sighand; - unsigned long flags; BUG_ON(!irqs_disabled()); - /* Pick up tsk->signal and make sure it's valid. 
*/ - sig = tsk->signal; /* * The fast path checks that there are no expired thread or thread - * group timers. If that's so, just return. Also check that - * tsk->signal is non-NULL; this probably can't happen but cover the - * possibility anyway. + * group timers. If that's so, just return. */ - if (unlikely(!sig) || !fastpath_timer_check(tsk, sig)) + if (!fastpath_timer_check(tsk)) return; - sighand = lock_task_sighand(tsk, &flags); - if (likely(sighand)) { - /* - * Here we take off tsk->signal->cpu_timers[N] and - * tsk->cpu_timers[N] all the timers that are firing, and - * put them on the firing list. - */ - check_thread_timers(tsk, &firing); - check_process_timers(tsk, &firing); + spin_lock(&tsk->sighand->siglock); + /* + * Here we take off tsk->signal->cpu_timers[N] and + * tsk->cpu_timers[N] all the timers that are firing, and + * put them on the firing list. + */ + check_thread_timers(tsk, &firing); + check_process_timers(tsk, &firing); - /* - * We must release these locks before taking any timer's lock. - * There is a potential race with timer deletion here, as the - * siglock now protects our private firing list. We have set - * the firing flag in each timer, so that a deletion attempt - * that gets the timer lock before we do will give it up and - * spin until we've taken care of that timer below. - */ - } - unlock_task_sighand(tsk, &flags); + /* + * We must release these locks before taking any timer's lock. + * There is a potential race with timer deletion here, as the + * siglock now protects our private firing list. We have set + * the firing flag in each timer, so that a deletion attempt + * that gets the timer lock before we do will give it up and + * spin until we've taken care of that timer below. + */ + spin_unlock(&tsk->sighand->siglock); /* * Now that all the timers on our list have the firing flag, @@ -1433,7 +1408,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, struct list_head *head; BUG_ON(clock_idx == CPUCLOCK_SCHED); - cpu_clock_sample_group_locked(clock_idx, tsk, &now); + cpu_clock_sample_group(clock_idx, tsk, &now); if (oldval) { if (!cputime_eq(*oldval, cputime_zero)) { diff --git a/kernel/sched.c b/kernel/sched.c index c51b5d2..260c22c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4039,55 +4039,22 @@ EXPORT_PER_CPU_SYMBOL(kstat); /* * Return any ns on the sched_clock that have not yet been banked in * @p in case that task is currently running. - * - * Called with task_rq_lock() held on @rq. */ -static unsigned long long task_delta_exec(struct task_struct *p, struct rq *rq) +unsigned long long task_delta_exec(struct task_struct *p) { + struct rq *rq; + unsigned long flags; + u64 ns = 0; + + rq = task_rq_lock(p, &flags); if (task_current(rq, p)) { u64 delta_exec; update_rq_clock(rq); delta_exec = rq->clock - p->se.exec_start; if ((s64)delta_exec > 0) - return delta_exec; + ns = delta_exec; } - return 0; -} - -/* - * Return p->sum_exec_runtime plus any more ns on the sched_clock - * that have not yet been banked in case the task is currently running. - */ -unsigned long long task_sched_runtime(struct task_struct *p) -{ - unsigned long flags; - u64 ns; - struct rq *rq; - - rq = task_rq_lock(p, &flags); - ns = p->se.sum_exec_runtime + task_delta_exec(p, rq); - task_rq_unlock(rq, &flags); - - return ns; -} - -/* - * Return sum_exec_runtime for the thread group plus any more ns on the - * sched_clock that have not yet been banked in case the task is currently - * running. 
- */ -unsigned long long thread_group_sched_runtime(struct task_struct *p) -{ - unsigned long flags; - u64 ns; - struct rq *rq; - struct task_cputime totals; - - rq = task_rq_lock(p, &flags); - thread_group_cputime(p, &totals); - ns = totals.sum_exec_runtime + task_delta_exec(p, rq); - task_rq_unlock(rq, &flags); return ns; } diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 8385d43..d6903bd 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -270,3 +270,139 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) #define sched_info_switch(t, next) do { } while (0) #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ +/* + * The following are functions that support scheduler-internal time accounting. + * These functions are generally called at the timer tick. None of this depends + * on CONFIG_SCHEDSTATS. + */ + +#ifdef CONFIG_SMP + +/** + * thread_group_cputime_account_user - Maintain utime for a thread group. + * + * @tgtimes: Pointer to thread_group_cputime structure. + * @cputime: Time value by which to increment the utime field of that + * structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the utime field there. + */ +static inline void thread_group_cputime_account_user( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + if (tgtimes->totals) { + struct task_cputime *times; + + times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times->utime = cputime_add(times->utime, cputime); + put_cpu_no_resched(); + } +} + +/** + * thread_group_cputime_account_system - Maintain stime for a thread group. + * + * @tgtimes: Pointer to thread_group_cputime structure. + * @cputime: Time value by which to increment the stime field of that + * structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the stime field there. + */ +static inline void thread_group_cputime_account_system( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + if (tgtimes->totals) { + struct task_cputime *times; + + times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times->stime = cputime_add(times->stime, cputime); + put_cpu_no_resched(); + } +} + +/** + * thread_group_cputime_account_exec_runtime - Maintain exec runtime for a + * thread group. + * + * @tgtimes: Pointer to thread_group_cputime structure. + * @ns: Time value by which to increment the sum_exec_runtime field + * of that structure. + * + * If thread group time is being maintained, get the structure for the + * running CPU and update the sum_exec_runtime field there. 
+ */ +static inline void thread_group_cputime_account_exec_runtime( + struct thread_group_cputime *tgtimes, + unsigned long long ns) +{ + if (tgtimes->totals) { + struct task_cputime *times; + + times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times->sum_exec_runtime += ns; + put_cpu_no_resched(); + } +} + +#else /* CONFIG_SMP */ + +static inline void thread_group_cputime_account_user( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + tgtimes->totals->utime = cputime_add(tgtimes->totals->utime, cputime); +} + +static inline void thread_group_cputime_account_system( + struct thread_group_cputime *tgtimes, + cputime_t cputime) +{ + tgtimes->totals->stime = cputime_add(tgtimes->totals->stime, cputime); +} + +static inline void thread_group_cputime_account_exec_runtime( + struct thread_group_cputime *tgtimes, + unsigned long long ns) +{ + tgtimes->totals->sum_exec_runtime += ns; +} + +#endif /* CONFIG_SMP */ + +/* + * These are the generic time-accounting routines that use the above + * functions. They are the functions actually called by the scheduler. + */ +static inline void account_group_user_time(struct task_struct *tsk, + cputime_t cputime) +{ + struct signal_struct *sig; + + sig = tsk->signal; + if (likely(sig)) + thread_group_cputime_account_user(&sig->cputime, cputime); +} + +static inline void account_group_system_time(struct task_struct *tsk, + cputime_t cputime) +{ + struct signal_struct *sig; + + sig = tsk->signal; + if (likely(sig)) + thread_group_cputime_account_system(&sig->cputime, cputime); +} + +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) +{ + struct signal_struct *sig; + + sig = tsk->signal; + if (likely(sig)) + thread_group_cputime_account_exec_runtime(&sig->cputime, ns); +} -- cgit v0.10.2 From 59f647c25a4f27c1e5c84710e0608b36303089f9 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Tue, 23 Sep 2008 15:55:56 -0700 Subject: fsldma: remove internal self-test from Freescale Elo DMA driver The Freescale Elo DMA driver runs an internal self-test before registering the channels with the DMA engine. This self-test has a fundemental flaw in that it calls the DMA engine's callback functions directly before the registration. However, the registration initializes some variables that the callback functions uses, namely the device struct. The code works today because there are two device structs: the one created by the DMA engine, and one created by the Open Firmware (OF) subsystem. The self-test currently uses the device struct created by OF. However, in the future, some of the device structs created by OF will be eliminated. This means that the self-test will only have access to the device struct created by the DMA engine. But this device struct isn't initialized when the self-test runs, and this causes a kernel panic. Since there is already a DMA test module (dmatest), the internal self-test code is not useful anyway. It is extremely unlikely that the test will fail in normal usage. It may have been helpful during development, but not any more. 
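For readers who want the failure mode spelled out, here is a tiny standalone C model of the use-before-registration hazard described above (all names are illustrative, not the driver's): the completion callback touches per-channel data that only registration fills in, so invoking it from a pre-registration self-test reads state that does not exist yet.

#include <stdio.h>
#include <stddef.h>

struct chan {
	const char *dev_name;	/* only set by register_chan() */
};

static void completion_callback(struct chan *c)
{
	/* in the kernel this would dereference an uninitialized device struct */
	printf("completion on %s\n", c->dev_name ? c->dev_name : "(unregistered!)");
}

static void register_chan(struct chan *c)
{
	c->dev_name = "dma0chan0";
}

int main(void)
{
	struct chan c = { .dev_name = NULL };

	completion_callback(&c);	/* self-test path: runs before registration */
	register_chan(&c);
	completion_callback(&c);	/* normal path: safe after registration */
	return 0;
}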
Cc: Kumar Gala Cc: Li Yang Cc: Scott Wood Signed-off-by: Timur Tabi Signed-off-by: Dan Williams diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index c0059ca..e9b2638 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -786,132 +786,6 @@ static void dma_do_tasklet(unsigned long data) fsl_chan_ld_cleanup(fsl_chan); } -static void fsl_dma_callback_test(void *param) -{ - struct fsl_dma_chan *fsl_chan = param; - if (fsl_chan) - dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n"); -} - -static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) -{ - struct dma_chan *chan; - int err = 0; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - u8 *src, *dest; - int i; - size_t test_size; - struct dma_async_tx_descriptor *tx1, *tx2, *tx3; - - test_size = 4096; - - src = kmalloc(test_size * 2, GFP_KERNEL); - if (!src) { - dev_err(fsl_chan->dev, - "selftest: Cannot alloc memory for test!\n"); - return -ENOMEM; - } - - dest = src + test_size; - - for (i = 0; i < test_size; i++) - src[i] = (u8) i; - - chan = &fsl_chan->common; - - if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) { - dev_err(fsl_chan->dev, - "selftest: Cannot alloc resources for DMA\n"); - err = -ENODEV; - goto out; - } - - /* TX 1 */ - dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2, - DMA_TO_DEVICE); - dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2, - DMA_FROM_DEVICE); - tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0); - async_tx_ack(tx1); - - cookie = fsl_dma_tx_submit(tx1); - fsl_dma_memcpy_issue_pending(chan); - msleep(2); - - if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { - dev_err(fsl_chan->dev, "selftest: Time out!\n"); - err = -ENODEV; - goto free_resources; - } - - /* Test free and re-alloc channel resources */ - fsl_dma_free_chan_resources(chan); - - if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) { - dev_err(fsl_chan->dev, - "selftest: Cannot alloc resources for DMA\n"); - err = -ENODEV; - goto free_resources; - } - - /* Continue to test - * TX 2 - */ - dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2, - test_size / 4, DMA_TO_DEVICE); - dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2, - test_size / 4, DMA_FROM_DEVICE); - tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); - async_tx_ack(tx2); - - /* TX 3 */ - dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4, - test_size / 4, DMA_TO_DEVICE); - dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4, - test_size / 4, DMA_FROM_DEVICE); - tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); - async_tx_ack(tx3); - - /* Interrupt tx test */ - tx1 = fsl_dma_prep_interrupt(chan, 0); - async_tx_ack(tx1); - cookie = fsl_dma_tx_submit(tx1); - - /* Test exchanging the prepared tx sort */ - cookie = fsl_dma_tx_submit(tx3); - cookie = fsl_dma_tx_submit(tx2); - - if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *) - dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) { - tx3->callback = fsl_dma_callback_test; - tx3->callback_param = fsl_chan; - } - fsl_dma_memcpy_issue_pending(chan); - msleep(2); - - if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { - dev_err(fsl_chan->dev, "selftest: Time out!\n"); - err = -ENODEV; - goto free_resources; - } - - err = memcmp(src, dest, test_size); - if (err) { - for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size); - i++); - dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is " - "error! 
src 0x%x, dest 0x%x\n", - i, (long)test_size, *(src + i), *(dest + i)); - } - -free_resources: - fsl_dma_free_chan_resources(chan); -out: - kfree(src); - return err; -} - static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, const struct of_device_id *match) { @@ -1000,17 +874,11 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, } } - err = fsl_dma_self_test(new_fsl_chan); - if (err) - goto err_self_test; - dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, match->compatible, new_fsl_chan->irq); return 0; -err_self_test: - free_irq(new_fsl_chan->irq, new_fsl_chan); err_no_irq: list_del(&new_fsl_chan->common.device_node); err_no_chan: -- cgit v0.10.2 From 8b59560a3baf2e7c24e0fb92ea5d09eca92805db Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:02:03 +0800 Subject: ACPI: dock: avoid check _STA method In some BIOSes, every _STA method call will send a notification again, this cause freeze. And in some BIOSes, it appears _STA should be called after _DCK. This tries to avoid calls _STA, and still keep the device present check. http://bugzilla.kernel.org/show_bug.cgi?id=10431 Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 7d2edf1..25d2161 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -604,14 +604,17 @@ static int handle_eject_request(struct dock_station *ds, u32 event) static void dock_notify(acpi_handle handle, u32 event, void *data) { struct dock_station *ds = data; + struct acpi_device *tmp; switch (event) { case ACPI_NOTIFY_BUS_CHECK: - if (!dock_in_progress(ds) && dock_present(ds)) { + if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle, + &tmp)) { begin_dock(ds); dock(ds); if (!dock_present(ds)) { printk(KERN_ERR PREFIX "Unable to dock!\n"); + complete_dock(ds); break; } atomic_notifier_call_chain(&dock_notifier_list, -- cgit v0.10.2 From 82545394e0690aaef446cb262aa5dac0f9c7156e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:02:41 +0800 Subject: dock: fix eject request process (2.6.27-rc1 regression) commit 2a7feab28d3fc060d320eaba192e49dad1079b7e introduces a bug. My thinkpad actually will send an eject_request and we should follow the eject process to finish the eject, otherwise system still thinks the bay is present. 
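One detail of the _STA-avoidance hunk above that is easy to misread: acpi_bus_get_device() returns 0 when a struct acpi_device has already been enumerated for the handle and a negative error otherwise, so the seemingly inverted condition fires exactly when the dock's device is not yet known to the ACPI core. A standalone sketch of that return convention (only the function's name and meaning are borrowed; the rest is an illustrative model, not driver code):

#include <errno.h>
#include <stdio.h>

/* models acpi_bus_get_device(): 0 == already enumerated, -ENODEV == not yet */
static int model_acpi_bus_get_device(int already_enumerated)
{
	return already_enumerated ? 0 : -ENODEV;
}

static void bus_check(int already_enumerated, int in_progress)
{
	/* mirrors: if (!dock_in_progress(ds) && acpi_bus_get_device(...)) */
	if (!in_progress && model_acpi_bus_get_device(already_enumerated))
		printf("start docking (device not enumerated yet)\n");
	else
		printf("ignore event (already present or dock in progress)\n");
}

int main(void)
{
	bus_check(0, 0);	/* fresh dock insertion: take the dock path */
	bus_check(1, 0);	/* already enumerated: nothing to do */
	return 0;
}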
Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 25d2161..78d27ce 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -575,11 +575,6 @@ static int handle_eject_request(struct dock_station *ds, u32 event) */ dock_event(ds, event, UNDOCK_EVENT); - if (!dock_present(ds)) { - complete_undock(ds); - return -ENODEV; - } - hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST); undock(ds); eject_dock(ds); -- cgit v0.10.2 From 406f692d0803d73acd3984c1e11719d3a913fd5e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:03:26 +0800 Subject: dock: add _LCK support support _LCK method, which is a optional method for hotplug lenb: we have not seen _LCK used in the field yet Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 78d27ce..7bdf93b 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -452,6 +452,25 @@ static inline void complete_undock(struct dock_station *ds) ds->flags &= ~(DOCK_UNDOCKING); } +static void dock_lock(struct dock_station *ds, int lock) +{ + struct acpi_object_list arg_list; + union acpi_object arg; + acpi_status status; + + arg_list.count = 1; + arg_list.pointer = &arg; + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = !!lock; + status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL); + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + if (lock) + printk(KERN_WARNING PREFIX "Locking device failed\n"); + else + printk(KERN_WARNING PREFIX "Unlocking device failed\n"); + } +} + /** * dock_in_progress - see if we are in the middle of handling a dock event * @ds: the dock station @@ -577,6 +596,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event) hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST); undock(ds); + dock_lock(ds, 0); eject_dock(ds); if (dock_present(ds)) { printk(KERN_ERR PREFIX "Unable to undock!\n"); @@ -617,6 +637,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) hotplug_dock_devices(ds, event); complete_dock(ds); dock_event(ds, event, DOCK_EVENT); + dock_lock(ds, 1); } break; case ACPI_NOTIFY_DEVICE_CHECK: -- cgit v0.10.2 From db350b084dc2cf816288643861ce07b0562dd723 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:03:58 +0800 Subject: dock: add bay and battery hotplug support Make the dock driver support bay and battery hotplug. They are all regarded as dock, so handling can be unified. 
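As a summary of the classification the new helpers below implement (the ACPI method names and the PNP0C0A HID are taken from the hunk; the surrounding scaffolding is an illustrative model, not driver code): a handle is treated as a hotpluggable bay when it is ejectable via _EJ0 and either looks like ATA (_GTF/_GTM/_STM/_SDD on itself or on its parent) or is a battery.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* illustrative stand-in for an ACPI handle, its methods and its HID */
struct acpi_obj {
	const char *hid;
	const char **methods;		/* NULL-terminated list */
	const struct acpi_obj *parent;
};

static bool has_method(const struct acpi_obj *o, const char *name)
{
	for (const char **m = o->methods; m && *m; m++)
		if (!strcmp(*m, name))
			return true;
	return false;
}

static bool is_ata(const struct acpi_obj *o)
{
	return has_method(o, "_GTF") || has_method(o, "_GTM") ||
	       has_method(o, "_STM") || has_method(o, "_SDD");
}

static bool is_battery(const struct acpi_obj *o)
{
	return o->hid && !strcmp(o->hid, "PNP0C0A");
}

static bool is_ejectable_bay(const struct acpi_obj *o)
{
	if (!has_method(o, "_EJ0"))
		return false;
	if (is_battery(o) || is_ata(o))
		return true;
	return o->parent && is_ata(o->parent);
}

int main(void)
{
	const char *bay_methods[] = { "_EJ0", "_GTF", NULL };
	struct acpi_obj bay = { .hid = NULL, .methods = bay_methods, .parent = NULL };

	printf("bay hotpluggable: %s\n", is_ejectable_bay(&bay) ? "yes" : "no");
	return 0;
}

The real detection of course goes through acpi_get_handle() and acpi_get_object_info(), as the hunk shows; the model only captures the decision logic.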
Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 7bdf93b..799a0fd 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -48,7 +48,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to " " before undocking"); static struct atomic_notifier_head dock_notifier_list; -static struct platform_device *dock_device; static char dock_device_name[] = "dock"; static const struct acpi_device_id dock_device_ids[] = { @@ -65,7 +64,12 @@ struct dock_station { struct mutex hp_lock; struct list_head dependent_devices; struct list_head hotplug_devices; + + struct list_head sibiling; + struct platform_device *dock_device; }; +static LIST_HEAD(dock_stations); +static int dock_station_count; struct dock_dependent_device { struct list_head list; @@ -77,11 +81,12 @@ struct dock_dependent_device { #define DOCK_DOCKING 0x00000001 #define DOCK_UNDOCKING 0x00000002 +#define DOCK_IS_DOCK 0x00000010 +#define DOCK_IS_ATA 0x00000020 +#define DOCK_IS_BAT 0x00000040 #define DOCK_EVENT 3 #define UNDOCK_EVENT 2 -static struct dock_station *dock_station; - /***************************************************************************** * Dock Dependent device functions * *****************************************************************************/ @@ -199,6 +204,60 @@ static int is_dock(acpi_handle handle) return 1; } +static int is_ejectable(acpi_handle handle) +{ + acpi_status status; + acpi_handle tmp; + + status = acpi_get_handle(handle, "_EJ0", &tmp); + if (ACPI_FAILURE(status)) + return 0; + return 1; +} + +static int is_ata(acpi_handle handle) +{ + acpi_handle tmp; + + if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) || + (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) || + (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) || + (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp)))) + return 1; + + return 0; +} + +static int is_battery(acpi_handle handle) +{ + struct acpi_device_info *info; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + int ret = 1; + + if (!ACPI_SUCCESS(acpi_get_object_info(handle, &buffer))) + return 0; + info = buffer.pointer; + if (!(info->valid & ACPI_VALID_HID)) + ret = 0; + else + ret = !strcmp("PNP0C0A", info->hardware_id.value); + + kfree(buffer.pointer); + return ret; +} + +static int is_ejectable_bay(acpi_handle handle) +{ + acpi_handle phandle; + if (!is_ejectable(handle)) + return 0; + if (is_battery(handle) || is_ata(handle)) + return 1; + if (!acpi_get_parent(handle, &phandle) && is_ata(phandle)) + return 1; + return 0; +} + /** * is_dock_device - see if a device is on a dock station * @handle: acpi handle of the device @@ -209,11 +268,17 @@ static int is_dock(acpi_handle handle) */ int is_dock_device(acpi_handle handle) { - if (!dock_station) + struct dock_station *dock_station; + + if (!dock_station_count) return 0; - if (is_dock(handle) || find_dock_dependent_device(dock_station, handle)) + if (is_dock(handle)) return 1; + list_for_each_entry(dock_station, &dock_stations, sibiling) { + if (find_dock_dependent_device(dock_station, handle)) + return 1; + } return 0; } @@ -341,7 +406,7 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event) static void dock_event(struct dock_station *ds, u32 event, int num) { - struct device *dev = &dock_device->dev; + struct device *dev = &ds->dock_device->dev; char event_string[13]; char *envp[] = { event_string, NULL }; @@ -414,7 +479,7 @@ static void handle_dock(struct dock_station *ds, int dock) arg.type = 
ACPI_TYPE_INTEGER; arg.integer.value = dock; status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) printk(KERN_ERR PREFIX "%s - failed to execute _DCK\n", (char *)name_buffer.pointer); kfree(buffer.pointer); @@ -498,7 +563,7 @@ static int dock_in_progress(struct dock_station *ds) */ int register_dock_notifier(struct notifier_block *nb) { - if (!dock_station) + if (!dock_station_count) return -ENODEV; return atomic_notifier_chain_register(&dock_notifier_list, nb); @@ -512,7 +577,7 @@ EXPORT_SYMBOL_GPL(register_dock_notifier); */ void unregister_dock_notifier(struct notifier_block *nb) { - if (!dock_station) + if (!dock_station_count) return; atomic_notifier_chain_unregister(&dock_notifier_list, nb); @@ -535,20 +600,23 @@ register_hotplug_dock_device(acpi_handle handle, acpi_notify_handler handler, void *context) { struct dock_dependent_device *dd; + struct dock_station *dock_station; - if (!dock_station) + if (!dock_station_count) return -ENODEV; /* * make sure this handle is for a device dependent on the dock, * this would include the dock station itself */ - dd = find_dock_dependent_device(dock_station, handle); - if (dd) { - dd->handler = handler; - dd->context = context; - dock_add_hotplug_device(dock_station, dd); - return 0; + list_for_each_entry(dock_station, &dock_stations, sibiling) { + dd = find_dock_dependent_device(dock_station, handle); + if (dd) { + dd->handler = handler; + dd->context = context; + dock_add_hotplug_device(dock_station, dd); + return 0; + } } return -EINVAL; @@ -563,13 +631,16 @@ EXPORT_SYMBOL_GPL(register_hotplug_dock_device); void unregister_hotplug_dock_device(acpi_handle handle) { struct dock_dependent_device *dd; + struct dock_station *dock_station; - if (!dock_station) + if (!dock_station_count) return; - dd = find_dock_dependent_device(dock_station, handle); - if (dd) - dock_del_hotplug_device(dock_station, dd); + list_for_each_entry(dock_station, &dock_stations, sibiling) { + dd = find_dock_dependent_device(dock_station, handle); + if (dd) + dock_del_hotplug_device(dock_station, dd); + } } EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device); @@ -620,9 +691,28 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) { struct dock_station *ds = data; struct acpi_device *tmp; + int surprise_removal = 0; + /* + * According to acpi spec 3.0a, if a DEVICE_CHECK notification + * is sent and _DCK is present, it is assumed to mean an undock + * request. + */ + if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK) + event = ACPI_NOTIFY_EJECT_REQUEST; + + /* + * dock station: BUS_CHECK - docked or surprise removal + * DEVICE_CHECK - undocked + * other device: BUS_CHECK/DEVICE_CHECK - added or surprise removal + * + * To simplify event handling, dock dependent device handler always + * get ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and + * ACPI_NOTIFY_EJECT_REQUEST for removal + */ switch (event) { case ACPI_NOTIFY_BUS_CHECK: + case ACPI_NOTIFY_DEVICE_CHECK: if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle, &tmp)) { begin_dock(ds); @@ -638,20 +728,17 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) complete_dock(ds); dock_event(ds, event, DOCK_EVENT); dock_lock(ds, 1); + break; } - break; - case ACPI_NOTIFY_DEVICE_CHECK: - /* - * According to acpi spec 3.0a, if a DEVICE_CHECK notification - * is sent and _DCK is present, it is assumed to mean an - * undock request. 
This notify routine will only be called - * for objects defining _DCK, so we will fall through to eject - * request here. However, we will pass an eject request through - * to the driver who wish to hotplug. - */ + if (dock_present(ds) || dock_in_progress(ds)) + break; + /* This is a surprise removal */ + surprise_removal = 1; + event = ACPI_NOTIFY_EJECT_REQUEST; + /* Fall back */ case ACPI_NOTIFY_EJECT_REQUEST: begin_undock(ds); - if (immediate_undock) + if (immediate_undock || surprise_removal) handle_eject_request(ds, event); else dock_event(ds, event, UNDOCK_EVENT); @@ -707,6 +794,8 @@ fdd_out: static ssize_t show_docked(struct device *dev, struct device_attribute *attr, char *buf) { + struct dock_station *dock_station = *((struct dock_station **) + dev->platform_data); return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station)); } @@ -718,6 +807,8 @@ static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL); static ssize_t show_flags(struct device *dev, struct device_attribute *attr, char *buf) { + struct dock_station *dock_station = *((struct dock_station **) + dev->platform_data); return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags); } @@ -730,6 +821,8 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; + struct dock_station *dock_station = *((struct dock_station **) + dev->platform_data); if (!count) return -EINVAL; @@ -747,6 +840,8 @@ static ssize_t show_dock_uid(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long lbuf; + struct dock_station *dock_station = *((struct dock_station **) + dev->platform_data); acpi_status status = acpi_evaluate_integer(dock_station->handle, "_UID", NULL, &lbuf); if (ACPI_FAILURE(status)) @@ -768,6 +863,8 @@ static int dock_add(acpi_handle handle) int ret; acpi_status status; struct dock_dependent_device *dd; + struct dock_station *dock_station; + struct platform_device *dock_device; /* allocate & initialize the dock_station private data */ dock_station = kzalloc(sizeof(*dock_station), GFP_KERNEL); @@ -777,22 +874,34 @@ static int dock_add(acpi_handle handle) dock_station->last_dock_time = jiffies - HZ; INIT_LIST_HEAD(&dock_station->dependent_devices); INIT_LIST_HEAD(&dock_station->hotplug_devices); + INIT_LIST_HEAD(&dock_station->sibiling); spin_lock_init(&dock_station->dd_lock); mutex_init(&dock_station->hp_lock); ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); /* initialize platform device stuff */ - dock_device = - platform_device_register_simple(dock_device_name, 0, NULL, 0); + dock_station->dock_device = + platform_device_register_simple(dock_device_name, + dock_station_count, NULL, 0); + dock_device = dock_station->dock_device; if (IS_ERR(dock_device)) { kfree(dock_station); dock_station = NULL; return PTR_ERR(dock_device); } + platform_device_add_data(dock_device, &dock_station, + sizeof(struct dock_station *)); /* we want the dock device to send uevents */ dock_device->dev.uevent_suppress = 0; + if (is_dock(handle)) + dock_station->flags |= DOCK_IS_DOCK; + if (is_ata(handle)) + dock_station->flags |= DOCK_IS_ATA; + if (is_battery(handle)) + dock_station->flags |= DOCK_IS_BAT; + ret = device_create_file(&dock_device->dev, &dev_attr_docked); if (ret) { printk("Error %d adding sysfs file\n", ret); @@ -858,8 +967,8 @@ static int dock_add(acpi_handle handle) goto dock_add_err; } - printk(KERN_INFO PREFIX "%s\n", ACPI_DOCK_DRIVER_DESCRIPTION); - + dock_station_count++; + list_add(&dock_station->sibiling, &dock_stations); return 0; 
dock_add_err: @@ -878,12 +987,13 @@ dock_add_err_unregister: /** * dock_remove - free up resources related to the dock station */ -static int dock_remove(void) +static int dock_remove(struct dock_station *dock_station) { struct dock_dependent_device *dd, *tmp; acpi_status status; + struct platform_device *dock_device = dock_station->dock_device; - if (!dock_station) + if (!dock_station_count) return 0; /* remove dependent devices */ @@ -923,41 +1033,58 @@ static int dock_remove(void) static acpi_status find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) { - int *count = context; acpi_status status = AE_OK; if (is_dock(handle)) { if (dock_add(handle) >= 0) { - (*count)++; status = AE_CTRL_TERMINATE; } } return status; } -static int __init dock_init(void) +static acpi_status +find_bay(acpi_handle handle, u32 lvl, void *context, void **rv) { - int num = 0; - - dock_station = NULL; + /* If bay is in a dock, it's already handled */ + if (is_ejectable_bay(handle) && !is_dock_device(handle)) + dock_add(handle); + return AE_OK; +} +static int __init dock_init(void) +{ if (acpi_disabled) return 0; /* look for a dock station */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, find_dock, &num, NULL); + ACPI_UINT32_MAX, find_dock, NULL, NULL); - if (!num) - printk(KERN_INFO "No dock devices found.\n"); + /* look for bay */ + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, find_bay, NULL, NULL); + if (!dock_station_count) { + printk(KERN_INFO PREFIX "No dock devices found.\n"); + return 0; + } + printk(KERN_INFO PREFIX "%s: %d docks/bays found\n", + ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); return 0; } static void __exit dock_exit(void) { - dock_remove(); + struct dock_station *dock_station; + + list_for_each_entry(dock_station, &dock_stations, sibiling) + dock_remove(dock_station); } -postcore_initcall(dock_init); +/* + * Must be called before drivers of devices in dock, otherwise we can't know + * which devices are in a dock + */ +subsys_initcall(dock_init); module_exit(dock_exit); -- cgit v0.10.2 From 6bd00a61ab63d4ceb635ae0316353c11c900b8d8 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:04:29 +0800 Subject: ACPI: introduce notifier change to avoid duplicates The battery driver already registers notification handler. To avoid registering notification handler again, introduce a notifier chain in global system notifier handler and use it in dock driver. 
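The patch below adds register_acpi_bus_notifier()/unregister_acpi_bus_notifier() and calls the chain from acpi_bus_notify(). A minimal client of that chain might look like the following sketch; the callback receives the raw acpi_handle as the data argument and the notify code as the notifier value. All "example_" names are placeholders, not code from this series, and a real client would filter on its own handle obtained elsewhere.

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <acpi/acpi_bus.h>

	/* Handle of the device this hypothetical client cares about. */
	static acpi_handle example_handle;

	static int example_bus_notify(struct notifier_block *nb,
				      unsigned long event, void *data)
	{
		acpi_handle handle = data;

		/* The chain sees every system notify, so filter early. */
		if (handle != example_handle)
			return NOTIFY_DONE;
		if (event == ACPI_NOTIFY_DEVICE_CHECK)
			printk(KERN_INFO "example: device check notify\n");
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_bus_notify,
	};

	static int __init example_init(void)
	{
		return register_acpi_bus_notifier(&example_nb);
	}

	static void __exit example_exit(void)
	{
		unregister_acpi_bus_notifier(&example_nb);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");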
Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index ccae305..0dc4494 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -496,6 +496,19 @@ static int acpi_bus_check_scope(struct acpi_device *device) return 0; } +static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list); +int register_acpi_bus_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&acpi_bus_notify_list, nb); +} +EXPORT_SYMBOL_GPL(register_acpi_bus_notifier); + +void unregister_acpi_bus_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&acpi_bus_notify_list, nb); +} +EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier); + /** * acpi_bus_notify * --------------- @@ -506,6 +519,8 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) int result = 0; struct acpi_device *device = NULL; + blocking_notifier_call_chain(&acpi_bus_notify_list, + type, (void *)handle); if (acpi_bus_get_device(handle, &device)) return; diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 799a0fd..2563bc6 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -748,6 +748,28 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) } } +static int acpi_dock_notifier_call(struct notifier_block *this, + unsigned long event, void *data) +{ + struct dock_station *dock_station; + acpi_handle handle = (acpi_handle)data; + + if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK + && event != ACPI_NOTIFY_EJECT_REQUEST) + return 0; + list_for_each_entry(dock_station, &dock_stations, sibiling) { + if (dock_station->handle == handle) { + dock_notify(handle, event, dock_station); + return 0 ; + } + } + return 0; +} + +static struct notifier_block dock_acpi_notifier = { + .notifier_call = acpi_dock_notifier_call, +}; + /** * find_dock_devices - find devices on the dock station * @handle: the handle of the device we are examining @@ -861,7 +883,6 @@ static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL); static int dock_add(acpi_handle handle) { int ret; - acpi_status status; struct dock_dependent_device *dd; struct dock_station *dock_station; struct platform_device *dock_device; @@ -956,23 +977,10 @@ static int dock_add(acpi_handle handle) } add_dock_dependent_device(dock_station, dd); - /* register for dock events */ - status = acpi_install_notify_handler(dock_station->handle, - ACPI_SYSTEM_NOTIFY, - dock_notify, dock_station); - - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Error installing notify handler\n"); - ret = -ENODEV; - goto dock_add_err; - } - dock_station_count++; list_add(&dock_station->sibiling, &dock_stations); return 0; -dock_add_err: - kfree(dd); dock_add_err_unregister: device_remove_file(&dock_device->dev, &dev_attr_docked); device_remove_file(&dock_device->dev, &dev_attr_undock); @@ -990,7 +998,6 @@ dock_add_err_unregister: static int dock_remove(struct dock_station *dock_station) { struct dock_dependent_device *dd, *tmp; - acpi_status status; struct platform_device *dock_device = dock_station->dock_device; if (!dock_station_count) @@ -1001,13 +1008,6 @@ static int dock_remove(struct dock_station *dock_station) list) kfree(dd); - /* remove dock notify handler */ - status = acpi_remove_notify_handler(dock_station->handle, - ACPI_SYSTEM_NOTIFY, - dock_notify); - if (ACPI_FAILURE(status)) - printk(KERN_ERR "Error removing notify handler\n"); - /* cleanup sysfs */ device_remove_file(&dock_device->dev, &dev_attr_docked); device_remove_file(&dock_device->dev, 
&dev_attr_undock); @@ -1069,6 +1069,7 @@ static int __init dock_init(void) return 0; } + register_acpi_bus_notifier(&dock_acpi_notifier); printk(KERN_INFO PREFIX "%s: %d docks/bays found\n", ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); return 0; @@ -1078,6 +1079,7 @@ static void __exit dock_exit(void) { struct dock_station *dock_station; + unregister_acpi_bus_notifier(&dock_acpi_notifier); list_for_each_entry(dock_station, &dock_stations, sibiling) dock_remove(dock_station); } diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index a5ac0bc..f74f8826 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -327,6 +327,9 @@ int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); + +extern int register_acpi_bus_notifier(struct notifier_block *nb); +extern void unregister_acpi_bus_notifier(struct notifier_block *nb); /* * External Functions */ -- cgit v0.10.2 From 19cd847ab24fefe9e50101ec94479e0400a08650 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 28 Aug 2008 10:05:06 +0800 Subject: ACPI: fix hotplug race The hotplug notification handler and drivers' notification handler all run in one workqueue. Before hotplug removes an acpi device, the device driver's notification handler is already be recorded to run just after global notification handler. After hotplug notification handler runs, acpica will notice a NULL notification handler and crash. So now we run run hotplug in another workqueue and wait for all acpi notication handlers finish. This was found in battery hotplug, but actually all hotplug can be affected. Signed-off-by: Zhang Rui Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 2563bc6..4b395b1 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -748,6 +748,20 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) } } +struct dock_data { + acpi_handle handle; + unsigned long event; + struct dock_station *ds; +}; + +static void acpi_dock_deferred_cb(void *context) +{ + struct dock_data *data = (struct dock_data *)context; + + dock_notify(data->handle, data->event, data->ds); + kfree(data); +} + static int acpi_dock_notifier_call(struct notifier_block *this, unsigned long event, void *data) { @@ -759,7 +773,16 @@ static int acpi_dock_notifier_call(struct notifier_block *this, return 0; list_for_each_entry(dock_station, &dock_stations, sibiling) { if (dock_station->handle == handle) { - dock_notify(handle, event, dock_station); + struct dock_data *dock_data; + + dock_data = kmalloc(sizeof(*dock_data), GFP_KERNEL); + if (!dock_data) + return 0; + dock_data->handle = handle; + dock_data->event = event; + dock_data->ds = dock_station; + acpi_os_hotplug_execute(acpi_dock_deferred_cb, + dock_data); return 0 ; } } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 235a138..750e0df 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -682,6 +682,22 @@ static void acpi_os_execute_deferred(struct work_struct *work) return; } +static void acpi_os_execute_hp_deferred(struct work_struct *work) +{ + struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); + if (!dpc) { + printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); + return; + } + + acpi_os_wait_events_complete(NULL); + + dpc->function(dpc->context); + kfree(dpc); + + return; +} + 
/******************************************************************************* * * FUNCTION: acpi_os_execute @@ -697,12 +713,13 @@ static void acpi_os_execute_deferred(struct work_struct *work) * ******************************************************************************/ -acpi_status acpi_os_execute(acpi_execute_type type, - acpi_osd_exec_callback function, void *context) +static acpi_status __acpi_os_execute(acpi_execute_type type, + acpi_osd_exec_callback function, void *context, int hp) { acpi_status status = AE_OK; struct acpi_os_dpc *dpc; struct workqueue_struct *queue; + int ret; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context)); @@ -726,9 +743,17 @@ acpi_status acpi_os_execute(acpi_execute_type type, dpc->function = function; dpc->context = context; - INIT_WORK(&dpc->work, acpi_os_execute_deferred); - queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq; - if (!queue_work(queue, &dpc->work)) { + if (!hp) { + INIT_WORK(&dpc->work, acpi_os_execute_deferred); + queue = (type == OSL_NOTIFY_HANDLER) ? + kacpi_notify_wq : kacpid_wq; + ret = queue_work(queue, &dpc->work); + } else { + INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred); + ret = schedule_work(&dpc->work); + } + + if (!ret) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Call to queue_work() failed.\n")); status = AE_ERROR; @@ -737,8 +762,19 @@ acpi_status acpi_os_execute(acpi_execute_type type, return_ACPI_STATUS(status); } +acpi_status acpi_os_execute(acpi_execute_type type, + acpi_osd_exec_callback function, void *context) +{ + return __acpi_os_execute(type, function, context, 0); +} EXPORT_SYMBOL(acpi_os_execute); +acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function, + void *context) +{ + return __acpi_os_execute(0, function, context, 1); +} + void acpi_os_wait_events_complete(void *context) { flush_workqueue(kacpid_wq); diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 3f93a6b..b91440a 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -193,6 +193,9 @@ acpi_status acpi_os_execute(acpi_execute_type type, acpi_osd_exec_callback function, void *context); +acpi_status +acpi_os_hotplug_execute(acpi_osd_exec_callback function, void *context); + void acpi_os_wait_events_complete(void *context); void acpi_os_sleep(acpi_integer milliseconds); -- cgit v0.10.2 From f730ae1838635a02aa60834762c61566911d004c Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:05:45 +0800 Subject: libata: remove functions now handled by ACPI dock driver The dock driver can handle ATA (bay) hotplug now. The dock driver already handles _EJ0 and _STA, so remove them. Also, libata doesn't need to register a notification handler anymore.
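The hotplug-race fix above defers the actual dock handling to a separate work item so that acpi_os_wait_events_complete() can drain the ACPI notify workqueues before the hotplug work runs. Stripped of the ACPI specifics, the deferral pattern used by struct dock_data and __acpi_os_execute() looks like the sketch below; the "example_" names are illustrative and not part of the patch.

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	/* Context packaged for deferred execution, like struct dock_data above. */
	struct example_ctx {
		unsigned long event;
		struct work_struct work;
	};

	static void example_work_fn(struct work_struct *work)
	{
		struct example_ctx *ctx =
			container_of(work, struct example_ctx, work);

		/* Runs later in process context; may sleep or flush other queues. */
		/* ... handle ctx->event ... */
		kfree(ctx);
	}

	static int example_defer(unsigned long event)
	{
		struct example_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		ctx->event = event;
		INIT_WORK(&ctx->work, example_work_fn);
		if (!schedule_work(&ctx->work)) {
			/* Defensive: a freshly initialized item should always queue. */
			kfree(ctx);
			return -EBUSY;
		}
		return 0;
	}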
Signed-off-by: Shaohua Li Acked-by: Tejun Heo Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 4b395b1..f19f643 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -738,7 +738,8 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) /* Fall back */ case ACPI_NOTIFY_EJECT_REQUEST: begin_undock(ds); - if (immediate_undock || surprise_removal) + if ((immediate_undock && !(ds->flags & DOCK_IS_ATA)) + || surprise_removal) handle_eject_request(ds, event); else dock_event(ds, event, UNDOCK_EVENT); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 9330b79..97727be 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -120,21 +120,6 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap) ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; } -static void ata_acpi_eject_device(acpi_handle handle) -{ - struct acpi_object_list arg_list; - union acpi_object arg; - - arg_list.count = 1; - arg_list.pointer = &arg; - arg.type = ACPI_TYPE_INTEGER; - arg.integer.value = 1; - - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0", - &arg_list, NULL))) - printk(KERN_ERR "Failed to evaluate _EJ0!\n"); -} - /* @ap and @dev are the same as ata_acpi_handle_hotplug() */ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) { @@ -157,7 +142,6 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) * @ap: ATA port ACPI event occurred * @dev: ATA device ACPI event occurred (can be NULL) * @event: ACPI event which occurred - * @is_dock_event: boolean indicating whether the event was a dock one * * All ACPI bay / device realted events end up in this function. If * the event is port-wide @dev is NULL. If the event is specific to a @@ -171,115 +155,58 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) * ACPI notify handler context. May sleep. 
*/ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, - u32 event, int is_dock_event) + u32 event) { - char event_string[12]; - char *envp[] = { event_string, NULL }; struct ata_eh_info *ehi = &ap->link.eh_info; - struct kobject *kobj = NULL; int wait = 0; unsigned long flags; - acpi_handle handle, tmphandle; - unsigned long sta; - acpi_status status; + acpi_handle handle; - if (dev) { - if (dev->sdev) - kobj = &dev->sdev->sdev_gendev.kobj; + if (dev) handle = dev->acpi_handle; - } else { - kobj = &ap->dev->kobj; + else handle = ap->acpi_handle; - } - - status = acpi_get_handle(handle, "_EJ0", &tmphandle); - if (ACPI_FAILURE(status)) - /* This device does not support hotplug */ - return; - - if (event == ACPI_NOTIFY_BUS_CHECK || - event == ACPI_NOTIFY_DEVICE_CHECK) - status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); spin_lock_irqsave(ap->lock, flags); - + /* + * When dock driver calls into the routine, it will always use + * ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and + * ACPI_NOTIFY_EJECT_REQUEST for remove + */ switch (event) { case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: ata_ehi_push_desc(ehi, "ACPI event"); - if (ACPI_FAILURE(status)) { - ata_port_printk(ap, KERN_ERR, - "acpi: failed to determine bay status (0x%x)\n", - status); - break; - } - - if (sta) { - ata_ehi_hotplugged(ehi); - ata_port_freeze(ap); - } else { - /* The device has gone - unplug it */ - ata_acpi_detach_device(ap, dev); - wait = 1; - } + ata_ehi_hotplugged(ehi); + ata_port_freeze(ap); break; case ACPI_NOTIFY_EJECT_REQUEST: ata_ehi_push_desc(ehi, "ACPI event"); - if (!is_dock_event) - break; - - /* undock event - immediate unplug */ ata_acpi_detach_device(ap, dev); wait = 1; break; } - /* make sure kobj doesn't go away while ap->lock is released */ - kobject_get(kobj); - spin_unlock_irqrestore(ap->lock, flags); - if (wait) { + if (wait) ata_port_wait_eh(ap); - ata_acpi_eject_device(handle); - } - - if (kobj && !is_dock_event) { - sprintf(event_string, "BAY_EVENT=%d", event); - kobject_uevent_env(kobj, KOBJ_CHANGE, envp); - } - - kobject_put(kobj); } static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) { struct ata_device *dev = data; - ata_acpi_handle_hotplug(dev->link->ap, dev, event, 1); + ata_acpi_handle_hotplug(dev->link->ap, dev, event); } static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data) { struct ata_port *ap = data; - ata_acpi_handle_hotplug(ap, NULL, event, 1); -} - -static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data) -{ - struct ata_device *dev = data; - - ata_acpi_handle_hotplug(dev->link->ap, dev, event, 0); -} - -static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) -{ - struct ata_port *ap = data; - - ata_acpi_handle_hotplug(ap, NULL, event, 0); + ata_acpi_handle_hotplug(ap, NULL, event); } /** @@ -315,9 +242,6 @@ void ata_acpi_associate(struct ata_host *host) ata_acpi_associate_ide_port(ap); if (ap->acpi_handle) { - acpi_install_notify_handler(ap->acpi_handle, - ACPI_SYSTEM_NOTIFY, - ata_acpi_ap_notify, ap); /* we might be on a docking station */ register_hotplug_dock_device(ap->acpi_handle, ata_acpi_ap_notify_dock, ap); @@ -327,9 +251,6 @@ void ata_acpi_associate(struct ata_host *host) struct ata_device *dev = &ap->link.device[j]; if (dev->acpi_handle) { - acpi_install_notify_handler(dev->acpi_handle, - ACPI_SYSTEM_NOTIFY, - ata_acpi_dev_notify, dev); /* we might be on a docking station */ register_hotplug_dock_device(dev->acpi_handle, 
ata_acpi_dev_notify_dock, dev); -- cgit v0.10.2 From 1253f7aabfebc51446dbec5c8895c5c8846dfe06 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:06:16 +0800 Subject: dock: introduce .uevent for devices in dock, eg libata dock's uevent reported itself, not ata. It might be difficult to find an ata device just according to a dock. This patch introduces docking ops for each device in a dock. when docking, dock driver can send device specific uevent. This should help dock station too (not just bay) Signed-off-by: Shaohua Li Acked-by: Tejun Heo Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index f19f643..ac7dfef 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -75,7 +75,7 @@ struct dock_dependent_device { struct list_head list; struct list_head hotplug_list; acpi_handle handle; - acpi_notify_handler handler; + struct acpi_dock_ops *ops; void *context; }; @@ -385,8 +385,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event) * First call driver specific hotplug functions */ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) { - if (dd->handler) - dd->handler(dd->handle, event, dd->context); + if (dd->ops && dd->ops->handler) + dd->ops->handler(dd->handle, event, dd->context); } /* @@ -409,6 +409,7 @@ static void dock_event(struct dock_station *ds, u32 event, int num) struct device *dev = &ds->dock_device->dev; char event_string[13]; char *envp[] = { event_string, NULL }; + struct dock_dependent_device *dd; if (num == UNDOCK_EVENT) sprintf(event_string, "EVENT=undock"); @@ -419,7 +420,14 @@ static void dock_event(struct dock_station *ds, u32 event, int num) * Indicate that the status of the dock station has * changed. */ - kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); + if (num == DOCK_EVENT) + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); + + list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) + if (dd->ops && dd->ops->uevent) + dd->ops->uevent(dd->handle, event, dd->context); + if (num != DOCK_EVENT) + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); } /** @@ -588,7 +596,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier); /** * register_hotplug_dock_device - register a hotplug function * @handle: the handle of the device - * @handler: the acpi_notifier_handler to call after docking + * @ops: handlers to call after docking * @context: device specific data * * If a driver would like to perform a hotplug operation after a dock @@ -596,7 +604,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier); * the dock driver after _DCK is executed. 
*/ int -register_hotplug_dock_device(acpi_handle handle, acpi_notify_handler handler, +register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, void *context) { struct dock_dependent_device *dd; @@ -612,7 +620,7 @@ register_hotplug_dock_device(acpi_handle handle, acpi_notify_handler handler, list_for_each_entry(dock_station, &dock_stations, sibiling) { dd = find_dock_dependent_device(dock_station, handle); if (dd) { - dd->handler = handler; + dd->ops = ops; dd->context = context; dock_add_hotplug_device(dock_station, dd); return 0; diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index 97727be..c012307 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -209,6 +209,46 @@ static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data) ata_acpi_handle_hotplug(ap, NULL, event); } +static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev, + u32 event) +{ + struct kobject *kobj = NULL; + char event_string[20]; + char *envp[] = { event_string, NULL }; + + if (dev) { + if (dev->sdev) + kobj = &dev->sdev->sdev_gendev.kobj; + } else + kobj = &ap->dev->kobj; + + if (kobj) { + snprintf(event_string, 20, "BAY_EVENT=%d", event); + kobject_uevent_env(kobj, KOBJ_CHANGE, envp); + } +} + +static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data) +{ + ata_acpi_uevent(data, NULL, event); +} + +static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data) +{ + struct ata_device *dev = data; + ata_acpi_uevent(dev->link->ap, dev, event); +} + +static struct acpi_dock_ops ata_acpi_dev_dock_ops = { + .handler = ata_acpi_dev_notify_dock, + .uevent = ata_acpi_dev_uevent, +}; + +static struct acpi_dock_ops ata_acpi_ap_dock_ops = { + .handler = ata_acpi_ap_notify_dock, + .uevent = ata_acpi_ap_uevent, +}; + /** * ata_acpi_associate - associate ATA host with ACPI objects * @host: target ATA host @@ -244,7 +284,7 @@ void ata_acpi_associate(struct ata_host *host) if (ap->acpi_handle) { /* we might be on a docking station */ register_hotplug_dock_device(ap->acpi_handle, - ata_acpi_ap_notify_dock, ap); + &ata_acpi_ap_dock_ops, ap); } for (j = 0; j < ata_link_max_devices(&ap->link); j++) { @@ -253,7 +293,7 @@ void ata_acpi_associate(struct ata_host *host) if (dev->acpi_handle) { /* we might be on a docking station */ register_hotplug_dock_device(dev->acpi_handle, - ata_acpi_dev_notify_dock, dev); + &ata_acpi_dev_dock_ops, dev); } } } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index a3e4705..db54c5e 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -169,7 +169,9 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, } - +static struct acpi_dock_ops acpiphp_dock_ops = { + .handler = handle_hotplug_event_func, +}; /* callback routine to register each ACPI PCI slot object */ static acpi_status @@ -285,7 +287,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) */ newfunc->flags &= ~FUNC_HAS_EJ0; if (register_hotplug_dock_device(handle, - handle_hotplug_event_func, newfunc)) + &acpiphp_dock_ops, newfunc)) dbg("failed to register dock device\n"); /* we need to be notified when dock events happen diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index e5f38e5..4f5042a 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -115,12 +115,17 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type); 
/*-------------------------------------------------------------------------- Dock Station -------------------------------------------------------------------------- */ +struct acpi_dock_ops { + acpi_notify_handler handler; + acpi_notify_handler uevent; +}; + #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE) extern int is_dock_device(acpi_handle handle); extern int register_dock_notifier(struct notifier_block *nb); extern void unregister_dock_notifier(struct notifier_block *nb); extern int register_hotplug_dock_device(acpi_handle handle, - acpi_notify_handler handler, + struct acpi_dock_ops *ops, void *context); extern void unregister_hotplug_dock_device(acpi_handle handle); #else @@ -136,7 +141,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb) { } static inline int register_hotplug_dock_device(acpi_handle handle, - acpi_notify_handler handler, + struct acpi_dock_ops *ops, void *context) { return -ENODEV; -- cgit v0.10.2 From 4be9309d15e88e4a1e4a78deb52eb88c7da38c99 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:06:44 +0800 Subject: bay: remove driver, all functions now handled by dock driver Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 735f5ea..3919d6d 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -160,15 +160,8 @@ config ACPI_DOCK tristate "Dock" depends on EXPERIMENTAL help - This driver adds support for ACPI controlled docking stations - -config ACPI_BAY - tristate "Removable Drive Bay (EXPERIMENTAL)" - depends on EXPERIMENTAL - depends on ACPI_DOCK - help - This driver adds support for ACPI controlled removable drive - bays such as the IBM ultrabay or the Dell Module Bay. + This driver adds support for ACPI controlled docking stations and removable + drive bays such as the IBM ultrabay or the Dell Module Bay. config ACPI_PROCESSOR tristate "Processor" diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 52a4cd4..ad4bfd5 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -45,7 +45,6 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o obj-$(CONFIG_ACPI_BUTTON) += button.o obj-$(CONFIG_ACPI_FAN) += fan.o obj-$(CONFIG_ACPI_DOCK) += dock.o -obj-$(CONFIG_ACPI_BAY) += bay.o obj-$(CONFIG_ACPI_VIDEO) += video.o obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c deleted file mode 100644 index 61b6c5b..0000000 --- a/drivers/acpi/bay.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * bay.c - ACPI removable drive bay driver - * - * Copyright (C) 2006 Kristen Carlson Accardi - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
- * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -ACPI_MODULE_NAME("bay"); -MODULE_AUTHOR("Kristen Carlson Accardi"); -MODULE_DESCRIPTION("ACPI Removable Drive Bay Driver"); -MODULE_LICENSE("GPL"); -#define ACPI_BAY_CLASS "bay" -#define ACPI_BAY_COMPONENT 0x10000000 -#define _COMPONENT ACPI_BAY_COMPONENT -#define bay_dprintk(h,s) {\ - char prefix[80] = {'\0'};\ - struct acpi_buffer buffer = {sizeof(prefix), prefix};\ - acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\ - printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); } -static void bay_notify(acpi_handle handle, u32 event, void *data); - -static const struct acpi_device_id bay_device_ids[] = { - {"LNXIOBAY", 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, bay_device_ids); - -struct bay { - acpi_handle handle; - char *name; - struct list_head list; - struct platform_device *pdev; -}; - -static LIST_HEAD(drive_bays); - - -/***************************************************************************** - * Drive Bay functions * - *****************************************************************************/ -/** - * is_ejectable - see if a device is ejectable - * @handle: acpi handle of the device - * - * If an acpi object has a _EJ0 method, then it is ejectable - */ -static int is_ejectable(acpi_handle handle) -{ - acpi_status status; - acpi_handle tmp; - - status = acpi_get_handle(handle, "_EJ0", &tmp); - if (ACPI_FAILURE(status)) - return 0; - return 1; -} - -/** - * bay_present - see if the bay device is present - * @bay: the drive bay - * - * execute the _STA method. - */ -static int bay_present(struct bay *bay) -{ - unsigned long sta; - acpi_status status; - - if (bay) { - status = acpi_evaluate_integer(bay->handle, "_STA", NULL, &sta); - if (ACPI_SUCCESS(status) && sta) - return 1; - } - return 0; -} - -/** - * eject_device - respond to an eject request - * @handle - the device to eject - * - * Call this devices _EJ0 method. 
- */ -static void eject_device(acpi_handle handle) -{ - struct acpi_object_list arg_list; - union acpi_object arg; - - bay_dprintk(handle, "Ejecting device"); - - arg_list.count = 1; - arg_list.pointer = &arg; - arg.type = ACPI_TYPE_INTEGER; - arg.integer.value = 1; - - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0", - &arg_list, NULL))) - pr_debug("Failed to evaluate _EJ0!\n"); -} - -/* - * show_present - read method for "present" file in sysfs - */ -static ssize_t show_present(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct bay *bay = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay)); - -} -static DEVICE_ATTR(present, S_IRUGO, show_present, NULL); - -/* - * write_eject - write method for "eject" file in sysfs - */ -static ssize_t write_eject(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct bay *bay = dev_get_drvdata(dev); - - if (!count) - return -EINVAL; - - eject_device(bay->handle); - return count; -} -static DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject); - -/** - * is_ata - see if a device is an ata device - * @handle: acpi handle of the device - * - * If an acpi object has one of 4 ATA ACPI methods defined, - * then it is an ATA device - */ -static int is_ata(acpi_handle handle) -{ - acpi_handle tmp; - - if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) || - (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) || - (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) || - (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp)))) - return 1; - - return 0; -} - -/** - * parent_is_ata(acpi_handle handle) - * - */ -static int parent_is_ata(acpi_handle handle) -{ - acpi_handle phandle; - - if (acpi_get_parent(handle, &phandle)) - return 0; - - return is_ata(phandle); -} - -/** - * is_ejectable_bay - see if a device is an ejectable drive bay - * @handle: acpi handle of the device - * - * If an acpi object is ejectable and has one of the ACPI ATA - * methods defined, then we can safely call it an ejectable - * drive bay - */ -static int is_ejectable_bay(acpi_handle handle) -{ - if ((is_ata(handle) || parent_is_ata(handle)) && is_ejectable(handle)) - return 1; - return 0; -} - -#if 0 -/** - * eject_removable_drive - try to eject this drive - * @dev : the device structure of the drive - * - * If a device is a removable drive that requires an _EJ0 method - * to be executed in order to safely remove from the system, do - * it. ATM - always returns success - */ -int eject_removable_drive(struct device *dev) -{ - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); - - if (handle) { - bay_dprintk(handle, "Got device handle"); - if (is_ejectable_bay(handle)) - eject_device(handle); - } else { - printk("No acpi handle for device\n"); - } - - /* should I return an error code? 
*/ - return 0; -} -EXPORT_SYMBOL_GPL(eject_removable_drive); -#endif /* 0 */ - -static int acpi_bay_add_fs(struct bay *bay) -{ - int ret; - struct device *dev = &bay->pdev->dev; - - ret = device_create_file(dev, &dev_attr_present); - if (ret) - goto add_fs_err; - ret = device_create_file(dev, &dev_attr_eject); - if (ret) { - device_remove_file(dev, &dev_attr_present); - goto add_fs_err; - } - return 0; - - add_fs_err: - bay_dprintk(bay->handle, "Error adding sysfs files\n"); - return ret; -} - -static void acpi_bay_remove_fs(struct bay *bay) -{ - struct device *dev = &bay->pdev->dev; - - /* cleanup sysfs */ - device_remove_file(dev, &dev_attr_present); - device_remove_file(dev, &dev_attr_eject); -} - -static int bay_is_dock_device(acpi_handle handle) -{ - acpi_handle parent; - - acpi_get_parent(handle, &parent); - - /* if the device or it's parent is dependent on the - * dock, then we are a dock device - */ - return (is_dock_device(handle) || is_dock_device(parent)); -} - -static int bay_add(acpi_handle handle, int id) -{ - acpi_status status; - struct bay *new_bay; - struct platform_device *pdev; - struct acpi_buffer nbuffer = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_get_name(handle, ACPI_FULL_PATHNAME, &nbuffer); - - bay_dprintk(handle, "Adding notify handler"); - - /* - * Initialize bay device structure - */ - new_bay = kzalloc(sizeof(*new_bay), GFP_ATOMIC); - INIT_LIST_HEAD(&new_bay->list); - new_bay->handle = handle; - new_bay->name = (char *)nbuffer.pointer; - - /* initialize platform device stuff */ - pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0); - if (IS_ERR(pdev)) { - printk(KERN_ERR PREFIX "Error registering bay device\n"); - goto bay_add_err; - } - new_bay->pdev = pdev; - platform_set_drvdata(pdev, new_bay); - - /* - * we want the bay driver to be able to send uevents - */ - pdev->dev.uevent_suppress = 0; - - /* register for events on this device */ - status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, - bay_notify, new_bay); - if (ACPI_FAILURE(status)) { - printk(KERN_INFO PREFIX "Error installing bay notify handler\n"); - platform_device_unregister(new_bay->pdev); - goto bay_add_err; - } - - if (acpi_bay_add_fs(new_bay)) { - acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, - bay_notify); - platform_device_unregister(new_bay->pdev); - goto bay_add_err; - } - - /* if we are on a dock station, we should register for dock - * notifications. - */ - if (bay_is_dock_device(handle)) { - bay_dprintk(handle, "Is dependent on dock\n"); - register_hotplug_dock_device(handle, bay_notify, new_bay); - } - list_add(&new_bay->list, &drive_bays); - printk(KERN_INFO PREFIX "Bay [%s] Added\n", new_bay->name); - return 0; - -bay_add_err: - kfree(new_bay->name); - kfree(new_bay); - return -ENODEV; -} - -/** - * bay_notify - act upon an acpi bay notification - * @handle: the bay handle - * @event: the acpi event - * @data: our driver data struct - * - */ -static void bay_notify(acpi_handle handle, u32 event, void *data) -{ - struct bay *bay_dev = (struct bay *)data; - struct device *dev = &bay_dev->pdev->dev; - char event_string[12]; - char *envp[] = { event_string, NULL }; - - bay_dprintk(handle, "Bay event"); - sprintf(event_string, "BAY_EVENT=%d", event); - kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); -} - -static acpi_status -find_bay(acpi_handle handle, u32 lvl, void *context, void **rv) -{ - int *count = (int *)context; - - /* - * there could be more than one ejectable bay. - * so, just return AE_OK always so that every object - * will be checked. 
- */ - if (is_ejectable_bay(handle)) { - bay_dprintk(handle, "found ejectable bay"); - if (!bay_add(handle, *count)) - (*count)++; - } - return AE_OK; -} - -static int __init bay_init(void) -{ - int bays = 0; - - INIT_LIST_HEAD(&drive_bays); - - if (acpi_disabled) - return -ENODEV; - - /* look for dockable drive bays */ - acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, find_bay, &bays, NULL); - - if (!bays) - return -ENODEV; - - return 0; -} - -static void __exit bay_exit(void) -{ - struct bay *bay, *tmp; - - list_for_each_entry_safe(bay, tmp, &drive_bays, list) { - if (is_dock_device(bay->handle)) - unregister_hotplug_dock_device(bay->handle); - acpi_bay_remove_fs(bay); - acpi_remove_notify_handler(bay->handle, ACPI_SYSTEM_NOTIFY, - bay_notify); - platform_device_unregister(bay->pdev); - kfree(bay->name); - kfree(bay); - } -} - -postcore_initcall(bay_init); -module_exit(bay_exit); - -- cgit v0.10.2 From 61b836958371c717d1e6d4fea1d2c512969ad20b Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:07:14 +0800 Subject: dock: fix for ATA bay in a dock station an ATA bay can be in a dock and itself can be ejected separately. This patch handles such eject bay. Found by Holger. Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index ac7dfef..c877cc5 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -609,6 +609,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, { struct dock_dependent_device *dd; struct dock_station *dock_station; + int ret = -EINVAL; if (!dock_station_count) return -ENODEV; @@ -618,16 +619,21 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, * this would include the dock station itself */ list_for_each_entry(dock_station, &dock_stations, sibiling) { + /* + * An ATA bay can be in a dock and itself can be ejected + * seperately, so there are two 'dock stations' which need the + * ops + */ dd = find_dock_dependent_device(dock_station, handle); if (dd) { dd->ops = ops; dd->context = context; dock_add_hotplug_device(dock_station, dd); - return 0; + ret = 0; } } - return -EINVAL; + return ret; } EXPORT_SYMBOL_GPL(register_hotplug_dock_device); @@ -1078,8 +1084,8 @@ find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) static acpi_status find_bay(acpi_handle handle, u32 lvl, void *context, void **rv) { - /* If bay is in a dock, it's already handled */ - if (is_ejectable_bay(handle) && !is_dock_device(handle)) + /* If bay is a dock, it's already handled */ + if (is_ejectable_bay(handle) && !is_dock(handle)) dock_add(handle); return AE_OK; } -- cgit v0.10.2 From 8652b00fd6416773f113dab3dfa0d4509def825b Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 28 Aug 2008 10:07:45 +0800 Subject: dock: add 'type' sysfs file add a sysfs file to present dock type. Suggested by Holger. 
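The struct acpi_dock_ops interface introduced a few patches above separates the docking work (->handler, called from hotplug_dock_devices() after _DCK) from uevent reporting (->uevent, called from dock_event()). A driver for a device sitting in a dock or bay hooks in roughly as sketched below; the "example_" names are placeholders, and the real hookup for libata is shown in the libata-acpi.c hunks earlier in this series.

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <acpi/acpi_bus.h>
	#include <acpi/acpi_drivers.h>

	static void example_dock_handler(acpi_handle handle, u32 event, void *context)
	{
		/* Re-probe or detach the device passed in as @context. */
	}

	static void example_dock_uevent(acpi_handle handle, u32 event, void *context)
	{
		/* Emit a device-specific uevent for @context. */
	}

	static struct acpi_dock_ops example_dock_ops = {
		.handler = example_dock_handler,
		.uevent  = example_dock_uevent,
	};

	/* Called once the driver knows the ACPI handle of its device. */
	static int example_hook_into_dock(acpi_handle handle, void *context)
	{
		if (!is_dock_device(handle))
			return -ENODEV;
		return register_hotplug_dock_device(handle, &example_dock_ops,
						    context);
	}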
Signed-off-by: Shaohua Li Signed-off-by: Len Brown diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index c877cc5..b7d1581 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -911,6 +911,26 @@ static ssize_t show_dock_uid(struct device *dev, } static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL); +static ssize_t show_dock_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dock_station *dock_station = *((struct dock_station **) + dev->platform_data); + char *type; + + if (dock_station->flags & DOCK_IS_DOCK) + type = "dock_station"; + else if (dock_station->flags & DOCK_IS_ATA) + type = "ata_bay"; + else if (dock_station->flags & DOCK_IS_BAT) + type = "battery_bay"; + else + type = "unknown"; + + return snprintf(buf, PAGE_SIZE, "%s\n", type); +} +static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL); + /** * dock_add - add a new dock station * @handle: the dock station handle @@ -999,6 +1019,9 @@ static int dock_add(acpi_handle handle) dock_station = NULL; return ret; } + ret = device_create_file(&dock_device->dev, &dev_attr_type); + if (ret) + printk(KERN_ERR"Error %d adding sysfs file\n", ret); /* Find dependent devices */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, @@ -1020,6 +1043,7 @@ static int dock_add(acpi_handle handle) return 0; dock_add_err_unregister: + device_remove_file(&dock_device->dev, &dev_attr_type); device_remove_file(&dock_device->dev, &dev_attr_docked); device_remove_file(&dock_device->dev, &dev_attr_undock); device_remove_file(&dock_device->dev, &dev_attr_uid); @@ -1047,6 +1071,7 @@ static int dock_remove(struct dock_station *dock_station) kfree(dd); /* cleanup sysfs */ + device_remove_file(&dock_device->dev, &dev_attr_type); device_remove_file(&dock_device->dev, &dev_attr_docked); device_remove_file(&dock_device->dev, &dev_attr_undock); device_remove_file(&dock_device->dev, &dev_attr_uid); -- cgit v0.10.2 From aa88f169d6fc4305125b6917d9d5f2e08211f011 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Wed, 24 Sep 2008 11:46:48 +0900 Subject: sh: ap325rxa: create CPLD data area in mtd AP320 and AP325RXA have CPLD data in NOR Flash. If this area is erased, the board cannot boot. This patch creates a CPLD data area and sets the writeable mask bit. Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Paul Mundt diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index fd16125..00e632f 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -52,20 +52,33 @@ static struct platform_device smc9118_device = { }, }; +/* + * AP320 and AP325RXA has CPLD data in NOR Flash(0xA80000-0xABFFFF). + * If this area erased, this board can not boot.
+ */ static struct mtd_partition ap325rxa_nor_flash_partitions[] = { { - .name = "uboot", - .offset = 0, - .size = (1 * 1024 * 1024), - .mask_flags = MTD_WRITEABLE, /* Read-only */ + .name = "uboot", + .offset = 0, + .size = (1 * 1024 * 1024), + .mask_flags = MTD_WRITEABLE, /* Read-only */ + }, { + .name = "kernel", + .offset = MTDPART_OFS_APPEND, + .size = (2 * 1024 * 1024), + }, { + .name = "free-area0", + .offset = MTDPART_OFS_APPEND, + .size = ((7 * 1024 * 1024) + (512 * 1024)), }, { - .name = "kernel", - .offset = MTDPART_OFS_APPEND, - .size = (2 * 1024 * 1024), + .name = "CPLD-Data", + .offset = MTDPART_OFS_APPEND, + .mask_flags = MTD_WRITEABLE, /* Read-only */ + .size = (1024 * 128 * 2), }, { - .name = "other", - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL, + .name = "free-area1", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL, }, }; -- cgit v0.10.2 From 1bec157a1f747d038026efabebdee4c929147b63 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 24 Sep 2008 14:37:35 +0900 Subject: sh: Force pending restarted system calls to return -EINTR. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index be194d0..69d09c0 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -216,6 +216,9 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, sigset_t set; int r0; + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -250,6 +253,9 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, sigset_t set; int r0; + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 37bd381..0582ae4 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -375,6 +375,9 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3, sigset_t set; long long ret; + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -412,6 +415,9 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3, stack_t __user st; long long ret; + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -539,7 +545,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, * On SH5 all edited pointers are subject to NEFF */ DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; } else { /* * Different approach on SH5. @@ -554,7 +560,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, */ DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? 
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; if (__copy_to_user(frame->retcode, (unsigned long long)sa_default_restorer & (~1), 16) != 0) @@ -570,7 +576,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, */ regs->regs[REG_SP] = (unsigned long) frame; regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? - (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ /* FIXME: @@ -656,7 +662,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * On SH5 all edited pointers are subject to NEFF */ DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; } else { /* * Different approach on SH5. @@ -672,7 +678,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; if (__copy_to_user(frame->retcode, (unsigned long long)sa_default_rt_restorer & (~1), 16) != 0) @@ -687,7 +693,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, */ regs->regs[REG_SP] = (unsigned long) frame; regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? - (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; -- cgit v0.10.2 From c5d191b8e531e33b823242f3d2c6b81d765e96dd Mon Sep 17 00:00:00 2001 From: Len Brown Date: Wed, 24 Sep 2008 02:53:25 -0400 Subject: dock: Shaohua Li is new maintainer Signed-off-by: Len Brown diff --git a/MAINTAINERS b/MAINTAINERS index cad81a2..7212af7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1417,8 +1417,8 @@ M: rdunlap@xenotime.net S: Maintained DOCKING STATION DRIVER -P: Kristen Carlson Accardi -M: kristen.c.accardi@intel.com +P: Shaohua Li +M: shaohua.li@intel.com L: linux-acpi@vger.kernel.org S: Supported -- cgit v0.10.2 From 709ee531c153038d81b30649b9eeed3c44a4d5cc Mon Sep 17 00:00:00 2001 From: Harald Welte Date: Tue, 23 Sep 2008 17:46:57 +0200 Subject: panasonic-laptop: add Panasonic Let's Note laptop extras driver v0.94 This is a driver for ACPI extras such as hotkeys and backlight brightness control on various Panasonic "Let's Note" series laptop computers. It exports the backlight via the backlight class device API, and the hotkeys as input event device. Some more esoteric items like number of installed batteries are exported via sysfs device attributes. Hotkey events also generate old-style ACPI enents through /proc/acpi/event to interoperate with current versions of acpid. 
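The commit message above names the two kernel interfaces the new driver builds on: the backlight class device for brightness and an input event device for the hotkeys. The hotkey half follows the usual input-subsystem pattern, sketched below with an illustrative keymap and "example_" names; it is not the driver's actual registration code, whose real keymap table (initial_keymap) appears further down in panasonic-laptop.c.

	#include <linux/input.h>
	#include <linux/errno.h>

	static struct input_dev *example_hotkey_dev;

	/* Register an input device that will carry the hotkey events. */
	static int example_hotkey_setup(void)
	{
		int err;

		example_hotkey_dev = input_allocate_device();
		if (!example_hotkey_dev)
			return -ENOMEM;

		example_hotkey_dev->name = "Example Hotkeys";
		example_hotkey_dev->phys = "example/hkey0";
		example_hotkey_dev->id.bustype = BUS_HOST;

		__set_bit(EV_KEY, example_hotkey_dev->evbit);
		__set_bit(KEY_BRIGHTNESSUP, example_hotkey_dev->keybit);
		__set_bit(KEY_BRIGHTNESSDOWN, example_hotkey_dev->keybit);
		__set_bit(KEY_MUTE, example_hotkey_dev->keybit);

		err = input_register_device(example_hotkey_dev);
		if (err)
			input_free_device(example_hotkey_dev);
		return err;
	}

	/* Called from the ACPI notify path once the firmware key code is decoded. */
	static void example_hotkey_report(unsigned int keycode)
	{
		input_report_key(example_hotkey_dev, keycode, 1);
		input_sync(example_hotkey_dev);
		input_report_key(example_hotkey_dev, keycode, 0);
		input_sync(example_hotkey_dev);
	}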
Signed-off-by: Harald Welte Acked-by: Henrique de Moraes Holschuh Acked-by: Matthew Garrett Signed-off-by: Len Brown diff --git a/MAINTAINERS b/MAINTAINERS index cad81a2..fa35793 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3148,6 +3148,11 @@ M: olof@lixom.net L: i2c@lm-sensors.org S: Maintained +PANASONIC LAPTOP ACPI EXTRAS DRIVER +P: Harald Welte +M: laforge@gnumonks.org +S: Maintained + PARALLEL PORT SUPPORT L: linux-parport@lists.infradead.org (subscribers-only) S: Orphan diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index a726f3b..4ed83575 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -245,6 +245,17 @@ config MSI_LAPTOP If you have an MSI S270 laptop, say Y or M here. +config PANASONIC_LAPTOP + tristate "Panasonic Laptop Extras" + depends on X86 && INPUT + depends on BACKLIGHT_CLASS_DEVICE + ---help--- + This driver adds support for access to backlight control and hotkeys + on Panasonic Let's Note laptops. + + If you have a Panasonic Let's note laptop (such as the R1(N variant), + R2, R3, R5, T2, W2 and Y2 series), say Y. + config COMPAL_LAPTOP tristate "Compal Laptop Extras" depends on X86 diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index c6c13f6..909e246 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_SGI_IOC4) += ioc4.o obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o +obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o diff --git a/drivers/misc/panasonic-laptop.c b/drivers/misc/panasonic-laptop.c new file mode 100644 index 0000000..9aecda5 --- /dev/null +++ b/drivers/misc/panasonic-laptop.c @@ -0,0 +1,767 @@ +/* + * Panasonic HotKey and LCD brightness control driver + * (C) 2004 Hiroshi Miura + * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/ + * (C) YOKOTA Hiroshi + * (C) 2004 David Bronaugh + * (C) 2006-2008 Harald Welte + * + * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * publicshed by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + *--------------------------------------------------------------------------- + * + * ChangeLog: + * Sep.23, 2008 Harald Welte + * -v0.95 rename driver from drivers/acpi/pcc_acpi.c to + * drivers/misc/panasonic-laptop.c + * + * Jul.04, 2008 Harald Welte + * -v0.94 replace /proc interface with device attributes + * support {set,get}keycode on th input device + * + * Jun.27, 2008 Harald Welte + * -v0.92 merge with 2.6.26-rc6 input API changes + * remove broken <= 2.6.15 kernel support + * resolve all compiler warnings + * various coding style fixes (checkpatch.pl) + * add support for backlight api + * major code restructuring + * + * Dac.28, 2007 Harald Welte + * -v0.91 merge with 2.6.24-rc6 ACPI changes + * + * Nov.04, 2006 Hiroshi Miura + * -v0.9 remove warning about section reference. + * remove acpi_os_free + * add /proc/acpi/pcc/brightness interface for HAL access + * merge dbronaugh's enhancement + * Aug.17, 2004 David Bronaugh (dbronaugh) + * - Added screen brightness setting interface + * Thanks to FreeBSD crew (acpi_panasonic.c) + * for the ideas I needed to accomplish it + * + * May.29, 2006 Hiroshi Miura + * -v0.8.4 follow to change keyinput structure + * thanks Fabian Yamaguchi , + * Jacob Bower and + * Hiroshi Yokota for providing solutions. + * + * Oct.02, 2004 Hiroshi Miura + * -v0.8.2 merge code of YOKOTA Hiroshi + * . + * Add sticky key mode interface. + * Refactoring acpi_pcc_generate_keyinput(). + * + * Sep.15, 2004 Hiroshi Miura + * -v0.8 Generate key input event on input subsystem. + * This is based on yet another driver written by + * Ryuta Nakanishi. + * + * Sep.10, 2004 Hiroshi Miura + * -v0.7 Change proc interface functions using seq_file + * facility as same as other ACPI drivers. + * + * Aug.28, 2004 Hiroshi Miura + * -v0.6.4 Fix a silly error with status checking + * + * Aug.25, 2004 Hiroshi Miura + * -v0.6.3 replace read_acpi_int by standard function + * acpi_evaluate_integer + * some clean up and make smart copyright notice. + * fix return value of pcc_acpi_get_key() + * fix checking return value of acpi_bus_register_driver() + * + * Aug.22, 2004 David Bronaugh + * -v0.6.2 Add check on ACPI data (num_sifr) + * Coding style cleanups, better error messages/handling + * Fixed an off-by-one error in memory allocation + * + * Aug.21, 2004 David Bronaugh + * -v0.6.1 Fix a silly error with status checking + * + * Aug.20, 2004 David Bronaugh + * - v0.6 Correct brightness controls to reflect reality + * based on information gleaned by Hiroshi Miura + * and discussions with Hiroshi Miura + * + * Aug.10, 2004 Hiroshi Miura + * - v0.5 support LCD brightness control + * based on the disclosed information by MEI. 
+ * + * Jul.25, 2004 Hiroshi Miura + * - v0.4 first post version + * add function to retrive SIFR + * + * Jul.24, 2004 Hiroshi Miura + * - v0.3 get proper status of hotkey + * + * Jul.22, 2004 Hiroshi Miura + * - v0.2 add HotKey handler + * + * Jul.17, 2004 Hiroshi Miura + * - v0.1 start from toshiba_acpi driver written by John Belmonte + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifndef ACPI_HOTKEY_COMPONENT +#define ACPI_HOTKEY_COMPONENT 0x10000000 +#endif + +#define _COMPONENT ACPI_HOTKEY_COMPONENT + +MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte"); +MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops"); +MODULE_LICENSE("GPL"); + +#define LOGPREFIX "pcc_acpi: " + +/* Define ACPI PATHs */ +/* Lets note hotkeys */ +#define METHOD_HKEY_QUERY "HINF" +#define METHOD_HKEY_SQTY "SQTY" +#define METHOD_HKEY_SINF "SINF" +#define METHOD_HKEY_SSET "SSET" +#define HKEY_NOTIFY 0x80 + +#define ACPI_PCC_DRIVER_NAME "Panasonic Laptop Support" +#define ACPI_PCC_DEVICE_NAME "Hotkey" +#define ACPI_PCC_CLASS "pcc" + +#define ACPI_PCC_INPUT_PHYS "panasonic/hkey0" + +/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent + ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00 +*/ +enum SINF_BITS { SINF_NUM_BATTERIES = 0, + SINF_LCD_TYPE, + SINF_AC_MAX_BRIGHT, + SINF_AC_MIN_BRIGHT, + SINF_AC_CUR_BRIGHT, + SINF_DC_MAX_BRIGHT, + SINF_DC_MIN_BRIGHT, + SINF_DC_CUR_BRIGHT, + SINF_MUTE, + SINF_RESERVED, + SINF_ENV_STATE, + SINF_STICKY_KEY = 0x80, + }; +/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */ + +static int acpi_pcc_hotkey_add(struct acpi_device *device); +static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); +static int acpi_pcc_hotkey_resume(struct acpi_device *device); + +static const struct acpi_device_id pcc_device_ids[] = { + { "MAT0012", 0}, + { "MAT0013", 0}, + { "MAT0018", 0}, + { "MAT0019", 0}, + { "", 0}, +}; + +static struct acpi_driver acpi_pcc_driver = { + .name = ACPI_PCC_DRIVER_NAME, + .class = ACPI_PCC_CLASS, + .ids = pcc_device_ids, + .ops = { + .add = acpi_pcc_hotkey_add, + .remove = acpi_pcc_hotkey_remove, + .resume = acpi_pcc_hotkey_resume, + }, +}; + +#define KEYMAP_SIZE 11 +static const int initial_keymap[KEYMAP_SIZE] = { + /* 0 */ KEY_RESERVED, + /* 1 */ KEY_BRIGHTNESSDOWN, + /* 2 */ KEY_BRIGHTNESSUP, + /* 3 */ KEY_DISPLAYTOGGLE, + /* 4 */ KEY_MUTE, + /* 5 */ KEY_VOLUMEDOWN, + /* 6 */ KEY_VOLUMEUP, + /* 7 */ KEY_SLEEP, + /* 8 */ KEY_PROG1, /* Change CPU boost */ + /* 9 */ KEY_BATTERY, + /* 10 */ KEY_SUSPEND, +}; + +struct pcc_acpi { + acpi_handle handle; + unsigned long num_sifr; + int sticky_mode; + u32 *sinf; + struct acpi_device *device; + struct input_dev *input_dev; + struct backlight_device *backlight; + int keymap[KEYMAP_SIZE]; +}; + +struct pcc_keyinput { + struct acpi_hotkey *hotkey; +}; + +/* method access functions */ +static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val) +{ + union acpi_object in_objs[] = { + { .integer.type = ACPI_TYPE_INTEGER, + .integer.value = func, }, + { .integer.type = ACPI_TYPE_INTEGER, + .integer.value = val, }, + }; + struct acpi_object_list params = { + .count = ARRAY_SIZE(in_objs), + .pointer = in_objs, + }; + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_pcc_write_sset"); + + status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET, + ¶ms, NULL); + + return status == AE_OK; +} + +static inline int acpi_pcc_get_sqty(struct acpi_device *device) +{ 
+ unsigned long s; + acpi_status status; + + ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty"); + + status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY, + NULL, &s); + if (ACPI_SUCCESS(status)) + return s; + else { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "evaluation error HKEY.SQTY\n")); + return -EINVAL; + } +} + +static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf) +{ + acpi_status status; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *hkey = NULL; + int i; + + ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata"); + + status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0, + &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "evaluation error HKEY.SINF\n")); + return 0; + } + + hkey = buffer.pointer; + if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n")); + goto end; + } + + if (pcc->num_sifr < hkey->package.count) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "SQTY reports bad SINF length\n")); + status = AE_ERROR; + goto end; + } + + for (i = 0; i < hkey->package.count; i++) { + union acpi_object *element = &(hkey->package.elements[i]); + if (likely(element->type == ACPI_TYPE_INTEGER)) { + sinf[i] = element->integer.value; + } else + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Invalid HKEY.SINF data\n")); + } + sinf[hkey->package.count] = -1; + +end: + kfree(buffer.pointer); + return status == AE_OK; +} + +/* backlight API interface functions */ + +/* This driver currently treats AC and DC brightness identical, + * since we don't need to invent an interface to the core ACPI + * logic to receive events in case a power supply is plugged in + * or removed */ + +static int bl_get(struct backlight_device *bd) +{ + struct pcc_acpi *pcc = bl_get_data(bd); + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return pcc->sinf[SINF_AC_CUR_BRIGHT]; +} + +static int bl_set_status(struct backlight_device *bd) +{ + struct pcc_acpi *pcc = bl_get_data(bd); + int bright = bd->props.brightness; + int rc; + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT]) + bright = pcc->sinf[SINF_AC_MIN_BRIGHT]; + + if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT]) + bright = pcc->sinf[SINF_DC_MIN_BRIGHT]; + + if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] || + bright > pcc->sinf[SINF_AC_MAX_BRIGHT]) + return -EINVAL; + + rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright); + if (rc < 0) + return rc; + + return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright); +} + +static struct backlight_ops pcc_backlight_ops = { + .get_brightness = bl_get, + .update_status = bl_set_status, +}; + + +/* sysfs user interface functions */ + +static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return sprintf(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]); +} + +static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return sprintf(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]); +} + +static ssize_t show_mute(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct acpi_device *acpi = to_acpi_device(dev); + 
struct pcc_acpi *pcc = acpi_driver_data(acpi); + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return sprintf(buf, "%u\n", pcc->sinf[SINF_MUTE]); +} + +static ssize_t show_sticky(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) + return -EIO; + + return sprintf(buf, "%u\n", pcc->sinf[SINF_STICKY_KEY]); +} + +static ssize_t set_sticky(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct acpi_device *acpi = to_acpi_device(dev); + struct pcc_acpi *pcc = acpi_driver_data(acpi); + int val; + + if (count && sscanf(buf, "%i", &val) == 1 && + (val == 0 || val == 1)) { + acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val); + pcc->sticky_mode = val; + } + + return count; +} + +static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL); +static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL); +static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL); +static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky); + +static struct attribute *pcc_sysfs_entries[] = { + &dev_attr_numbatt.attr, + &dev_attr_lcdtype.attr, + &dev_attr_mute.attr, + &dev_attr_sticky_key.attr, + NULL, +}; + +static struct attribute_group pcc_attr_group = { + .name = NULL, /* put in device directory */ + .attrs = pcc_sysfs_entries, +}; + + +/* hotkey input device driver */ + +static int pcc_getkeycode(struct input_dev *dev, int scancode, int *keycode) +{ + struct pcc_acpi *pcc = input_get_drvdata(dev); + + if (scancode >= ARRAY_SIZE(pcc->keymap)) + return -EINVAL; + + *keycode = pcc->keymap[scancode]; + + return 0; +} + +static int keymap_get_by_keycode(struct pcc_acpi *pcc, int keycode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) { + if (pcc->keymap[i] == keycode) + return i+1; + } + + return 0; +} + +static int pcc_setkeycode(struct input_dev *dev, int scancode, int keycode) +{ + struct pcc_acpi *pcc = input_get_drvdata(dev); + int oldkeycode; + + if (scancode >= ARRAY_SIZE(pcc->keymap)) + return -EINVAL; + + if (keycode < 0 || keycode > KEY_MAX) + return -EINVAL; + + oldkeycode = pcc->keymap[scancode]; + pcc->keymap[scancode] = keycode; + + set_bit(keycode, dev->keybit); + + if (!keymap_get_by_keycode(pcc, oldkeycode)) + clear_bit(oldkeycode, dev->keybit); + + return 0; +} + +static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) +{ + struct input_dev *hotk_input_dev = pcc->input_dev; + int rc; + int key_code, hkey_num; + unsigned long result; + + ACPI_FUNCTION_TRACE("acpi_pcc_generate_keyinput"); + + rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY, + NULL, &result); + if (!ACPI_SUCCESS(rc)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "error getting hotkey status\n")); + return; + } + + acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result); + + hkey_num = result & 0xf; + + if (hkey_num < 0 || hkey_num > ARRAY_SIZE(pcc->keymap)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "hotkey number out of range: %d\n", + hkey_num)); + return; + } + + key_code = pcc->keymap[hkey_num]; + + if (key_code != KEY_RESERVED) { + int pushed = (result & 0x80) ? 
TRUE : FALSE; + + input_report_key(hotk_input_dev, key_code, pushed); + input_sync(hotk_input_dev); + } + + return; +} + +static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data) +{ + struct pcc_acpi *pcc = (struct pcc_acpi *) data; + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify"); + + switch (event) { + case HKEY_NOTIFY: + acpi_pcc_generate_keyinput(pcc); + break; + default: + /* nothing to do */ + break; + } +} + +static int acpi_pcc_init_input(struct pcc_acpi *pcc) +{ + int i, rc; + + ACPI_FUNCTION_TRACE("acpi_pcc_init_input"); + + pcc->input_dev = input_allocate_device(); + if (!pcc->input_dev) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Couldn't allocate input device for hotkey")); + return -ENOMEM; + } + + pcc->input_dev->evbit[0] = BIT(EV_KEY); + + pcc->input_dev->name = ACPI_PCC_DRIVER_NAME; + pcc->input_dev->phys = ACPI_PCC_INPUT_PHYS; + pcc->input_dev->id.bustype = BUS_HOST; + pcc->input_dev->id.vendor = 0x0001; + pcc->input_dev->id.product = 0x0001; + pcc->input_dev->id.version = 0x0100; + pcc->input_dev->getkeycode = pcc_getkeycode; + pcc->input_dev->setkeycode = pcc_setkeycode; + + /* load initial keymap */ + memcpy(pcc->keymap, initial_keymap, sizeof(pcc->keymap)); + + for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) + __set_bit(pcc->keymap[i], pcc->input_dev->keybit); + __clear_bit(KEY_RESERVED, pcc->input_dev->keybit); + + input_set_drvdata(pcc->input_dev, pcc); + + rc = input_register_device(pcc->input_dev); + if (rc < 0) + input_free_device(pcc->input_dev); + + return rc; +} + +/* kernel module interface */ + +static int acpi_pcc_hotkey_resume(struct acpi_device *device) +{ + struct pcc_acpi *pcc = acpi_driver_data(device); + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume"); + + if (device == NULL || pcc == NULL) + return -EINVAL; + + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n", + pcc->sticky_mode)); + + status = acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); + + return status == AE_OK ? 
0 : -EINVAL; +} + +static int acpi_pcc_hotkey_add(struct acpi_device *device) +{ + acpi_status status; + struct pcc_acpi *pcc; + int num_sifr, result; + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add"); + + if (!device) + return -EINVAL; + + num_sifr = acpi_pcc_get_sqty(device); + + if (num_sifr > 255) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large")); + return -ENODEV; + } + + pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL); + if (!pcc) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Couldn't allocate mem for pcc")); + return -ENOMEM; + } + + pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL); + if (!pcc->sinf) { + result = -ENOMEM; + goto out_hotkey; + } + + pcc->device = device; + pcc->handle = device->handle; + pcc->num_sifr = num_sifr; + acpi_driver_data(device) = pcc; + strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_PCC_CLASS); + + result = acpi_pcc_init_input(pcc); + if (result) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing keyinput handler\n")); + goto out_sinf; + } + + /* initialize hotkey input device */ + status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, + acpi_pcc_hotkey_notify, pcc); + + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error installing notify handler\n")); + result = -ENODEV; + goto out_input; + } + + /* initialize backlight */ + pcc->backlight = backlight_device_register("panasonic", NULL, pcc, + &pcc_backlight_ops); + if (IS_ERR(pcc->backlight)) + goto out_notify; + + if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Couldn't retrieve BIOS data\n")); + goto out_backlight; + } + + /* read the initial brightness setting from the hardware */ + pcc->backlight->props.max_brightness = + pcc->sinf[SINF_AC_MAX_BRIGHT]; + pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; + + /* read the initial sticky key mode from the hardware */ + pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY]; + + /* add sysfs attributes */ + result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group); + if (result) + goto out_backlight; + + return 0; + +out_backlight: + backlight_device_unregister(pcc->backlight); +out_notify: + acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, + acpi_pcc_hotkey_notify); +out_input: + input_unregister_device(pcc->input_dev); + /* no need to input_free_device() since core input API refcount and + * free()s the device */ +out_sinf: + kfree(pcc->sinf); +out_hotkey: + kfree(pcc); + + return result; +} + +static int __init acpi_pcc_init(void) +{ + int result = 0; + + ACPI_FUNCTION_TRACE("acpi_pcc_init"); + + if (acpi_disabled) + return -ENODEV; + + result = acpi_bus_register_driver(&acpi_pcc_driver); + if (result < 0) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, + "Error registering hotkey driver\n")); + return -ENODEV; + } + + return 0; +} + +static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) +{ + struct pcc_acpi *pcc = acpi_driver_data(device); + + ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove"); + + if (!device || !pcc) + return -EINVAL; + + sysfs_remove_group(&device->dev.kobj, &pcc_attr_group); + + backlight_device_unregister(pcc->backlight); + + acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, + acpi_pcc_hotkey_notify); + + input_unregister_device(pcc->input_dev); + /* no need to input_free_device() since core input API refcount and + * free()s the device */ + + kfree(pcc->sinf); + kfree(pcc); + + return 0; +} + +static void __exit acpi_pcc_exit(void) +{ + 
ACPI_FUNCTION_TRACE("acpi_pcc_exit"); + + acpi_bus_unregister_driver(&acpi_pcc_driver); +} + +module_init(acpi_pcc_init); +module_exit(acpi_pcc_exit); -- cgit v0.10.2 From 4c168eaf7ea39f25a45a3d8c7eebc3fedb633a1d Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 24 Sep 2008 11:08:52 +0200 Subject: Revert "Oprofile Multiplexing Patch" Reverting commit 1a960b402a51d80abf54e3f8e4972374ffe5f22d for the main branch. Multiplexing will be tracked on a separate feature branch. Conflicts: arch/x86/oprofile/nmi_int.c diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 4108d02..287513a 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -23,18 +23,12 @@ #include "op_counter.h" #include "op_x86_model.h" -DEFINE_PER_CPU(int, switch_index); - static struct op_x86_model_spec const *model; static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); static DEFINE_PER_CPU(unsigned long, saved_lvtpc); static int nmi_start(void); static void nmi_stop(void); -static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs); -static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs); -static void nmi_cpu_stop(void *dummy); -static void nmi_cpu_start(void *dummy); /* 0 == registered but off, 1 == registered and on */ static int nmi_enabled = 0; @@ -87,47 +81,6 @@ static void exit_sysfs(void) #define exit_sysfs() do { } while (0) #endif /* CONFIG_PM */ -static void nmi_cpu_switch(void *dummy) -{ - int cpu = smp_processor_id(); - int si = per_cpu(switch_index, cpu); - struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); - - nmi_cpu_stop(NULL); - nmi_cpu_save_mpx_registers(msrs); - - /* move to next set */ - si += model->num_hardware_counters; - if ((si > model->num_counters) || (counter_config[si].count == 0)) - per_cpu(switch_index, smp_processor_id()) = 0; - else - per_cpu(switch_index, smp_processor_id()) = si; - - nmi_cpu_restore_mpx_registers(msrs); - model->setup_ctrs(msrs); - nmi_cpu_start(NULL); -} - -/* - * Quick check to see if multiplexing is necessary. - * The check should be sufficient since counters are used - * in ordre. - */ -static int nmi_multiplex_on(void) -{ - return counter_config[model->num_hardware_counters].count ? 
0 : -EINVAL; -} - -static int nmi_switch_event(void) -{ - if (nmi_multiplex_on() < 0) - return -EINVAL; - - on_each_cpu(nmi_cpu_switch, NULL, 1); - - return 0; -} - static int profile_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -191,10 +144,11 @@ static void free_msrs(void) static int allocate_msrs(void) { - int i, success = 1; + int success = 1; size_t controls_size = sizeof(struct op_msr) * model->num_controls; size_t counters_size = sizeof(struct op_msr) * model->num_counters; + int i; for_each_possible_cpu(i) { per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, GFP_KERNEL); @@ -202,8 +156,8 @@ static int allocate_msrs(void) success = 0; break; } - per_cpu(cpu_msrs, i).controls = - kmalloc(controls_size, GFP_KERNEL); + per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, + GFP_KERNEL); if (!per_cpu(cpu_msrs, i).controls) { success = 0; break; @@ -247,8 +201,7 @@ static int nmi_setup(void) return err; } - /* - * We need to serialize save and setup for HT because the subset + /* We need to serialize save and setup for HT because the subset * of msrs are distinct for save and setup operations */ @@ -264,6 +217,7 @@ static int nmi_setup(void) per_cpu(cpu_msrs, 0).controls, sizeof(struct op_msr) * model->num_controls); } + } on_each_cpu(nmi_save_registers, NULL, 1); on_each_cpu(nmi_cpu_setup, NULL, 1); @@ -271,41 +225,7 @@ static int nmi_setup(void) return 0; } -static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) -{ - unsigned int si = __get_cpu_var(switch_index); - unsigned int const nr_ctrs = model->num_hardware_counters; - struct op_msr *counters = &msrs->counters[si]; - unsigned int i; - - for (i = 0; i < nr_ctrs; ++i) { - int offset = i + si; - if (counters[offset].addr) { - rdmsr(counters[offset].addr, - counters[offset].multiplex.low, - counters[offset].multiplex.high); - } - } -} - -static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) -{ - unsigned int si = __get_cpu_var(switch_index); - unsigned int const nr_ctrs = model->num_hardware_counters; - struct op_msr *counters = &msrs->counters[si]; - unsigned int i; - - for (i = 0; i < nr_ctrs; ++i) { - int offset = i + si; - if (counters[offset].addr) { - wrmsr(counters[offset].addr, - counters[offset].multiplex.low, - counters[offset].multiplex.high); - } - } -} - -static void nmi_cpu_restore_registers(struct op_msrs *msrs) +static void nmi_restore_registers(struct op_msrs *msrs) { unsigned int const nr_ctrs = model->num_counters; unsigned int const nr_ctrls = model->num_controls; @@ -345,8 +265,7 @@ static void nmi_cpu_shutdown(void *dummy) apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); apic_write(APIC_LVTERR, v); - nmi_cpu_restore_registers(msrs); - __get_cpu_var(switch_index) = 0; + nmi_restore_registers(msrs); } static void nmi_shutdown(void) @@ -409,7 +328,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); - counter_config[i].save_count_low = 0; } return 0; @@ -551,14 +469,12 @@ int __init op_nmi_init(struct oprofile_operations *ops) } /* default values, can be overwritten by model */ - __raw_get_cpu_var(switch_index) = 0; ops->create_files = nmi_create_files; ops->setup = nmi_setup; ops->shutdown = nmi_shutdown; ops->start = nmi_start; ops->stop = nmi_stop; ops->cpu_type = 
cpu_type; - ops->switch_events = nmi_switch_event; if (model->init) ret = model->init(ops); diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h index 786d6e0..2880b15 100644 --- a/arch/x86/oprofile/op_counter.h +++ b/arch/x86/oprofile/op_counter.h @@ -10,14 +10,13 @@ #ifndef OP_COUNTER_H #define OP_COUNTER_H -#define OP_MAX_COUNTER 32 +#define OP_MAX_COUNTER 8 /* Per-perfctr configuration as set via * oprofilefs. */ struct op_counter_config { unsigned long count; - unsigned long save_count_low; unsigned long enabled; unsigned long event; unsigned long kernel; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index bbf2b68..d9faf60 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -24,10 +23,8 @@ #include "op_x86_model.h" #include "op_counter.h" -#define NUM_COUNTERS 32 -#define NUM_HARDWARE_COUNTERS 4 -#define NUM_CONTROLS 32 -#define NUM_HARDWARE_CONTROLS 4 +#define NUM_COUNTERS 4 +#define NUM_CONTROLS 4 #define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) @@ -51,7 +48,6 @@ #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) static unsigned long reset_value[NUM_COUNTERS]; -DECLARE_PER_CPU(int, switch_index); #ifdef CONFIG_OPROFILE_IBS @@ -134,17 +130,15 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) int i; for (i = 0; i < NUM_COUNTERS; i++) { - int hw_counter = i % NUM_HARDWARE_COUNTERS; - if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + hw_counter)) - msrs->counters[i].addr = MSR_K7_PERFCTR0 + hw_counter; + if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) + msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; else msrs->counters[i].addr = 0; } for (i = 0; i < NUM_CONTROLS; i++) { - int hw_control = i % NUM_HARDWARE_CONTROLS; - if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + hw_control)) - msrs->controls[i].addr = MSR_K7_EVNTSEL0 + hw_control; + if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) + msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; else msrs->controls[i].addr = 0; } @@ -156,16 +150,8 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) unsigned int low, high; int i; - for (i = 0; i < NUM_HARDWARE_CONTROLS; ++i) { - int offset = i + __get_cpu_var(switch_index); - if (counter_config[offset].enabled) - reset_value[offset] = counter_config[offset].count; - else - reset_value[offset] = 0; - } - /* clear all counters */ - for (i = 0 ; i < NUM_HARDWARE_CONTROLS; ++i) { + for (i = 0 ; i < NUM_CONTROLS; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; CTRL_READ(low, high, msrs, i); @@ -175,31 +161,34 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) } /* avoid a false detection of ctr overflows in NMI handler */ - for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { + for (i = 0; i < NUM_COUNTERS; ++i) { if (unlikely(!CTR_IS_RESERVED(msrs, i))) continue; CTR_WRITE(1, msrs, i); } /* enable active counters */ - for (i = 0; i < NUM_HARDWARE_COUNTERS; ++i) { - int offset = i + __get_cpu_var(switch_index); - if ((counter_config[offset].enabled) && (CTR_IS_RESERVED(msrs, i))) { - CTR_WRITE(counter_config[offset].count, msrs, i); + for (i = 0; i < NUM_COUNTERS; ++i) { + if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { + reset_value[i] = counter_config[i].count; + + CTR_WRITE(counter_config[i].count, msrs, i); CTRL_READ(low, high, msrs, i); CTRL_CLEAR_LO(low); 
CTRL_CLEAR_HI(high); CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[offset].user); - CTRL_SET_KERN(low, counter_config[offset].kernel); - CTRL_SET_UM(low, counter_config[offset].unit_mask); - CTRL_SET_EVENT_LOW(low, counter_config[offset].event); - CTRL_SET_EVENT_HIGH(high, counter_config[offset].event); + CTRL_SET_USR(low, counter_config[i].user); + CTRL_SET_KERN(low, counter_config[i].kernel); + CTRL_SET_UM(low, counter_config[i].unit_mask); + CTRL_SET_EVENT_LOW(low, counter_config[i].event); + CTRL_SET_EVENT_HIGH(high, counter_config[i].event); CTRL_SET_HOST_ONLY(high, 0); CTRL_SET_GUEST_ONLY(high, 0); CTRL_WRITE(low, high, msrs, i); + } else { + reset_value[i] = 0; } } } @@ -287,14 +276,13 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, unsigned int low, high; int i; - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { - int offset = i + __get_cpu_var(switch_index); - if (!reset_value[offset]) + for (i = 0 ; i < NUM_COUNTERS; ++i) { + if (!reset_value[i]) continue; CTR_READ(low, high, msrs, i); if (CTR_OVERFLOWED(low)) { - oprofile_add_sample(regs, offset); - CTR_WRITE(reset_value[offset], msrs, i); + oprofile_add_sample(regs, i); + CTR_WRITE(reset_value[i], msrs, i); } } @@ -310,10 +298,8 @@ static void op_amd_start(struct op_msrs const * const msrs) { unsigned int low, high; int i; - - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { - int offset = i + __get_cpu_var(switch_index); - if (reset_value[offset]) { + for (i = 0 ; i < NUM_COUNTERS ; ++i) { + if (reset_value[i]) { CTRL_READ(low, high, msrs, i); CTRL_SET_ACTIVE(low); CTRL_WRITE(low, high, msrs, i); @@ -343,8 +329,8 @@ static void op_amd_stop(struct op_msrs const * const msrs) /* Subtle: stop on all counters to avoid race with * setting our pm callback */ - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { - if (!reset_value[i + per_cpu(switch_index, smp_processor_id())]) + for (i = 0 ; i < NUM_COUNTERS ; ++i) { + if (!reset_value[i]) continue; CTRL_READ(low, high, msrs, i); CTRL_SET_INACTIVE(low); @@ -370,11 +356,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs) { int i; - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + for (i = 0 ; i < NUM_COUNTERS ; ++i) { if (CTR_IS_RESERVED(msrs, i)) release_perfctr_nmi(MSR_K7_PERFCTR0 + i); } - for (i = 0 ; i < NUM_HARDWARE_COUNTERS ; ++i) { + for (i = 0 ; i < NUM_CONTROLS ; ++i) { if (CTRL_IS_RESERVED(msrs, i)) release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); } @@ -548,8 +534,6 @@ struct op_x86_model_spec const op_amd_spec = { .exit = op_amd_exit, .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, - .num_hardware_counters = NUM_HARDWARE_COUNTERS, - .num_hardware_controls = NUM_HARDWARE_CONTROLS, .fill_in_addresses = &op_amd_fill_in_addresses, .setup_ctrs = &op_amd_setup_ctrs, .check_ctrs = &op_amd_check_ctrs, diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index cacba61..43ac5af 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -700,8 +700,6 @@ static void p4_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_p4_ht2_spec = { .num_counters = NUM_COUNTERS_HT2, .num_controls = NUM_CONTROLS_HT2, - .num_hardware_counters = NUM_COUNTERS_HT2, - .num_hardware_controls = NUM_CONTROLS_HT2, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, @@ -714,8 +712,6 @@ struct op_x86_model_spec const op_p4_ht2_spec = { struct op_x86_model_spec const op_p4_spec = { .num_counters = NUM_COUNTERS_NON_HT, .num_controls = 
NUM_CONTROLS_NON_HT, - .num_hardware_counters = NUM_COUNTERS_NON_HT, - .num_hardware_controls = NUM_CONTROLS_NON_HT, .fill_in_addresses = &p4_fill_in_addresses, .setup_ctrs = &p4_setup_ctrs, .check_ctrs = &p4_check_ctrs, diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index e5811aa..eff431f 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -183,8 +183,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_ppro_spec = { .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, - .num_hardware_counters = NUM_COUNTERS, - .num_hardware_controls = NUM_CONTROLS, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index e07ba10..05a0261 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -19,7 +19,6 @@ struct op_saved_msr { struct op_msr { unsigned long addr; struct op_saved_msr saved; - struct op_saved_msr multiplex; }; struct op_msrs { @@ -35,8 +34,6 @@ struct pt_regs; struct op_x86_model_spec { int (*init)(struct oprofile_operations *ops); void (*exit)(void); - unsigned int const num_hardware_counters; - unsigned int const num_hardware_controls; unsigned int const num_counters; unsigned int const num_controls; void (*fill_in_addresses)(struct op_msrs * const msrs); diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index b2fa5df..2c64517 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -12,8 +12,6 @@ #include #include #include -#include -#include #include #include "oprof.h" @@ -21,18 +19,13 @@ #include "cpu_buffer.h" #include "buffer_sync.h" #include "oprofile_stats.h" - -static unsigned long is_setup; -static void switch_worker(struct work_struct *work); -static DECLARE_DELAYED_WORK(switch_work, switch_worker); -static DEFINE_MUTEX(start_mutex); struct oprofile_operations oprofile_ops; -unsigned long timeout_jiffies; unsigned long oprofile_started; unsigned long backtrace_depth; -/* Multiplexing defaults at 1 msec*/ +static unsigned long is_setup; +static DEFINE_MUTEX(start_mutex); /* timer 0 - use performance monitoring hardware if available @@ -94,16 +87,6 @@ out: return err; } -static void start_switch_worker(void) -{ - schedule_delayed_work(&switch_work, timeout_jiffies); -} - -static void switch_worker(struct work_struct *work) -{ - if (!oprofile_ops.switch_events()) - start_switch_worker(); -} /* Actually start profiling (echo 1>/dev/oprofile/enable) */ int oprofile_start(void) @@ -111,6 +94,7 @@ int oprofile_start(void) int err = -EINVAL; mutex_lock(&start_mutex); + if (!is_setup) goto out; @@ -124,9 +108,6 @@ int oprofile_start(void) if ((err = oprofile_ops.start())) goto out; - if (oprofile_ops.switch_events) - start_switch_worker(); - oprofile_started = 1; out: mutex_unlock(&start_mutex); @@ -142,7 +123,6 @@ void oprofile_stop(void) goto out; oprofile_ops.stop(); oprofile_started = 0; - cancel_delayed_work_sync(&switch_work); /* wake up the daemon to read what remains */ wake_up_buffer_waiter(); out: @@ -175,32 +155,6 @@ post_sync: mutex_unlock(&start_mutex); } -/* User inputs in ms, converts to jiffies */ -int oprofile_set_timeout(unsigned long val_msec) -{ - int err = 0; - - mutex_lock(&start_mutex); - - if (oprofile_started) { - err = -EBUSY; - goto out; - } - - if (!oprofile_ops.switch_events) { - err = -EINVAL; - goto out; - } - - timeout_jiffies = 
msecs_to_jiffies(val_msec); - if (timeout_jiffies == MAX_JIFFY_OFFSET) - timeout_jiffies = msecs_to_jiffies(1); - -out: - mutex_unlock(&start_mutex); - return err; - -} int oprofile_set_backtrace(unsigned long val) { @@ -225,16 +179,10 @@ out: return err; } -static void __init oprofile_switch_timer_init(void) -{ - timeout_jiffies = msecs_to_jiffies(1); -} - static int __init oprofile_init(void) { int err; - oprofile_switch_timer_init(); err = oprofile_arch_init(&oprofile_ops); if (err < 0 || timer) { diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index c4406a7..1832365 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h @@ -27,8 +27,7 @@ extern unsigned long fs_buffer_watershed; extern struct oprofile_operations oprofile_ops; extern unsigned long oprofile_started; extern unsigned long backtrace_depth; -extern unsigned long timeout_jiffies; - + struct super_block; struct dentry; @@ -36,6 +35,5 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root); void oprofile_timer_init(struct oprofile_operations * ops); int oprofile_set_backtrace(unsigned long depth); -int oprofile_set_timeout(unsigned long time); #endif /* OPROF_H */ diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index cc4f5a1..ef953ba 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c @@ -9,7 +9,6 @@ #include #include -#include #include "event_buffer.h" #include "oprofile_stats.h" @@ -19,40 +18,6 @@ unsigned long fs_buffer_size = 131072; unsigned long fs_cpu_buffer_size = 8192; unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */ -static ssize_t timeout_read(struct file *file, char __user *buf, - size_t count, loff_t *offset) -{ - return oprofilefs_ulong_to_user(jiffies_to_msecs(timeout_jiffies), - buf, count, offset); -} - - -static ssize_t timeout_write(struct file *file, char const __user *buf, - size_t count, loff_t *offset) -{ - unsigned long val; - int retval; - - if (*offset) - return -EINVAL; - - retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) - return retval; - - retval = oprofile_set_timeout(val); - - if (retval) - return retval; - return count; -} - -static const struct file_operations timeout_fops = { - .read = timeout_read, - .write = timeout_write, -}; - - static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset) { return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); @@ -120,10 +85,11 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t if (*offset) return -EINVAL; + retval = oprofilefs_ulong_from_user(&val, buf, count); if (retval) return retval; - + if (val) retval = oprofile_start(); else @@ -163,7 +129,6 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root) oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); - oprofilefs_create_file(sb, root, "timeout_ms", &timeout_fops); oprofile_create_stats_files(sb, root); if (oprofile_ops.create_files) oprofile_ops.create_files(sb, root); diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 687f2f4..bcb8f72 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -67,9 +67,6 @@ struct oprofile_operations { /* Initiate a stack backtrace. Optional. 
*/ void (*backtrace)(struct pt_regs * const regs, unsigned int depth); - - /* Multiplex between different events. Optional. */ - int (*switch_events)(void); /* CPU identification string. */ char * cpu_type; }; -- cgit v0.10.2 From 4aa7361179bed905fd0f35b236a5c65db683b9e0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:46 -0700 Subject: posix-timers: don't switch to ->group_leader if ->it_process dies posix_timer_event() drops SIGEV_THREAD_ID and switches to ->group_leader if send_sigqueue() fails. This is not very useful and doesn't work reliably. send_sigqueue() can only fail if ->it_process is dead. But it can die before it dequeues the SI_TIMER signal, in that case the timer stops anyway. Remove this code. I guess it was needed a long ago to ensure that the timer is not destroyed when when its creator thread dies. Q: perhaps it makes sense to change sys_timer_settime() to return an error if ->it_process is dead? Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index e36d579..3dfd15a 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -298,6 +298,7 @@ void do_schedule_next_timer(struct siginfo *info) int posix_timer_event(struct k_itimer *timr, int si_private) { + int shared, ret; /* * FIXME: if ->sigq is queued we can race with * dequeue_signal()->do_schedule_next_timer(). @@ -316,20 +317,10 @@ int posix_timer_event(struct k_itimer *timr, int si_private) timr->sigq->info.si_tid = timr->it_id; timr->sigq->info.si_value = timr->it_sigev_value; - if (timr->it_sigev_notify & SIGEV_THREAD_ID) { - struct task_struct *leader; - int ret = send_sigqueue(timr->sigq, timr->it_process, 0); - - if (likely(ret >= 0)) - return ret; - - timr->it_sigev_notify = SIGEV_SIGNAL; - leader = timr->it_process->group_leader; - put_task_struct(timr->it_process); - timr->it_process = leader; - } - - return send_sigqueue(timr->sigq, timr->it_process, 1); + shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); + ret = send_sigqueue(timr->sigq, timr->it_process, shared); + /* If we failed to send the signal the timer stops. */ + return ret > 0; } EXPORT_SYMBOL_GPL(posix_timer_event); -- cgit v0.10.2 From 918fc0372831dca73039e1577bfea0c2ce49bdb6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:46 -0700 Subject: posix-timers: always do get_task_struct(timer->it_process) Change the code to get/put timer->it_process regardless of SIGEV_THREAD_ID. This streamlines the create/destroy paths and allows us to simplify the usage of exit_itimers() in de_thread(). 
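Condensed, the create and delete paths now take and drop the task reference unconditionally (a sketch of the hunks below, not a literal quote):

    /* sys_timer_create(): always pin the target task */
    get_task_struct(process);
    new_timer->it_process = process;
    list_add(&new_timer->list, &process->signal->posix_timers);

    /* timer deletion paths: always drop it, no SIGEV_THREAD_ID check */
    put_task_struct(timer->it_process);
    timer->it_process = NULL;
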
Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 3dfd15a..bd9c931 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -540,11 +540,10 @@ sys_timer_create(const clockid_t which_clock, */ spin_lock_irqsave(&process->sighand->siglock, flags); if (!(process->flags & PF_EXITING)) { + get_task_struct(process); new_timer->it_process = process; list_add(&new_timer->list, &process->signal->posix_timers); - if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) - get_task_struct(process); spin_unlock_irqrestore(&process->sighand->siglock, flags); } else { spin_unlock_irqrestore(&process->sighand->siglock, flags); @@ -561,6 +560,7 @@ sys_timer_create(const clockid_t which_clock, new_timer->it_sigev_signo = SIGALRM; new_timer->it_sigev_value.sival_int = new_timer->it_id; process = current->group_leader; + get_task_struct(process); spin_lock_irqsave(&process->sighand->siglock, flags); new_timer->it_process = process; list_add(&new_timer->list, &process->signal->posix_timers); @@ -853,8 +853,7 @@ retry_delete: * This keeps any tasks waiting on the spin lock from thinking * they got something (see the lock code above). */ - if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) - put_task_struct(timer->it_process); + put_task_struct(timer->it_process); timer->it_process = NULL; unlock_timer(timer, flags); @@ -881,8 +880,7 @@ retry_delete: * This keeps any tasks waiting on the spin lock from thinking * they got something (see the lock code above). */ - if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) - put_task_struct(timer->it_process); + put_task_struct(timer->it_process); timer->it_process = NULL; unlock_timer(timer, flags); -- cgit v0.10.2 From 2cd499e38ec241691e4bce50bddc8f57e4cc9bd0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:47 -0700 Subject: posix-timers: sys_timer_create: remove the buggy PF_EXITING check sys_timer_create() return -EINVAL if the target thread has PF_EXITING. This doesn't really make sense, the sub-thread can die right after unlock. And in fact, this is just wrong. Without SIGEV_THREAD_ID good_sigevent() returns ->group_leader, and it is very possible that the leader is already dead. This is OK, we shouldn't return the error in this case. Remove this check and the comment. Note that the "process" was found under tasklist_lock, it must have ->sighand != NULL. Also, remove a couple of unneeded initializations. Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index bd9c931..60b2620 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -460,9 +460,9 @@ sys_timer_create(const clockid_t which_clock, timer_t __user * created_timer_id) { int error = 0; - struct k_itimer *new_timer = NULL; + struct k_itimer *new_timer; int new_timer_id; - struct task_struct *process = NULL; + struct task_struct *process; unsigned long flags; sigevent_t event; int it_id_set = IT_ID_NOT_SET; @@ -523,32 +523,12 @@ sys_timer_create(const clockid_t which_clock, read_lock(&tasklist_lock); if ((process = good_sigevent(&event))) { - /* - * We may be setting up this process for another - * thread. It may be exiting. To catch this - * case the we check the PF_EXITING flag. 
If - * the flag is not set, the siglock will catch - * him before it is too late (in exit_itimers). - * - * The exec case is a bit more invloved but easy - * to code. If the process is in our thread - * group (and it must be or we would not allow - * it here) and is doing an exec, it will cause - * us to be killed. In this case it will wait - * for us to die which means we can finish this - * linkage with our last gasp. I.e. no code :) - */ + get_task_struct(process); spin_lock_irqsave(&process->sighand->siglock, flags); - if (!(process->flags & PF_EXITING)) { - get_task_struct(process); - new_timer->it_process = process; - list_add(&new_timer->list, - &process->signal->posix_timers); - spin_unlock_irqrestore(&process->sighand->siglock, flags); - } else { - spin_unlock_irqrestore(&process->sighand->siglock, flags); - process = NULL; - } + new_timer->it_process = process; + list_add(&new_timer->list, + &process->signal->posix_timers); + spin_unlock_irqrestore(&process->sighand->siglock, flags); } read_unlock(&tasklist_lock); if (!process) { -- cgit v0.10.2 From 36b2f046000b358b62b9d116cb10a2b1c5be5cbf Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:48 -0700 Subject: posix-timers: sys_timer_create: simplify and s/tasklist/rcu/ - Change the code to do rcu_read_lock() instead of taking tasklist_lock, it is safe to get_task_struct(p) if p was found under RCU. However, now we must not use process's sighand/signal, they may be NULL. We can use current->sighand/signal instead, this "process" must belong to the current's thread-group. - Factor out the common code for 2 "if (timer_event_spec)" branches, the !timer_event_spec case can use current too. - use spin_lock_irq() instead of _irqsave(), kill "flags". Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 60b2620..5b76190 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -463,7 +463,6 @@ sys_timer_create(const clockid_t which_clock, struct k_itimer *new_timer; int new_timer_id; struct task_struct *process; - unsigned long flags; sigevent_t event; int it_id_set = IT_ID_NOT_SET; @@ -521,16 +520,11 @@ sys_timer_create(const clockid_t which_clock, new_timer->it_sigev_signo = event.sigev_signo; new_timer->it_sigev_value = event.sigev_value; - read_lock(&tasklist_lock); - if ((process = good_sigevent(&event))) { + rcu_read_lock(); + process = good_sigevent(&event); + if (process) get_task_struct(process); - spin_lock_irqsave(&process->sighand->siglock, flags); - new_timer->it_process = process; - list_add(&new_timer->list, - &process->signal->posix_timers); - spin_unlock_irqrestore(&process->sighand->siglock, flags); - } - read_unlock(&tasklist_lock); + rcu_read_unlock(); if (!process) { error = -EINVAL; goto out; @@ -541,19 +535,18 @@ sys_timer_create(const clockid_t which_clock, new_timer->it_sigev_value.sival_int = new_timer->it_id; process = current->group_leader; get_task_struct(process); - spin_lock_irqsave(&process->sighand->siglock, flags); - new_timer->it_process = process; - list_add(&new_timer->list, &process->signal->posix_timers); - spin_unlock_irqrestore(&process->sighand->siglock, flags); } + spin_lock_irq(¤t->sighand->siglock); + new_timer->it_process = process; + list_add(&new_timer->list, ¤t->signal->posix_timers); + spin_unlock_irq(¤t->sighand->siglock); /* * In the case of the timer belonging to another task, after * the task is unlocked, the timer is 
owned by the other task * and may cease to exist at any time. Don't use or modify * new_timer after the unlock call. */ - out: if (error) release_posix_timer(new_timer, it_id_set); -- cgit v0.10.2 From 717835d94d3e3d343a302df0a3cb9405887c3e2a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:49 -0700 Subject: posix-timers: move the initialization of timer->sigq from send to create path posix_timer_event() always populates timer->sigq with the same numbers, move this code into sys_timer_create(). Note that with this patch we can kill it_sigev_signo and it_sigev_value. Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 5b76190..c459b29 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -312,11 +312,6 @@ int posix_timer_event(struct k_itimer *timr, int si_private) */ timr->sigq->info.si_sys_private = si_private; - timr->sigq->info.si_signo = timr->it_sigev_signo; - timr->sigq->info.si_code = SI_TIMER; - timr->sigq->info.si_tid = timr->it_id; - timr->sigq->info.si_value = timr->it_sigev_value; - shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); ret = send_sigqueue(timr->sigq, timr->it_process, shared); /* If we failed to send the signal the timer stops. */ @@ -537,6 +532,11 @@ sys_timer_create(const clockid_t which_clock, get_task_struct(process); } + new_timer->sigq->info.si_code = SI_TIMER; + new_timer->sigq->info.si_tid = new_timer->it_id; + new_timer->sigq->info.si_signo = new_timer->it_sigev_signo; + new_timer->sigq->info.si_value = new_timer->it_sigev_value; + spin_lock_irq(¤t->sighand->siglock); new_timer->it_process = process; list_add(&new_timer->list, ¤t->signal->posix_timers); -- cgit v0.10.2 From ef864c958801768fb28bd3603cd0b098b394671c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:49 -0700 Subject: posix-timers: sys_timer_create: cleanup the error handling Cleanup. - sys_timer_create() is big and complicated. The code above the "out:" label relies on the fact that "error" must be == 0. This is not very robust, make the code more explicit. Remove the unneeded initialization of error. - If idr_get_new() succeeds (as it normally should), we check the returned value twice. Move the "-EAGAIN" check under "if (error)". 
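In outline, the allocation retry and the function's exit paths now read as follows (condensed from the hunks below; unchanged statements are elided):

    spin_lock_irq(&idr_lock);
    error = idr_get_new(&posix_timers_id, (void *) new_timer, &new_timer_id);
    spin_unlock_irq(&idr_lock);
    if (error) {
            if (error == -EAGAIN)
                    goto retry;
            /* otherwise the IDR is full: the unchanged tail reports EAGAIN */
    }
    ...
    /* success: return explicitly, before the error label */
    return 0;
out:
    release_posix_timer(new_timer, it_id_set);
    return error;
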
Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index c459b29..7be385f 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -454,9 +454,8 @@ sys_timer_create(const clockid_t which_clock, struct sigevent __user *timer_event_spec, timer_t __user * created_timer_id) { - int error = 0; struct k_itimer *new_timer; - int new_timer_id; + int error, new_timer_id; struct task_struct *process; sigevent_t event; int it_id_set = IT_ID_NOT_SET; @@ -478,9 +477,9 @@ sys_timer_create(const clockid_t which_clock, error = idr_get_new(&posix_timers_id, (void *) new_timer, &new_timer_id); spin_unlock_irq(&idr_lock); - if (error == -EAGAIN) - goto retry; - else if (error) { + if (error) { + if (error == -EAGAIN) + goto retry; /* * Weird looking, but we return EAGAIN if the IDR is * full (proper POSIX return value for this) @@ -541,6 +540,8 @@ sys_timer_create(const clockid_t which_clock, new_timer->it_process = process; list_add(&new_timer->list, ¤t->signal->posix_timers); spin_unlock_irq(¤t->sighand->siglock); + + return 0; /* * In the case of the timer belonging to another task, after * the task is unlocked, the timer is owned by the other task @@ -548,9 +549,7 @@ sys_timer_create(const clockid_t which_clock, * new_timer after the unlock call. */ out: - if (error) - release_posix_timer(new_timer, it_id_set); - + release_posix_timer(new_timer, it_id_set); return error; } -- cgit v0.10.2 From 5a9fa73072854981a5c05eb7ba18a96d49c2804f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:50 -0700 Subject: posix-timers: kill ->it_sigev_signo and ->it_sigev_value With the recent changes ->it_sigev_signo and ->it_sigev_value are only used in sys_timer_create(), kill them. Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index f9d8e9e..a7c7213 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -45,8 +45,6 @@ struct k_itimer { int it_requeue_pending; /* waiting to requeue this timer */ #define REQUEUE_PENDING 1 int it_sigev_notify; /* notify word of sigevent struct */ - int it_sigev_signo; /* signo word of sigevent struct */ - sigval_t it_sigev_value; /* value word of sigevent struct */ struct task_struct *it_process; /* process to send signal to */ struct sigqueue *sigq; /* signal queue entry. 
*/ union { diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 7be385f..3eff47b 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -510,10 +510,6 @@ sys_timer_create(const clockid_t which_clock, error = -EFAULT; goto out; } - new_timer->it_sigev_notify = event.sigev_notify; - new_timer->it_sigev_signo = event.sigev_signo; - new_timer->it_sigev_value = event.sigev_value; - rcu_read_lock(); process = good_sigevent(&event); if (process) @@ -524,17 +520,18 @@ sys_timer_create(const clockid_t which_clock, goto out; } } else { - new_timer->it_sigev_notify = SIGEV_SIGNAL; - new_timer->it_sigev_signo = SIGALRM; - new_timer->it_sigev_value.sival_int = new_timer->it_id; + event.sigev_notify = SIGEV_SIGNAL; + event.sigev_signo = SIGALRM; + event.sigev_value.sival_int = new_timer->it_id; process = current->group_leader; get_task_struct(process); } - new_timer->sigq->info.si_code = SI_TIMER; + new_timer->it_sigev_notify = event.sigev_notify; + new_timer->sigq->info.si_signo = event.sigev_signo; + new_timer->sigq->info.si_value = event.sigev_value; new_timer->sigq->info.si_tid = new_timer->it_id; - new_timer->sigq->info.si_signo = new_timer->it_sigev_signo; - new_timer->sigq->info.si_value = new_timer->it_sigev_value; + new_timer->sigq->info.si_code = SI_TIMER; spin_lock_irq(¤t->sighand->siglock); new_timer->it_process = process; -- cgit v0.10.2 From 5a51b713ccf8835d5adf7217e2f86eb12b1ca851 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:51 -0700 Subject: posix-timers: lock_timer: kill the bogus ->it_id check lock_timer() checks that the timer found by idr_find(timer_id) has ->it_id == timer_id. This buys nothing. This check can fail only if sys_timer_create() unlocked idr_lock after idr_get_new(), but didn't set ->it_id = new_timer_id yet. But in that case ->it_process == NULL so lock_timer() can't succeed anyway. Also remove a couple of unneeded typecasts. Note that with or without this patch we have a small problem. sys_timer_create() doesn't ensure that the result of setting (say) ->it_sigev_notify must be visible if lock_timer() succeeds. Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 3eff47b..7185f05 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -474,8 +474,7 @@ sys_timer_create(const clockid_t which_clock, goto out; } spin_lock_irq(&idr_lock); - error = idr_get_new(&posix_timers_id, (void *) new_timer, - &new_timer_id); + error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id); spin_unlock_irq(&idr_lock); if (error) { if (error == -EAGAIN) @@ -567,12 +566,12 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) */ spin_lock_irqsave(&idr_lock, *flags); - timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id); + timr = idr_find(&posix_timers_id, (int) timer_id); if (timr) { spin_lock(&timr->it_lock); - if ((timr->it_id != timer_id) || !(timr->it_process) || - !same_thread_group(timr->it_process, current)) { + if (!timr->it_process || + !same_thread_group(timr->it_process, current)) { spin_unlock(&timr->it_lock); spin_unlock_irqrestore(&idr_lock, *flags); timr = NULL; -- cgit v0.10.2 From 31d9284569e38fb97117497af3e8047a6a3c86f0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 22 Sep 2008 14:42:51 -0700 Subject: posix-timers: lock_timer: make it readable Cleanup. Imho makes the code much more understandable. 
At least this patch lessens both the source and compiled code. Signed-off-by: Oleg Nesterov Cc: mingo@elte.hu Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 7185f05..95451bf 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -556,7 +556,7 @@ out: * the find to the timer lock. To avoid a dead lock, the timer id MUST * be release with out holding the timer lock. */ -static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) +static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags) { struct k_itimer *timr; /* @@ -564,23 +564,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) * flags part over to the timer lock. Must not let interrupts in * while we are moving the lock. */ - spin_lock_irqsave(&idr_lock, *flags); - timr = idr_find(&posix_timers_id, (int) timer_id); + timr = idr_find(&posix_timers_id, (int)timer_id); if (timr) { spin_lock(&timr->it_lock); - - if (!timr->it_process || - !same_thread_group(timr->it_process, current)) { - spin_unlock(&timr->it_lock); - spin_unlock_irqrestore(&idr_lock, *flags); - timr = NULL; - } else + if (timr->it_process && + same_thread_group(timr->it_process, current)) { spin_unlock(&idr_lock); - } else - spin_unlock_irqrestore(&idr_lock, *flags); + return timr; + } + spin_unlock(&timr->it_lock); + } + spin_unlock_irqrestore(&idr_lock, *flags); - return timr; + return NULL; } /* -- cgit v0.10.2 From 1b02469088ac7a13d7e622b618b7410d0f1ce5ec Mon Sep 17 00:00:00 2001 From: Richard Kennedy Date: Mon, 22 Sep 2008 14:42:43 -0700 Subject: hrtimer: reorder struct hrtimer to save 8 bytes on 64bit builds reorder struct hrtimer to save 8 bytes on 64 bit builds when CONFIG_TIMER_STATS selected. (also removes 8 bytes from signal_struct) Signed-off-by: Richard Kennedy Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 68b0196..8730b60 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -115,12 +115,12 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; - enum hrtimer_cb_mode cb_mode; struct list_head cb_entry; + enum hrtimer_cb_mode cb_mode; #ifdef CONFIG_TIMER_STATS + int start_pid; void *start_site; char start_comm[16]; - int start_pid; #endif }; -- cgit v0.10.2 From eb3f938fd6292dc79f43a5fe14784b044776e9f0 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Mon, 22 Sep 2008 14:42:40 -0700 Subject: ntp: let update_persistent_clock() sleep This is a change that makes the 11-minute RTC update be run in the process context. This is so that update_persistent_clock() can sleep, which may be required for certain types of RTC hardware -- most notably I2C devices. Signed-off-by: Maciej W. 
Rozycki Cc: Roman Zippel Cc: Rik van Riel Cc: David Brownell Acked-by: Alessandro Zummo Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index c6921aa1..450a45c 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -10,13 +10,13 @@ #include #include -#include #include #include #include #include #include #include +#include #include /* @@ -218,11 +218,11 @@ void second_overflow(void) /* Disable the cmos update - used by virtualization and embedded */ int no_sync_cmos_clock __read_mostly; -static void sync_cmos_clock(unsigned long dummy); +static void sync_cmos_clock(struct work_struct *work); -static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0); +static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); -static void sync_cmos_clock(unsigned long dummy) +static void sync_cmos_clock(struct work_struct *work) { struct timespec now, next; int fail = 1; @@ -258,13 +258,13 @@ static void sync_cmos_clock(unsigned long dummy) next.tv_sec++; next.tv_nsec -= NSEC_PER_SEC; } - mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next)); + schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); } static void notify_cmos_timer(void) { if (!no_sync_cmos_clock) - mod_timer(&sync_cmos_timer, jiffies + 1); + schedule_delayed_work(&sync_cmos_work, 0); } #else -- cgit v0.10.2 From 5cd1c9c5cf30d4b33df3d3f74d8142f278d536b7 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Mon, 22 Sep 2008 14:42:43 -0700 Subject: timekeeping: fix rounding problem during clock update Due to a rounding problem during a clock update it's possible for readers to observe the clock jumping back by 1nsec. The following simplified example demonstrates the problem: cycle xtime 0 0 1000 999999.6 2000 1999999.2 3000 2999998.8 ... 1500 = 1499999.4 = 0.0 + 1499999.4 = 999999.6 + 499999.8 When reading the clock only the full nanosecond part is used, while timekeeping internally keeps nanosecond fractions. If the clock is now updated at cycle 1500 here, a nanosecond is missing due to the truncation. The simple fix is to round up the xtime value during the update, this also changes the distance to the reference time, but the adjustment will automatically take care that it stays under control. Signed-off-by: Roman Zippel Signed-off-by: John Stultz Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e91c29f..5ecbfc3 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -454,7 +454,7 @@ void update_wall_time(void) #else offset = clock->cycle_interval; #endif - clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; + clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift; /* normally this loop will run just once, however in the * case of lost or late ticks, it will accumulate correctly. @@ -479,9 +479,12 @@ void update_wall_time(void) /* correct the clock when NTP error is too big */ clocksource_adjust(offset); - /* store full nanoseconds into xtime */ - xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift; + /* store full nanoseconds into xtime after rounding it up and + * add the remainder to the error difference. 
+ */ + xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1; clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; + clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift); update_xtime_cache(cyc2ns(clock, offset)); -- cgit v0.10.2 From d40e944c25fb4642adb2a4c580a48218a9f3f824 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Mon, 22 Sep 2008 14:42:44 -0700 Subject: ntp: improve adjtimex frequency rounding Change PPM_SCALE_INV_SHIFT so that it doesn't throw away any input bits (19 is the amount of the factor 2 in PPM_SCALE), the output frequency can then be calculated back to its input value, as the inverse divide produce a slightly larger value, which is then correctly rounded by the final shift. Reported-by: Martin Ziegler Signed-off-by: Roman Zippel Cc: John Stultz Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner diff --git a/include/linux/timex.h b/include/linux/timex.h index c00bcdd..9007313 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -82,7 +82,7 @@ */ #define SHIFT_USEC 16 /* frequency offset scale (shift) */ #define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) -#define PPM_SCALE_INV_SHIFT 20 +#define PPM_SCALE_INV_SHIFT 19 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ PPM_SCALE + 1) diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 450a45c..ddb0465 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -406,9 +406,8 @@ adj_done: if (time_status & (STA_UNSYNC|STA_CLOCKERR)) result = TIME_ERROR; - txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) * - (s64)PPM_SCALE_INV, - NTP_SCALE_SHIFT); + txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) * + (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT); txc->maxerror = time_maxerror; txc->esterror = time_esterror; txc->status = time_status; -- cgit v0.10.2 From 7c6db4e050601f359081fde418ca6dc4fc2d0011 Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Thu, 25 Sep 2008 21:00:31 +0400 Subject: ACPI: EC: do transaction from interrupt context It is easier and faster to do transaction directly from interrupt context rather than waking control thread. Also, cleaner GPE storm avoidance is implemented. 
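To see the pattern in isolation, here is a minimal kernel-style sketch of an interrupt-driven byte transaction of the kind this patch introduces. It is a simplified illustration only: the names (ec_xfer, ec_irq_step, ec_submit) and the fixed timeout are invented for this sketch, the status bits shown are the standard EC OBF/IBF flags, and the authoritative implementation is the one in the diff below.

/*
 * Simplified sketch of an interrupt-driven byte-wise transaction.
 * Hypothetical names; not the actual driver code.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

struct ec_xfer {
	const u8 *wdata;		/* bytes still to be written */
	u8 *rdata;			/* buffer for bytes to be read */
	u8 wlen, rlen;			/* remaining write/read counts */
};

static DEFINE_SPINLOCK(xfer_lock);	/* protects 'cur' */
static struct ec_xfer *cur;		/* transaction currently in flight */
static DECLARE_WAIT_QUEUE_HEAD(xfer_wait);

/* Called from the interrupt (GPE) handler with a freshly read status byte. */
static void ec_irq_step(u8 status, u8 (*read_data)(void), void (*write_data)(u8))
{
	unsigned long flags;

	spin_lock_irqsave(&xfer_lock, flags);
	if (cur) {
		if (cur->wlen && !(status & 0x02)) {		/* IBF clear: can write */
			write_data(*cur->wdata++);
			cur->wlen--;
		} else if (cur->rlen && (status & 0x01)) {	/* OBF set: can read */
			*cur->rdata++ = read_data();
			cur->rlen--;
		}
		if (!cur->wlen && !cur->rlen)
			wake_up(&xfer_wait);	/* all bytes moved */
	}
	spin_unlock_irqrestore(&xfer_lock, flags);
}

/* Submitting thread: hand the transfer to the IRQ handler and sleep. */
static int ec_submit(struct ec_xfer *t)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&xfer_lock, flags);
	cur = t;			/* IRQ handler advances it from here on */
	spin_unlock_irqrestore(&xfer_lock, flags);

	if (!wait_event_timeout(xfer_wait, !t->wlen && !t->rlen,
				msecs_to_jiffies(500)))
		ret = -ETIME;		/* the real code falls back to polling */

	spin_lock_irqsave(&xfer_lock, flags);
	cur = NULL;
	spin_unlock_irqrestore(&xfer_lock, flags);
	return ret;
}

The point of this structure is that the submitting thread never has to wake a worker just to move one byte: each EC interrupt advances the transfer directly under the spinlock, and the waiter is woken only once, when both byte counts reach zero. The storm avoidance in the actual patch builds on the same state: an interrupt that arrives while neither a write nor a read is possible is counted as false (irq_count), and once a transaction accumulates more than ACPI_EC_STORM_THRESHOLD of them the driver disables the GPE for the duration of subsequent transactions and polls instead.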
References: http://bugzilla.kernel.org/show_bug.cgi?id=9998 http://bugzilla.kernel.org/show_bug.cgi?id=10724 http://bugzilla.kernel.org/show_bug.cgi?id=10919 http://bugzilla.kernel.org/show_bug.cgi?id=11309 http://bugzilla.kernel.org/show_bug.cgi?id=11549 Signed-off-by: Alexey Starikovskiy Tested-by: Sitsofe Wheeler Signed-off-by: Len Brown diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 13593f9..7f0d81c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1,7 +1,7 @@ /* - * ec.c - ACPI Embedded Controller Driver (v2.0) + * ec.c - ACPI Embedded Controller Driver (v2.1) * - * Copyright (C) 2006, 2007 Alexey Starikovskiy + * Copyright (C) 2006-2008 Alexey Starikovskiy * Copyright (C) 2006 Denis Sadykov * Copyright (C) 2004 Luming Yu * Copyright (C) 2001, 2002 Andy Grover @@ -26,7 +26,7 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -/* Uncomment next line to get verbose print outs*/ +/* Uncomment next line to get verbose printout */ /* #define DEBUG */ #include @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -65,22 +66,21 @@ enum ec_command { ACPI_EC_COMMAND_QUERY = 0x84, }; -/* EC events */ -enum ec_event { - ACPI_EC_EVENT_OBF_1 = 1, /* Output buffer full */ - ACPI_EC_EVENT_IBF_0, /* Input buffer empty */ -}; - #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ #define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */ +#define ACPI_EC_STORM_THRESHOLD 20 /* number of false interrupts + per one transaction */ + enum { - EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */ EC_FLAGS_QUERY_PENDING, /* Query is pending */ - EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */ + EC_FLAGS_GPE_MODE, /* Expect GPE to be sent + * for status change */ EC_FLAGS_NO_GPE, /* Don't use GPE mode */ - EC_FLAGS_RESCHEDULE_POLL /* Re-schedule poll */ + EC_FLAGS_GPE_STORM, /* GPE storm detected */ + EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and + * OpReg are installed */ }; /* If we find an EC via the ECDT, we need to keep a ptr to its context */ @@ -95,6 +95,14 @@ struct acpi_ec_query_handler { u8 query_bit; }; +struct transaction_data { + const u8 *wdata; + u8 *rdata; + unsigned short irq_count; + u8 wlen; + u8 rlen; +}; + static struct acpi_ec { acpi_handle handle; unsigned long gpe; @@ -105,9 +113,8 @@ static struct acpi_ec { struct mutex lock; wait_queue_head_t wait; struct list_head list; - struct delayed_work work; - atomic_t irq_count; - u8 handlers_installed; + struct transaction_data *t; + spinlock_t t_lock; } *boot_ec, *first_ec; /* @@ -150,7 +157,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec) { u8 x = inb(ec->data_addr); pr_debug(PREFIX "---> data = 0x%2.2x\n", x); - return inb(ec->data_addr); + return x; } static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) @@ -165,68 +172,79 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) outb(data, ec->data_addr); } -static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event) +static int ec_transaction_done(struct acpi_ec *ec) { - if (test_bit(EC_FLAGS_WAIT_GPE, &ec->flags)) - return 0; - if (event == ACPI_EC_EVENT_OBF_1) { - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF) - return 1; - } else if (event == ACPI_EC_EVENT_IBF_0) { - if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)) - return 1; - } - - return 0; + unsigned long flags; + int ret = 0; + spin_lock_irqsave(&ec->t_lock, 
flags); + if (!ec->t || (!ec->t->wlen && !ec->t->rlen)) + ret = 1; + spin_unlock_irqrestore(&ec->t_lock, flags); + return ret; } -static void ec_schedule_ec_poll(struct acpi_ec *ec) +static void gpe_transaction(struct acpi_ec *ec, u8 status) { - if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags)) - schedule_delayed_work(&ec->work, - msecs_to_jiffies(ACPI_EC_DELAY)); + unsigned long flags; + spin_lock_irqsave(&ec->t_lock, flags); + if (!ec->t) + goto unlock; + if (ec->t->wlen > 0) { + if ((status & ACPI_EC_FLAG_IBF) == 0) { + acpi_ec_write_data(ec, *(ec->t->wdata++)); + --ec->t->wlen; + } else + /* false interrupt, state didn't change */ + ++ec->t->irq_count; + + } else if (ec->t->rlen > 0) { + if ((status & ACPI_EC_FLAG_OBF) == 1) { + *(ec->t->rdata++) = acpi_ec_read_data(ec); + --ec->t->rlen; + } else + /* false interrupt, state didn't change */ + ++ec->t->irq_count; + } +unlock: + spin_unlock_irqrestore(&ec->t_lock, flags); } -static void ec_switch_to_poll_mode(struct acpi_ec *ec) +static int acpi_ec_wait(struct acpi_ec *ec) { + if (wait_event_timeout(ec->wait, ec_transaction_done(ec), + msecs_to_jiffies(ACPI_EC_DELAY))) + return 0; + /* missing GPEs, switch back to poll mode */ + if (printk_ratelimit()) + pr_info(PREFIX "missing confirmations, " + "switch off interrupt mode.\n"); set_bit(EC_FLAGS_NO_GPE, &ec->flags); clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); - acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); - set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags); + return 1; } -static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll) +static void acpi_ec_gpe_query(void *ec_cxt); + +static int ec_check_sci(struct acpi_ec *ec, u8 state) { - atomic_set(&ec->irq_count, 0); - if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) && - likely(!force_poll)) { - if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event), - msecs_to_jiffies(ACPI_EC_DELAY))) - return 0; - clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); - if (acpi_ec_check_status(ec, event)) { - /* missing GPEs, switch back to poll mode */ - if (printk_ratelimit()) - pr_info(PREFIX "missing confirmations, " - "switch off interrupt mode.\n"); - ec_switch_to_poll_mode(ec); - ec_schedule_ec_poll(ec); - return 0; - } - } else { - unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); - clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); - while (time_before(jiffies, delay)) { - if (acpi_ec_check_status(ec, event)) - return 0; - msleep(1); - } - if (acpi_ec_check_status(ec,event)) + if (state & ACPI_EC_FLAG_SCI) { + if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) + return acpi_os_execute(OSL_EC_BURST_HANDLER, + acpi_ec_gpe_query, ec); + } + return 0; +} + +static int ec_poll(struct acpi_ec *ec) +{ + unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); + msleep(1); + while (time_before(jiffies, delay)) { + gpe_transaction(ec, acpi_ec_read_status(ec)); + msleep(1); + if (ec_transaction_done(ec)) return 0; } - pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n", - acpi_ec_read_status(ec), - (event == ACPI_EC_EVENT_OBF_1) ? 
"\"b0=1\"" : "\"b1=0\""); return -ETIME; } @@ -235,45 +253,51 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, u8 * rdata, unsigned rdata_len, int force_poll) { - int result = 0; - set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); + unsigned long tmp; + struct transaction_data t = {.wdata = wdata, .rdata = rdata, + .wlen = wdata_len, .rlen = rdata_len, + .irq_count = 0}; + int ret = 0; pr_debug(PREFIX "transaction start\n"); - acpi_ec_write_cmd(ec, command); - for (; wdata_len > 0; --wdata_len) { - result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll); - if (result) { - pr_err(PREFIX - "write_cmd timeout, command = %d\n", command); - goto end; - } - set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); - acpi_ec_write_data(ec, *(wdata++)); + /* disable GPE during transaction if storm is detected */ + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { + clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); + acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); } - - if (!rdata_len) { - result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll); - if (result) { - pr_err(PREFIX - "finish-write timeout, command = %d\n", command); - goto end; - } - } else if (command == ACPI_EC_COMMAND_QUERY) + /* start transaction */ + spin_lock_irqsave(&ec->t_lock, tmp); + /* following two actions should be kept atomic */ + ec->t = &t; + acpi_ec_write_cmd(ec, command); + if (command == ACPI_EC_COMMAND_QUERY) clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - - for (; rdata_len > 0; --rdata_len) { - result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll); - if (result) { - pr_err(PREFIX "read timeout, command = %d\n", command); - goto end; - } - /* Don't expect GPE after last read */ - if (rdata_len > 1) - set_bit(EC_FLAGS_WAIT_GPE, &ec->flags); - *(rdata++) = acpi_ec_read_data(ec); - } - end: + spin_unlock_irqrestore(&ec->t_lock, tmp); + /* if we selected poll mode or failed in GPE-mode do a poll loop */ + if (force_poll || + !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) || + acpi_ec_wait(ec)) + ret = ec_poll(ec); pr_debug(PREFIX "transaction end\n"); - return result; + spin_lock_irqsave(&ec->t_lock, tmp); + ec->t = NULL; + spin_unlock_irqrestore(&ec->t_lock, tmp); + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { + /* check if we received SCI during transaction */ + ec_check_sci(ec, acpi_ec_read_status(ec)); + /* it is safe to enable GPE outside of transaction */ + acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); + } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) && + t.irq_count > ACPI_EC_STORM_THRESHOLD) { + pr_debug(PREFIX "GPE storm detected\n"); + set_bit(EC_FLAGS_GPE_STORM, &ec->flags); + } + return ret; +} + +static int ec_check_ibf0(struct acpi_ec *ec) +{ + u8 status = acpi_ec_read_status(ec); + return (status & ACPI_EC_FLAG_IBF) == 0; } static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, @@ -283,40 +307,34 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, { int status; u32 glk; - if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata)) return -EINVAL; - if (rdata) memset(rdata, 0, rdata_len); - mutex_lock(&ec->lock); if (ec->global_lock) { status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); if (ACPI_FAILURE(status)) { - mutex_unlock(&ec->lock); - return -ENODEV; + status = -ENODEV; + goto unlock; } } - - status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0); - if (status) { + if (!wait_event_timeout(ec->wait, ec_check_ibf0(ec), + msecs_to_jiffies(ACPI_EC_DELAY))) { pr_err(PREFIX "input buffer is not empty, " "aborting transaction\n"); + status = -ETIME; goto end; } - status = 
acpi_ec_transaction_unlocked(ec, command, wdata, wdata_len, rdata, rdata_len, force_poll); - - end: - +end: if (ec->global_lock) acpi_release_global_lock(glk); +unlock: mutex_unlock(&ec->lock); - return status; } @@ -332,7 +350,9 @@ int acpi_ec_burst_enable(struct acpi_ec *ec) int acpi_ec_burst_disable(struct acpi_ec *ec) { - return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0); + return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? + acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, + NULL, 0, NULL, 0, 0) : 0; } static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) @@ -513,46 +533,26 @@ static void acpi_ec_gpe_query(void *ec_cxt) static u32 acpi_ec_gpe_handler(void *data) { - acpi_status status = AE_OK; struct acpi_ec *ec = data; - u8 state = acpi_ec_read_status(ec); + u8 status; pr_debug(PREFIX "~~~> interrupt\n"); - atomic_inc(&ec->irq_count); - if (atomic_read(&ec->irq_count) > 5) { - pr_err(PREFIX "GPE storm detected, disabling EC GPE\n"); - ec_switch_to_poll_mode(ec); - goto end; - } - clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); - if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) + status = acpi_ec_read_status(ec); + + gpe_transaction(ec, status); + if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0) wake_up(&ec->wait); - if (state & ACPI_EC_FLAG_SCI) { - if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - status = acpi_os_execute(OSL_EC_BURST_HANDLER, - acpi_ec_gpe_query, ec); - } else if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) && - !test_bit(EC_FLAGS_NO_GPE, &ec->flags) && - in_interrupt()) { + ec_check_sci(ec, status); + if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) && + !test_bit(EC_FLAGS_NO_GPE, &ec->flags)) { /* this is non-query, must be confirmation */ if (printk_ratelimit()) pr_info(PREFIX "non-query interrupt received," " switching to interrupt mode\n"); set_bit(EC_FLAGS_GPE_MODE, &ec->flags); - clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags); } -end: - ec_schedule_ec_poll(ec); - return ACPI_SUCCESS(status) ? 
- ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; -} - -static void do_ec_poll(struct work_struct *work) -{ - struct acpi_ec *ec = container_of(work, struct acpi_ec, work.work); - atomic_set(&ec->irq_count, 0); - (void)acpi_ec_gpe_handler(ec); + return ACPI_INTERRUPT_HANDLED; } /* -------------------------------------------------------------------------- @@ -696,8 +696,7 @@ static struct acpi_ec *make_acpi_ec(void) mutex_init(&ec->lock); init_waitqueue_head(&ec->wait); INIT_LIST_HEAD(&ec->list); - INIT_DELAYED_WORK_DEFERRABLE(&ec->work, do_ec_poll); - atomic_set(&ec->irq_count, 0); + spin_lock_init(&ec->t_lock); return ec; } @@ -736,22 +735,15 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) return AE_CTRL_TERMINATE; } -static void ec_poll_stop(struct acpi_ec *ec) -{ - clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags); - cancel_delayed_work(&ec->work); -} - static void ec_remove_handlers(struct acpi_ec *ec) { - ec_poll_stop(ec); if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) pr_err(PREFIX "failed to remove space handler\n"); if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler))) pr_err(PREFIX "failed to remove gpe handler\n"); - ec->handlers_installed = 0; + clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags); } static int acpi_ec_add(struct acpi_device *device) @@ -846,17 +838,15 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context) static int ec_install_handlers(struct acpi_ec *ec) { acpi_status status; - if (ec->handlers_installed) + if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) return 0; status = acpi_install_gpe_handler(NULL, ec->gpe, - ACPI_GPE_EDGE_TRIGGERED, - &acpi_ec_gpe_handler, ec); + ACPI_GPE_EDGE_TRIGGERED, + &acpi_ec_gpe_handler, ec); if (ACPI_FAILURE(status)) return -ENODEV; - acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); - status = acpi_install_address_space_handler(ec->handle, ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, @@ -866,7 +856,7 @@ static int ec_install_handlers(struct acpi_ec *ec) return -ENODEV; } - ec->handlers_installed = 1; + set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags); return 0; } @@ -887,7 +877,6 @@ static int acpi_ec_start(struct acpi_device *device) /* EC is fully operational, allow queries */ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - ec_schedule_ec_poll(ec); return ret; } @@ -906,7 +895,7 @@ static int acpi_ec_stop(struct acpi_device *device, int type) int __init acpi_boot_ec_enable(void) { - if (!boot_ec || boot_ec->handlers_installed) + if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags)) return 0; if (!ec_install_handlers(boot_ec)) { first_ec = boot_ec; -- cgit v0.10.2 From 6a9037887ccea92152b034edeb15d453d1a98555 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 22 Sep 2008 14:06:01 -0700 Subject: power_supply: fix dependency of tosa_battery tosa_battery should also depend on wm97xx_ts as it uses dac-accessing functions from that module. 
Signed-off-by: Dmitry Baryshkov Signed-off-by: Andrew Morton Signed-off-by: Anton Vorontsov diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index b2bd104..ae095a4 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -51,7 +51,7 @@ config BATTERY_OLPC config BATTERY_TOSA tristate "Sharp SL-6000 (tosa) battery" - depends on MACH_TOSA && MFD_TC6393XB + depends on MACH_TOSA && MFD_TC6393XB && TOUCHSCREEN_WM97XX help Say Y to enable support for the battery on the Sharp Zaurus SL-6000 (tosa) models. -- cgit v0.10.2 From 8aef7e8f8de2d900da892085edbf14ea35fe6881 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Mon, 22 Sep 2008 14:53:50 -0700 Subject: bq27x00_battery: use unaligned access helper Remove hand-rolled get_unaligned_be16, this points to a possible bug as bq27x00_read does another endian byteswap which sparse notices: drivers/power/bq27x00_battery.c:81:14: warning: cast to restricted __be16 Which should probably be checked. Signed-off-by: Harvey Harrison Cc: Rodolfo Giometti Signed-off-by: Andrew Morton Signed-off-by: Anton Vorontsov diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c index 62d4948..0c056fc 100644 --- a/drivers/power/bq27x00_battery.c +++ b/drivers/power/bq27x00_battery.c @@ -23,8 +23,8 @@ #include #include #include - #include +#include #define DRIVER_VERSION "1.0.0" @@ -33,7 +33,6 @@ #define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */ #define BQ27x00_REG_AI 0x14 #define BQ27x00_REG_FLAGS 0x0A -#define HIGH_BYTE(A) ((A) << 8) /* If the system has several batteries we need a different name for each * of them... @@ -239,7 +238,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single, err = i2c_transfer(client->adapter, msg, 1); if (err >= 0) { if (!b_single) - *rt_value = data[1] | HIGH_BYTE(data[0]); + *rt_value = get_unaligned_be16(data); else *rt_value = data[0]; -- cgit v0.10.2 From 0e4a008a4f389b468cfe8b58c7d77882a6e25695 Mon Sep 17 00:00:00 2001 From: Julien Brunel Date: Fri, 26 Sep 2008 15:27:25 +0200 Subject: UBI: fix IS_ERR test In case of error, the function add_volume returns an ERR pointer. The result of IS_ERR, which is supposed to be used in a test as it is, is here checked to be less than zero, which seems odd. We suggest to replace this test by a simple IS_ERR test. A simplified version of the semantic match that finds this problem is as follows: (http://www.emn.fr/x-info/coccinelle/) // @def0@ expression x; position p0; @@ x@p0 = add_volume(...) @protected@ expression def0.x,E; position def0.p0; position p; statement S; @@ x@p0 ... when != x = E if (!IS_ERR(x) && ...) {<... x@p ...>} else S @unprotected@ expression def0.x,E; identifier fld; position def0.p0; position p != protected.p; @@ x@p0 ... 
when != x = E * x@p->fld // Signed-off-by: Julien Brunel Signed-off-by: Julia Lawall Signed-off-by: Artem Bityutskiy diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 967bb44..4f2daa5 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -387,7 +387,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, pnum, vol_id, lnum, ec, sqnum, bitflips); sv = add_volume(si, vol_id, pnum, vid_hdr); - if (IS_ERR(sv) < 0) + if (IS_ERR(sv)) return PTR_ERR(sv); if (si->max_sqnum < sqnum) -- cgit v0.10.2 From 77cd62e8082b9743b59ee1946a4c3ee2e3cd2bce Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Fri, 26 Sep 2008 17:00:11 -0700 Subject: fsldma: allow Freescale Elo DMA driver to be compiled as a module Modify the Freescale Elo / Elo Plus DMA driver so that it can be compiled as a module. The primary change is to stop treating the DMA controller as a bus, and the DMA channels as devices on the bus. This is because the Open Firmware (OF) kernel code does not allow busses to be removed, so although we can call of_platform_bus_probe() to probe the DMA channels, there is no of_platform_bus_remove(). Instead, the DMA channels are manually probed, similar to what fsl_elbc_nand.c does. Cc: Scott Wood Acked-by: Li Yang Signed-off-by: Timur Tabi Signed-off-by: Dan Williams diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index cd30390..904e575 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -48,13 +48,13 @@ config DW_DMAC can be integrated in chips such as the Atmel AT32ap7000. config FSL_DMA - bool "Freescale MPC85xx/MPC83xx DMA support" - depends on PPC + tristate "Freescale Elo and Elo Plus DMA support" + depends on FSL_SOC select DMA_ENGINE ---help--- - Enable support for the Freescale DMA engine. Now, it support - MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. - The MPC8349, MPC8360 is also supported. + Enable support for the Freescale Elo and Elo Plus DMA controllers. + The Elo is the DMA controller on some 82xx and 83xx parts, and the + Elo Plus is the DMA controller on 85xx and 86xx parts. config MV_XOR bool "Marvell XOR engine support" diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index e9b2638..0b95dcc 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -370,7 +370,10 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan, struct dma_client *client) { struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); - LIST_HEAD(tmp_list); + + /* Has this channel already been allocated? */ + if (fsl_chan->desc_pool) + return 1; /* We need the descriptor to be aligned to 32bytes * for meeting FSL DMA specification requirement. 
@@ -410,6 +413,8 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan) } spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); dma_pool_destroy(fsl_chan->desc_pool); + + fsl_chan->desc_pool = NULL; } static struct dma_async_tx_descriptor * @@ -786,33 +791,29 @@ static void dma_do_tasklet(unsigned long data) fsl_chan_ld_cleanup(fsl_chan); } -static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, - const struct of_device_id *match) +static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, + struct device_node *node, u32 feature, const char *compatible) { - struct fsl_dma_device *fdev; struct fsl_dma_chan *new_fsl_chan; int err; - fdev = dev_get_drvdata(dev->dev.parent); - BUG_ON(!fdev); - /* alloc channel */ new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); if (!new_fsl_chan) { - dev_err(&dev->dev, "No free memory for allocating " + dev_err(fdev->dev, "No free memory for allocating " "dma channels!\n"); return -ENOMEM; } /* get dma channel register base */ - err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg); + err = of_address_to_resource(node, 0, &new_fsl_chan->reg); if (err) { - dev_err(&dev->dev, "Can't get %s property 'reg'\n", - dev->node->full_name); + dev_err(fdev->dev, "Can't get %s property 'reg'\n", + node->full_name); goto err_no_reg; } - new_fsl_chan->feature = *(u32 *)match->data; + new_fsl_chan->feature = feature; if (!fdev->feature) fdev->feature = new_fsl_chan->feature; @@ -822,13 +823,13 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, */ WARN_ON(fdev->feature != new_fsl_chan->feature); - new_fsl_chan->dev = &dev->dev; + new_fsl_chan->dev = &new_fsl_chan->common.dev; new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) { - dev_err(&dev->dev, "There is no %d channel!\n", + dev_err(fdev->dev, "There is no %d channel!\n", new_fsl_chan->id); err = -EINVAL; goto err_no_chan; @@ -862,20 +863,20 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, &fdev->common.channels); fdev->common.chancnt++; - new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0); + new_fsl_chan->irq = irq_of_parse_and_map(node, 0); if (new_fsl_chan->irq != NO_IRQ) { err = request_irq(new_fsl_chan->irq, &fsl_dma_chan_do_interrupt, IRQF_SHARED, "fsldma-channel", new_fsl_chan); if (err) { - dev_err(&dev->dev, "DMA channel %s request_irq error " - "with return %d\n", dev->node->full_name, err); + dev_err(fdev->dev, "DMA channel %s request_irq error " + "with return %d\n", node->full_name, err); goto err_no_irq; } } - dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, - match->compatible, new_fsl_chan->irq); + dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, + compatible, new_fsl_chan->irq); return 0; @@ -888,38 +889,20 @@ err_no_reg: return err; } -const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN; -const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN; - -static struct of_device_id of_fsl_dma_chan_ids[] = { - { - .compatible = "fsl,eloplus-dma-channel", - .data = (void *)&mpc8540_dma_ip_feature, - }, - { - .compatible = "fsl,elo-dma-channel", - .data = (void *)&mpc8349_dma_ip_feature, - }, - {} -}; - -static struct of_platform_driver of_fsl_dma_chan_driver = { - .name = "of-fsl-dma-channel", - .match_table = of_fsl_dma_chan_ids, - .probe = of_fsl_dma_chan_probe, -}; - -static 
__init int of_fsl_dma_chan_init(void) +static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) { - return of_register_platform_driver(&of_fsl_dma_chan_driver); + free_irq(fchan->irq, fchan); + list_del(&fchan->common.device_node); + iounmap(fchan->reg_base); + kfree(fchan); } static int __devinit of_fsl_dma_probe(struct of_device *dev, const struct of_device_id *match) { int err; - unsigned int irq; struct fsl_dma_device *fdev; + struct device_node *child; fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); if (!fdev) { @@ -953,9 +936,9 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.dev = &dev->dev; - irq = irq_of_parse_and_map(dev->node, 0); - if (irq != NO_IRQ) { - err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED, + fdev->irq = irq_of_parse_and_map(dev->node, 0); + if (fdev->irq != NO_IRQ) { + err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED, "fsldma-device", fdev); if (err) { dev_err(&dev->dev, "DMA device request_irq error " @@ -965,7 +948,21 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, } dev_set_drvdata(&(dev->dev), fdev); - of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev); + + /* We cannot use of_platform_bus_probe() because there is no + * of_platform_bus_remove. Instead, we manually instantiate every DMA + * channel object. + */ + for_each_child_of_node(dev->node, child) { + if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) + fsl_dma_chan_probe(fdev, child, + FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, + "fsl,eloplus-dma-channel"); + if (of_device_is_compatible(child, "fsl,elo-dma-channel")) + fsl_dma_chan_probe(fdev, child, + FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, + "fsl,elo-dma-channel"); + } dma_async_device_register(&fdev->common); return 0; @@ -977,6 +974,30 @@ err_no_reg: return err; } +static int of_fsl_dma_remove(struct of_device *of_dev) +{ + struct fsl_dma_device *fdev; + unsigned int i; + + fdev = dev_get_drvdata(&of_dev->dev); + + dma_async_device_unregister(&fdev->common); + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) + if (fdev->chan[i]) + fsl_dma_chan_remove(fdev->chan[i]); + + if (fdev->irq != NO_IRQ) + free_irq(fdev->irq, fdev); + + iounmap(fdev->reg_base); + + kfree(fdev); + dev_set_drvdata(&of_dev->dev, NULL); + + return 0; +} + static struct of_device_id of_fsl_dma_ids[] = { { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, @@ -984,15 +1005,32 @@ static struct of_device_id of_fsl_dma_ids[] = { }; static struct of_platform_driver of_fsl_dma_driver = { - .name = "of-fsl-dma", + .name = "fsl-elo-dma", .match_table = of_fsl_dma_ids, .probe = of_fsl_dma_probe, + .remove = of_fsl_dma_remove, }; static __init int of_fsl_dma_init(void) { - return of_register_platform_driver(&of_fsl_dma_driver); + int ret; + + pr_info("Freescale Elo / Elo Plus DMA driver\n"); + + ret = of_register_platform_driver(&of_fsl_dma_driver); + if (ret) + pr_err("fsldma: failed to register platform driver\n"); + + return ret; +} + +static void __exit of_fsl_dma_exit(void) +{ + of_unregister_platform_driver(&of_fsl_dma_driver); } -subsys_initcall(of_fsl_dma_chan_init); subsys_initcall(of_fsl_dma_init); +module_exit(of_fsl_dma_exit); + +MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 6faf07b..4f21a51 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -114,6 +114,7 @@ struct 
fsl_dma_device { struct dma_device common; struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; u32 feature; /* The same as DMA channels */ + int irq; /* Channel IRQ */ }; /* Define macros for fsl_dma_chan->feature property */ -- cgit v0.10.2 From 3afe7eb37f4d47f31d30a81c1b42ca02eab01e44 Mon Sep 17 00:00:00 2001 From: Alexander Belyakov Date: Thu, 25 Sep 2008 17:53:24 +0400 Subject: [MTD] [NOR] fix cfi_cmdset_0001 FL_SYNCING race (take 2) The patch fixes CFI issue with multipartitional devices leading to the set of errors or even deadlock. The problem is CFI FL_SYNCING state race with flash operations (e.g. erase suspend). It is reproduced by running intensive writes on one JFFS2 partition and simultaneously performing mount/unmount cycle on another partition of the same chip. Signed-off-by: Alexander Belyakov Acked-by: Nicolas Pitre Signed-off-by: David Woodhouse diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 5157e3c..c93a8be 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -725,6 +725,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long struct cfi_pri_intelext *cfip = cfi->cmdset_priv; unsigned long timeo = jiffies + HZ; + /* Prevent setting state FL_SYNCING for chip in suspended state. */ + if (mode == FL_SYNCING && chip->oldstate != FL_READY) + goto sleep; + switch (chip->state) { case FL_STATUS: @@ -830,8 +834,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr DECLARE_WAITQUEUE(wait, current); retry: - if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING - || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) { + if (chip->priv && + (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE + || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) { /* * OK. We have possibility for contention on the write/erase * operations which are global to the real chip and not per @@ -881,6 +886,14 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr return ret; } spin_lock(&shared->lock); + + /* We should not own chip if it is already + * in FL_SYNCING state. Put contender and retry. */ + if (chip->state == FL_SYNCING) { + put_chip(map, contender, contender->start); + spin_unlock(contender->mutex); + goto retry; + } spin_unlock(contender->mutex); } -- cgit v0.10.2 From e416de5e61e1a9b7f987804cbb67230b5f5293c6 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 23 Sep 2008 17:25:10 +0100 Subject: Export the ROM enable/disable helpers .... so that they can be used by MTD map drivers. Lets us close #9420 Signed-off-by: Alan Cox Signed-off-by: David Woodhouse diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index bd5c0e0..1f5f614 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -21,7 +21,7 @@ * between the ROM and other resources, so enabling it may disable access * to MMIO registers or other card memory. */ -static int pci_enable_rom(struct pci_dev *pdev) +int pci_enable_rom(struct pci_dev *pdev) { struct resource *res = pdev->resource + PCI_ROM_RESOURCE; struct pci_bus_region region; @@ -45,7 +45,7 @@ static int pci_enable_rom(struct pci_dev *pdev) * Disable ROM decoding on a PCI device by turning off the last bit in the * ROM BAR. 
*/ -static void pci_disable_rom(struct pci_dev *pdev) +void pci_disable_rom(struct pci_dev *pdev) { u32 rom_addr; pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); @@ -260,3 +260,5 @@ void pci_cleanup_rom(struct pci_dev *pdev) EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); +EXPORT_SYMBOL_GPL(pci_enable_rom); +EXPORT_SYMBOL_GPL(pci_disable_rom); diff --git a/include/linux/pci.h b/include/linux/pci.h index c0e1400..7a4cee0 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -631,6 +631,8 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i); int pci_select_bars(struct pci_dev *dev, unsigned long flags); /* ROM control related routines */ +int pci_enable_rom(struct pci_dev *pdev); +void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); size_t pci_get_rom_size(void __iomem *rom, size_t size); -- cgit v0.10.2 From 4ab13943612673ef0822e1a041a9e629ba13a87c Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 23 Sep 2008 17:25:10 +0100 Subject: [MTD] [NOR] intel_dc21285 switch to ROM API Now that the needed helpers are exported, it becomes a nice simple switch over. Closes #9420 Signed-off-by: Alan Cox Signed-off-by: David Woodhouse diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index 5c6a25c..d978c2e 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -203,15 +203,8 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map) * not enabled, should we be allocating a new resource for it * or simply enabling it? */ - if (!(pci_resource_flags(dev, PCI_ROM_RESOURCE) & - IORESOURCE_ROM_ENABLE)) { - u32 val; - pci_resource_flags(dev, PCI_ROM_RESOURCE) |= IORESOURCE_ROM_ENABLE; - pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val); - val |= PCI_ROM_ADDRESS_ENABLE; - pci_write_config_dword(dev, PCI_ROM_ADDRESS, val); - printk("%s: enabling expansion ROM\n", pci_name(dev)); - } + pci_enable_rom(dev); + printk("%s: enabling expansion ROM\n", pci_name(dev)); } if (!len || !base) @@ -240,10 +233,7 @@ intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map) /* * We need to undo the PCI BAR2/PCI ROM BAR address alteration. */ - pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~IORESOURCE_ROM_ENABLE; - pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val); - val &= ~PCI_ROM_ADDRESS_ENABLE; - pci_write_config_dword(dev, PCI_ROM_ADDRESS, val); + pci_disable_rom(dev); } static unsigned long -- cgit v0.10.2 From f324277cf70ad284dd99acf5ac5101e32bc8c55b Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Mon, 22 Sep 2008 14:49:52 -0700 Subject: [MTD] [MAPS] Maps: make uclinux mapping driver depend on MTD_RAM ...since it only probes that Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 3ae76ec..5ea1693 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -491,7 +491,7 @@ config MTD_BFIN_ASYNC config MTD_UCLINUX tristate "Generic uClinux RAM/ROM filesystem support" - depends on MTD_PARTITIONS && !MMU + depends on MTD_PARTITIONS && MTD_RAM && !MMU help Map driver to support image based filesystems for uClinux. 
-- cgit v0.10.2 From 7086efe1c1536f6bc160e7d60a9bfd645b91f279 Mon Sep 17 00:00:00 2001 From: Frank Mayhar Date: Fri, 12 Sep 2008 09:54:39 -0700 Subject: timers: fix itimer/many thread hang, v3 - fix UP lockup - another set of UP/SMP cleanups and simplifications Signed-off-by: Frank Mayhar Signed-off-by: Ingo Molnar diff --git a/include/linux/sched.h b/include/linux/sched.h index b982fb4..23d9d54 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2134,7 +2134,6 @@ static inline int thread_group_cputime_clone_thread(struct task_struct *curr) return thread_group_cputime_alloc(curr); } - static inline void thread_group_cputime_free(struct signal_struct *sig) { free_percpu(sig->cputime.totals); diff --git a/kernel/sched.c b/kernel/sched.c index 260c22c..29a3152 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4046,7 +4046,6 @@ unsigned long long task_delta_exec(struct task_struct *p) unsigned long flags; u64 ns = 0; - rq = task_rq_lock(p, &flags); if (task_current(rq, p)) { u64 delta_exec; diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index d6903bd..b8c1569 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -276,133 +276,83 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next) * on CONFIG_SCHEDSTATS. */ -#ifdef CONFIG_SMP - /** - * thread_group_cputime_account_user - Maintain utime for a thread group. + * account_group_user_time - Maintain utime for a thread group. * - * @tgtimes: Pointer to thread_group_cputime structure. - * @cputime: Time value by which to increment the utime field of that - * structure. + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the utime field of the + * thread_group_cputime structure. * * If thread group time is being maintained, get the structure for the * running CPU and update the utime field there. */ -static inline void thread_group_cputime_account_user( - struct thread_group_cputime *tgtimes, - cputime_t cputime) +static inline void account_group_user_time(struct task_struct *tsk, + cputime_t cputime) { - if (tgtimes->totals) { + struct signal_struct *sig; + + sig = tsk->signal; + if (unlikely(!sig)) + return; + if (sig->cputime.totals) { struct task_cputime *times; - times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times = per_cpu_ptr(sig->cputime.totals, get_cpu()); times->utime = cputime_add(times->utime, cputime); put_cpu_no_resched(); } } /** - * thread_group_cputime_account_system - Maintain stime for a thread group. + * account_group_system_time - Maintain stime for a thread group. * - * @tgtimes: Pointer to thread_group_cputime structure. - * @cputime: Time value by which to increment the stime field of that - * structure. + * @tsk: Pointer to task structure. + * @cputime: Time value by which to increment the stime field of the + * thread_group_cputime structure. * * If thread group time is being maintained, get the structure for the * running CPU and update the stime field there. 
*/ -static inline void thread_group_cputime_account_system( - struct thread_group_cputime *tgtimes, - cputime_t cputime) +static inline void account_group_system_time(struct task_struct *tsk, + cputime_t cputime) { - if (tgtimes->totals) { + struct signal_struct *sig; + + sig = tsk->signal; + if (unlikely(!sig)) + return; + if (sig->cputime.totals) { struct task_cputime *times; - times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times = per_cpu_ptr(sig->cputime.totals, get_cpu()); times->stime = cputime_add(times->stime, cputime); put_cpu_no_resched(); } } /** - * thread_group_cputime_account_exec_runtime - Maintain exec runtime for a - * thread group. + * account_group_exec_runtime - Maintain exec runtime for a thread group. * - * @tgtimes: Pointer to thread_group_cputime structure. + * @tsk: Pointer to task structure. * @ns: Time value by which to increment the sum_exec_runtime field - * of that structure. + * of the thread_group_cputime structure. * * If thread group time is being maintained, get the structure for the * running CPU and update the sum_exec_runtime field there. */ -static inline void thread_group_cputime_account_exec_runtime( - struct thread_group_cputime *tgtimes, - unsigned long long ns) +static inline void account_group_exec_runtime(struct task_struct *tsk, + unsigned long long ns) { - if (tgtimes->totals) { + struct signal_struct *sig; + + sig = tsk->signal; + if (unlikely(!sig)) + return; + if (sig->cputime.totals) { struct task_cputime *times; - times = per_cpu_ptr(tgtimes->totals, get_cpu()); + times = per_cpu_ptr(sig->cputime.totals, get_cpu()); times->sum_exec_runtime += ns; put_cpu_no_resched(); } } - -#else /* CONFIG_SMP */ - -static inline void thread_group_cputime_account_user( - struct thread_group_cputime *tgtimes, - cputime_t cputime) -{ - tgtimes->totals->utime = cputime_add(tgtimes->totals->utime, cputime); -} - -static inline void thread_group_cputime_account_system( - struct thread_group_cputime *tgtimes, - cputime_t cputime) -{ - tgtimes->totals->stime = cputime_add(tgtimes->totals->stime, cputime); -} - -static inline void thread_group_cputime_account_exec_runtime( - struct thread_group_cputime *tgtimes, - unsigned long long ns) -{ - tgtimes->totals->sum_exec_runtime += ns; -} - -#endif /* CONFIG_SMP */ - -/* - * These are the generic time-accounting routines that use the above - * functions. They are the functions actually called by the scheduler. - */ -static inline void account_group_user_time(struct task_struct *tsk, - cputime_t cputime) -{ - struct signal_struct *sig; - - sig = tsk->signal; - if (likely(sig)) - thread_group_cputime_account_user(&sig->cputime, cputime); -} - -static inline void account_group_system_time(struct task_struct *tsk, - cputime_t cputime) -{ - struct signal_struct *sig; - - sig = tsk->signal; - if (likely(sig)) - thread_group_cputime_account_system(&sig->cputime, cputime); -} - -static inline void account_group_exec_runtime(struct task_struct *tsk, - unsigned long long ns) -{ - struct signal_struct *sig; - - sig = tsk->signal; - if (likely(sig)) - thread_group_cputime_account_exec_runtime(&sig->cputime, ns); -} -- cgit v0.10.2 From 88856d67cf6b787447889915bafd541be20b8630 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 29 Sep 2008 19:43:44 +0900 Subject: sh: Fix up uaccess_64 put/get_user() cast warnings. 
Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 5580fd4..0042c90 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h @@ -26,16 +26,20 @@ do { \ retval = 0; \ switch (size) { \ case 1: \ - retval = __get_user_asm_b(x, ptr); \ + retval = __get_user_asm_b((void *)&x, \ + (long)ptr); \ break; \ case 2: \ - retval = __get_user_asm_w(x, ptr); \ + retval = __get_user_asm_w((void *)&x, \ + (long)ptr); \ break; \ case 4: \ - retval = __get_user_asm_l(x, ptr); \ + retval = __get_user_asm_l((void *)&x, \ + (long)ptr); \ break; \ case 8: \ - retval = __get_user_asm_q(x, ptr); \ + retval = __get_user_asm_q((void *)&x, \ + (long)ptr); \ break; \ default: \ __get_user_unknown(); \ @@ -54,16 +58,20 @@ do { \ retval = 0; \ switch (size) { \ case 1: \ - retval = __put_user_asm_b(x, ptr); \ + retval = __put_user_asm_b((void *)&x, \ + (long)ptr); \ break; \ case 2: \ - retval = __put_user_asm_w(x, ptr); \ + retval = __put_user_asm_w((void *)&x, \ + (long)ptr); \ break; \ case 4: \ - retval = __put_user_asm_l(x, ptr); \ + retval = __put_user_asm_l((void *)&x, \ + (long)ptr); \ break; \ case 8: \ - retval = __put_user_asm_q(x, ptr); \ + retval = __put_user_asm_q((void *)&x, \ + (long)ptr); \ break; \ default: \ __put_user_unknown(); \ -- cgit v0.10.2 From 091db04559a62a00769553c9120623c2f6bc8b4d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 29 Sep 2008 19:44:40 +0900 Subject: sh: Fix up signal_64 cast warnings. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 0582ae4..ce3e851 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -563,7 +563,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; if (__copy_to_user(frame->retcode, - (unsigned long long)sa_default_restorer & (~1), 16) != 0) + (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) goto give_sigsegv; /* Cohere the trampoline with the I-cache. */ @@ -681,7 +681,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; if (__copy_to_user(frame->retcode, - (unsigned long long)sa_default_rt_restorer & (~1), 16) != 0) + (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0) goto give_sigsegv; flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); -- cgit v0.10.2 From 50b72e600b62bcdf40971e55f609cf4771346cc1 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 29 Sep 2008 19:45:16 +0900 Subject: sh: sh_ksyms_64 needs __strncpy_from_user() definition. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 0042c90..56fd20b 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h @@ -85,5 +85,7 @@ extern long __put_user_asm_q(void *, long); extern void __put_user_unknown(void); extern long __strnlen_user(const char *__s, long __n); +extern int __strncpy_from_user(unsigned long __dest, + unsigned long __user __src, int __count); #endif /* __ASM_SH_UACCESS_64_H */ -- cgit v0.10.2 From 4d01cdafbafc0fdeb730838ca38a48e5ca2894cd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 29 Sep 2008 20:09:17 +0900 Subject: sh: SH-5 clk fwk support. 
Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile index 8646363..ce4602e 100644 --- a/arch/sh/kernel/cpu/sh5/Makefile +++ b/arch/sh/kernel/cpu/sh5/Makefile @@ -5,3 +5,8 @@ obj-y := entry.o probe.o switchto.o obj-$(CONFIG_SH_FPU) += fpu.o obj-$(CONFIG_KALLSYMS) += unwind.o + +# Primary on-chip clocks (common) +clock-$(CONFIG_CPU_SH5) := clock-sh5.o + +obj-y += $(clock-y) diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c new file mode 100644 index 0000000..52c4924 --- /dev/null +++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c @@ -0,0 +1,79 @@ +/* + * arch/sh/kernel/cpu/sh5/clock-sh5.c + * + * SH-5 support for the clock framework + * + * Copyright (C) 2008 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include + +static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 }; + +/* Clock, Power and Reset Controller */ +#define CPRC_BLOCK_OFF 0x01010000 +#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF) + +static unsigned long cprc_base; + +static void master_clk_init(struct clk *clk) +{ + int idx = (ctrl_inl(cprc_base + 0x00) >> 6) & 0x0007; + clk->rate *= ifc_table[idx]; +} + +static struct clk_ops sh5_master_clk_ops = { + .init = master_clk_init, +}; + +static void module_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(cprc_base) >> 12) & 0x0007; + clk->rate = clk->parent->rate / ifc_table[idx]; +} + +static struct clk_ops sh5_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static void bus_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(cprc_base) >> 3) & 0x0007; + clk->rate = clk->parent->rate / ifc_table[idx]; +} + +static struct clk_ops sh5_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static void cpu_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(cprc_base) & 0x0007); + clk->rate = clk->parent->rate / ifc_table[idx]; +} + +static struct clk_ops sh5_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct clk_ops *sh5_clk_ops[] = { + &sh5_master_clk_ops, + &sh5_module_clk_ops, + &sh5_bus_clk_ops, + &sh5_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +{ + cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC"); + BUG_ON(!cprc_base); + + if (idx < ARRAY_SIZE(sh5_clk_ops)) + *ops = sh5_clk_ops[idx]; +} diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c index 791edab..bbb2af1 100644 --- a/arch/sh/kernel/time_64.c +++ b/arch/sh/kernel/time_64.c @@ -39,6 +39,7 @@ #include #include #include +#include #define TMU_TOCR_INIT 0x00 #define TMU0_TCR_INIT 0x0020 @@ -51,14 +52,6 @@ #define RTC_RCR1_CIE 0x10 /* Carry Interrupt Enable */ #define RTC_RCR1 (rtc_base + 0x38) -/* Clock, Power and Reset Controller */ -#define CPRC_BLOCK_OFF 0x01010000 -#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF - -#define FRQCR (cprc_base+0x0) -#define WTCSR (cprc_base+0x0018) -#define STBCR (cprc_base+0x0030) - /* Time Management Unit */ #define TMU_BLOCK_OFF 0x01020000 #define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF @@ -293,103 +286,17 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } - -static __init unsigned int get_cpu_hz(void) -{ - unsigned int count; - unsigned long __dummy; - unsigned long ctc_val_init, ctc_val; - - /* - ** Regardless the toolchain, force the compiler to use the - ** arbitrary register r3 as a clock tick 
counter. - ** NOTE: r3 must be in accordance with sh64_rtc_interrupt() - */ - register unsigned long long __rtc_irq_flag __asm__ ("r3"); - - local_irq_enable(); - do {} while (ctrl_inb(rtc_base) != 0); - ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */ - - /* - * r3 is arbitrary. CDC does not support "=z". - */ - ctc_val_init = 0xffffffff; - ctc_val = ctc_val_init; - - asm volatile("gettr tr0, %1\n\t" - "putcon %0, " __CTC "\n\t" - "and %2, r63, %2\n\t" - "pta $+4, tr0\n\t" - "beq/l %2, r63, tr0\n\t" - "ptabs %1, tr0\n\t" - "getcon " __CTC ", %0\n\t" - : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag) - : "0" (0)); - local_irq_disable(); - /* - * SH-3: - * CPU clock = 4 stages * loop - * tst rm,rm if id ex - * bt/s 1b if id ex - * add #1,rd if id ex - * (if) pipe line stole - * tst rm,rm if id ex - * .... - * - * - * SH-4: - * CPU clock = 6 stages * loop - * I don't know why. - * .... - * - * SH-5: - * Use CTC register to count. This approach returns the right value - * even if the I-cache is disabled (e.g. whilst debugging.) - * - */ - - count = ctc_val_init - ctc_val; /* CTC counts down */ - - /* - * This really is count by the number of clock cycles - * by the ratio between a complete R64CNT - * wrap-around (128) and CUI interrupt being raised (64). - */ - return count*2; -} - -static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id) -{ - struct pt_regs *regs = get_irq_regs(); - - ctrl_outb(0, RTC_RCR1); /* Disable Carry Interrupts */ - regs->regs[3] = 1; /* Using r3 */ - - return IRQ_HANDLED; -} - static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED, .mask = CPU_MASK_NONE, .name = "timer", }; -static struct irqaction irq1 = { - .handler = sh64_rtc_interrupt, - .flags = IRQF_DISABLED, - .mask = CPU_MASK_NONE, - .name = "rtc", -}; void __init time_init(void) { - unsigned int cpu_clock, master_clock, bus_clock, module_clock; unsigned long interval; - unsigned long frqcr, ifc, pfc; - static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 }; -#define bfc_table ifc_table /* Same */ -#define pfc_table ifc_table /* Same */ + struct clk *clk; tmu_base = onchip_remap(TMU_BASE, 1024, "TMU"); if (!tmu_base) { @@ -401,50 +308,19 @@ void __init time_init(void) panic("Unable to remap RTC\n"); } - cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC"); - if (!cprc_base) { - panic("Unable to remap CPRC\n"); - } + clk = clk_get(NULL, "cpu_clk"); + scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / + (unsigned long long)(clk_get_rate(clk) / HZ)); rtc_sh_get_time(&xtime); setup_irq(TIMER_IRQ, &irq0); - setup_irq(RTC_IRQ, &irq1); - - /* Check how fast it is.. */ - cpu_clock = get_cpu_hz(); - - /* Note careful order of operations to maintain reasonable precision and avoid overflow. 
*/ - scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ)); - - free_irq(RTC_IRQ, NULL); - - printk("CPU clock: %d.%02dMHz\n", - (cpu_clock / 1000000), (cpu_clock % 1000000)/10000); - { - unsigned short bfc; - frqcr = ctrl_inl(FRQCR); - ifc = ifc_table[(frqcr>> 6) & 0x0007]; - bfc = bfc_table[(frqcr>> 3) & 0x0007]; - pfc = pfc_table[(frqcr>> 12) & 0x0007]; - master_clock = cpu_clock * ifc; - bus_clock = master_clock/bfc; - } - printk("Bus clock: %d.%02dMHz\n", - (bus_clock/1000000), (bus_clock % 1000000)/10000); - module_clock = master_clock/pfc; - printk("Module clock: %d.%02dMHz\n", - (module_clock/1000000), (module_clock % 1000000)/10000); - interval = (module_clock/(HZ*4)); + clk = clk_get(NULL, "module_clk"); + interval = (clk_get_rate(clk)/(HZ*4)); printk("Interval = %ld\n", interval); - current_cpu_data.cpu_clock = cpu_clock; - current_cpu_data.master_clock = master_clock; - current_cpu_data.bus_clock = bus_clock; - current_cpu_data.module_clock = module_clock; - /* Start TMU0 */ ctrl_outb(TMU_TSTR_OFF, TMU_TSTR); ctrl_outb(TMU_TOCR_INIT, TMU_TOCR); @@ -454,36 +330,6 @@ void __init time_init(void) ctrl_outb(TMU_TSTR_INIT, TMU_TSTR); } -void enter_deep_standby(void) -{ - /* Disable watchdog timer */ - ctrl_outl(0xa5000000, WTCSR); - /* Configure deep standby on sleep */ - ctrl_outl(0x03, STBCR); - -#ifdef CONFIG_SH_ALPHANUMERIC - { - extern void mach_alphanum(int position, unsigned char value); - extern void mach_alphanum_brightness(int setting); - char halted[] = "Halted. "; - int i; - mach_alphanum_brightness(6); /* dimmest setting above off */ - for (i=0; i<8; i++) { - mach_alphanum(i, halted[i]); - } - asm __volatile__ ("synco"); - } -#endif - - asm __volatile__ ("sleep"); - asm __volatile__ ("synci"); - asm __volatile__ ("nop"); - asm __volatile__ ("nop"); - asm __volatile__ ("nop"); - asm __volatile__ ("nop"); - panic("Unexpected wakeup!\n"); -} - static struct resource rtc_resources[] = { [0] = { /* RTC base, filled in by rtc_init */ -- cgit v0.10.2 From bdeb3be7cc6911477b7169dad62a427d7a263d02 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 29 Sep 2008 20:14:44 +0900 Subject: sh: Use clk fwk for preset lpj on sh64, too. Signed-off-by: Paul Mundt diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 71be7ff..dca54cc 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -28,7 +28,6 @@ config SUPERH32 config SUPERH64 def_bool y if CPU_SH5 - select GENERIC_CALIBRATE_DELAY config ARCH_DEFCONFIG string -- cgit v0.10.2 From 8463200a00fe2aea938b40173198a0983f2929ef Mon Sep 17 00:00:00 2001 From: Alexey Starikovskiy Date: Fri, 26 Sep 2008 00:54:28 +0400 Subject: ACPI: EC: Rename some variables No functional changes. Signed-off-by: Alexey Starikovskiy Acked-by: Rafael J. 
Wysocki Signed-off-by: Len Brown diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 7f0d81c..453ba1e 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -95,10 +95,11 @@ struct acpi_ec_query_handler { u8 query_bit; }; -struct transaction_data { +struct transaction { const u8 *wdata; u8 *rdata; unsigned short irq_count; + u8 command; u8 wlen; u8 rlen; }; @@ -113,8 +114,8 @@ static struct acpi_ec { struct mutex lock; wait_queue_head_t wait; struct list_head list; - struct transaction_data *t; - spinlock_t t_lock; + struct transaction *curr; + spinlock_t curr_lock; } *boot_ec, *first_ec; /* @@ -176,37 +177,37 @@ static int ec_transaction_done(struct acpi_ec *ec) { unsigned long flags; int ret = 0; - spin_lock_irqsave(&ec->t_lock, flags); - if (!ec->t || (!ec->t->wlen && !ec->t->rlen)) + spin_lock_irqsave(&ec->curr_lock, flags); + if (!ec->curr || (!ec->curr->wlen && !ec->curr->rlen)) ret = 1; - spin_unlock_irqrestore(&ec->t_lock, flags); + spin_unlock_irqrestore(&ec->curr_lock, flags); return ret; } static void gpe_transaction(struct acpi_ec *ec, u8 status) { unsigned long flags; - spin_lock_irqsave(&ec->t_lock, flags); - if (!ec->t) + spin_lock_irqsave(&ec->curr_lock, flags); + if (!ec->curr) goto unlock; - if (ec->t->wlen > 0) { + if (ec->curr->wlen > 0) { if ((status & ACPI_EC_FLAG_IBF) == 0) { - acpi_ec_write_data(ec, *(ec->t->wdata++)); - --ec->t->wlen; + acpi_ec_write_data(ec, *(ec->curr->wdata++)); + --ec->curr->wlen; } else /* false interrupt, state didn't change */ - ++ec->t->irq_count; + ++ec->curr->irq_count; - } else if (ec->t->rlen > 0) { + } else if (ec->curr->rlen > 0) { if ((status & ACPI_EC_FLAG_OBF) == 1) { - *(ec->t->rdata++) = acpi_ec_read_data(ec); - --ec->t->rlen; + *(ec->curr->rdata++) = acpi_ec_read_data(ec); + --ec->curr->rlen; } else /* false interrupt, state didn't change */ - ++ec->t->irq_count; + ++ec->curr->irq_count; } unlock: - spin_unlock_irqrestore(&ec->t_lock, flags); + spin_unlock_irqrestore(&ec->curr_lock, flags); } static int acpi_ec_wait(struct acpi_ec *ec) @@ -248,15 +249,11 @@ static int ec_poll(struct acpi_ec *ec) return -ETIME; } -static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, - const u8 * wdata, unsigned wdata_len, - u8 * rdata, unsigned rdata_len, +static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, + struct transaction *t, int force_poll) { unsigned long tmp; - struct transaction_data t = {.wdata = wdata, .rdata = rdata, - .wlen = wdata_len, .rlen = rdata_len, - .irq_count = 0}; int ret = 0; pr_debug(PREFIX "transaction start\n"); /* disable GPE during transaction if storm is detected */ @@ -265,29 +262,30 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); } /* start transaction */ - spin_lock_irqsave(&ec->t_lock, tmp); + spin_lock_irqsave(&ec->curr_lock, tmp); /* following two actions should be kept atomic */ - ec->t = &t; - acpi_ec_write_cmd(ec, command); - if (command == ACPI_EC_COMMAND_QUERY) + t->irq_count = 0; + ec->curr = t; + acpi_ec_write_cmd(ec, ec->curr->command); + if (ec->curr->command == ACPI_EC_COMMAND_QUERY) clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - spin_unlock_irqrestore(&ec->t_lock, tmp); + spin_unlock_irqrestore(&ec->curr_lock, tmp); /* if we selected poll mode or failed in GPE-mode do a poll loop */ if (force_poll || !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) || acpi_ec_wait(ec)) ret = ec_poll(ec); pr_debug(PREFIX "transaction end\n"); - spin_lock_irqsave(&ec->t_lock, tmp); - ec->t = NULL; - 
spin_unlock_irqrestore(&ec->t_lock, tmp); + spin_lock_irqsave(&ec->curr_lock, tmp); + ec->curr = NULL; + spin_unlock_irqrestore(&ec->curr_lock, tmp); if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { /* check if we received SCI during transaction */ ec_check_sci(ec, acpi_ec_read_status(ec)); /* it is safe to enable GPE outside of transaction */ acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) && - t.irq_count > ACPI_EC_STORM_THRESHOLD) { + t->irq_count > ACPI_EC_STORM_THRESHOLD) { pr_debug(PREFIX "GPE storm detected\n"); set_bit(EC_FLAGS_GPE_STORM, &ec->flags); } @@ -300,17 +298,15 @@ static int ec_check_ibf0(struct acpi_ec *ec) return (status & ACPI_EC_FLAG_IBF) == 0; } -static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, - const u8 * wdata, unsigned wdata_len, - u8 * rdata, unsigned rdata_len, +static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t, int force_poll) { int status; u32 glk; - if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata)) + if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) return -EINVAL; - if (rdata) - memset(rdata, 0, rdata_len); + if (t->rdata) + memset(t->rdata, 0, t->rlen); mutex_lock(&ec->lock); if (ec->global_lock) { status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); @@ -326,10 +322,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, status = -ETIME; goto end; } - status = acpi_ec_transaction_unlocked(ec, command, - wdata, wdata_len, - rdata, rdata_len, - force_poll); + status = acpi_ec_transaction_unlocked(ec, t, force_poll); end: if (ec->global_lock) acpi_release_global_lock(glk); @@ -345,23 +338,32 @@ unlock: int acpi_ec_burst_enable(struct acpi_ec *ec) { u8 d; - return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0); + struct transaction t = {.command = ACPI_EC_BURST_ENABLE, + .wdata = NULL, .rdata = &d, + .wlen = 0, .rlen = 1}; + + return acpi_ec_transaction(ec, &t, 0); } int acpi_ec_burst_disable(struct acpi_ec *ec) { + struct transaction t = {.command = ACPI_EC_BURST_DISABLE, + .wdata = NULL, .rdata = NULL, + .wlen = 0, .rlen = 0}; + return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? 
- acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, - NULL, 0, NULL, 0, 0) : 0; + acpi_ec_transaction(ec, &t, 0) : 0; } static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) { int result; u8 d; + struct transaction t = {.command = ACPI_EC_COMMAND_READ, + .wdata = &address, .rdata = &d, + .wlen = 1, .rlen = 1}; - result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ, - &address, 1, &d, 1, 0); + result = acpi_ec_transaction(ec, &t, 0); *data = d; return result; } @@ -369,8 +371,11 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) { u8 wdata[2] = { address, data }; - return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE, - wdata, 2, NULL, 0, 0); + struct transaction t = {.command = ACPI_EC_COMMAND_WRITE, + .wdata = wdata, .rdata = NULL, + .wlen = 2, .rlen = 0}; + + return acpi_ec_transaction(ec, &t, 0); } /* @@ -432,12 +437,13 @@ int ec_transaction(u8 command, u8 * rdata, unsigned rdata_len, int force_poll) { + struct transaction t = {.command = command, + .wdata = wdata, .rdata = rdata, + .wlen = wdata_len, .rlen = rdata_len}; if (!first_ec) return -ENODEV; - return acpi_ec_transaction(first_ec, command, wdata, - wdata_len, rdata, rdata_len, - force_poll); + return acpi_ec_transaction(first_ec, &t, force_poll); } EXPORT_SYMBOL(ec_transaction); @@ -446,7 +452,9 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data) { int result; u8 d; - + struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, + .wdata = NULL, .rdata = &d, + .wlen = 0, .rlen = 1}; if (!ec || !data) return -EINVAL; @@ -456,7 +464,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data) * bit to be cleared (and thus clearing the interrupt source). */ - result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0); + result = acpi_ec_transaction(ec, &t, 0); if (result) return result; @@ -696,7 +704,7 @@ static struct acpi_ec *make_acpi_ec(void) mutex_init(&ec->lock); init_waitqueue_head(&ec->wait); INIT_LIST_HEAD(&ec->list); - spin_lock_init(&ec->t_lock); + spin_lock_init(&ec->curr_lock); return ec; } -- cgit v0.10.2 From 1508487e7f16d992ad23cabd3712563ff912f413 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 30 Sep 2008 08:28:17 +0200 Subject: timers: fix itimer/many thread hang, fix fix bogus rq dereference: v3 removed the locking but also removed the rq initialization. Signed-off-by: Ingo Molnar diff --git a/kernel/sched.c b/kernel/sched.c index 29a3152..ebb03de 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4042,10 +4042,12 @@ EXPORT_PER_CPU_SYMBOL(kstat); */ unsigned long long task_delta_exec(struct task_struct *p) { - struct rq *rq; unsigned long flags; + struct rq *rq; u64 ns = 0; + rq = task_rq_lock(p, &flags); + if (task_current(rq, p)) { u64 delta_exec; @@ -4055,6 +4057,8 @@ unsigned long long task_delta_exec(struct task_struct *p) ns = delta_exec; } + task_rq_unlock(rq, &flags); + return ns; } -- cgit v0.10.2 From 948cfb219bbbc3c8e1b10a671ca88219fa42a052 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 20 Aug 2008 11:56:33 +0300 Subject: UBIFS: add a print, fix comments and more minor stuff This commit adds a reserved pool size print and tweaks the prints to make them look nicer. It also fixes and cleans-up some comments. Additionally, it deletes some blank lines to make the code look a little nicer. In other words, nothing essential. 
Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 73db464..1a4973e 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -414,19 +414,21 @@ static int do_budget_space(struct ubifs_info *c) * @c->lst.empty_lebs + @c->freeable_cnt + @c->idx_gc_cnt - * @c->lst.taken_empty_lebs * - * @empty_lebs are available because they are empty. @freeable_cnt are - * available because they contain only free and dirty space and the - * index allocation always occurs after wbufs are synch'ed. - * @idx_gc_cnt are available because they are index LEBs that have been - * garbage collected (including trivial GC) and are awaiting the commit - * before they can be unmapped - note that the in-the-gaps method will - * grab these if it needs them. @taken_empty_lebs are empty_lebs that - * have already been allocated for some purpose (also includes those - * LEBs on the @idx_gc list). + * @c->lst.empty_lebs are available because they are empty. + * @c->freeable_cnt are available because they contain only free and + * dirty space, @c->idx_gc_cnt are available because they are index + * LEBs that have been garbage collected and are awaiting the commit + * before they can be used. And the in-the-gaps method will grab these + * if it needs them. @c->lst.taken_empty_lebs are empty LEBs that have + * already been allocated for some purpose. * - * Note, @taken_empty_lebs may temporarily be higher by one because of - * the way we serialize LEB allocations and budgeting. See a comment in - * 'ubifs_find_free_space()'. + * Note, @c->idx_gc_cnt is included to both @c->lst.empty_lebs (because + * these LEBs are empty) and to @c->lst.taken_empty_lebs (because they + * are taken until after the commit). + * + * Note, @c->lst.taken_empty_lebs may temporarily be higher by one + * because of the way we serialize LEB allocations and budgeting. See a + * comment in 'ubifs_find_free_space()'. 
*/ lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c->lst.taken_empty_lebs; diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 2ba93da..3659b88 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -125,6 +125,7 @@ static void adjust_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, } } } + /* Not greater than parent, so compare to children */ while (1) { /* Compare to left child */ @@ -576,7 +577,6 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, ubifs_assert(!(lprops->free & 7) && !(lprops->dirty & 7)); spin_lock(&c->space_lock); - if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) c->lst.taken_empty_lebs -= 1; @@ -637,11 +637,8 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, c->lst.taken_empty_lebs += 1; change_category(c, lprops); - c->idx_gc_cnt += idx_gc_cnt; - spin_unlock(&c->space_lock); - return lprops; } @@ -1262,7 +1259,6 @@ static int scan_check_cb(struct ubifs_info *c, } ubifs_scan_destroy(sleb); - return LPT_SCAN_CONTINUE; out_print: diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 3f49020..667c72d 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1144,19 +1144,21 @@ static int mount_ubifs(struct ubifs_info *c) if (mounted_read_only) ubifs_msg("mounted read-only"); x = (long long)c->main_lebs * c->leb_size; - ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d LEBs)", - x, x >> 10, x >> 20, c->main_lebs); + ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d " + "LEBs)", x, x >> 10, x >> 20, c->main_lebs); x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes; - ubifs_msg("journal size: %lld bytes (%lld KiB, %lld MiB, %d LEBs)", - x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt); - ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr)); - ubifs_msg("media format %d, latest format %d", + ubifs_msg("journal size: %lld bytes (%lld KiB, %lld MiB, %d " + "LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt); + ubifs_msg("media format: %d (latest is %d)", c->fmt_version, UBIFS_FORMAT_VERSION); + ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr)); + ubifs_msg("reserved pool size: %llu bytes (%llu KiB)", + c->report_rp_size, c->report_rp_size >> 10); dbg_msg("compiled on: " __DATE__ " at " __TIME__); dbg_msg("min. I/O unit size: %d bytes", c->min_io_size); dbg_msg("LEB size: %d bytes (%d KiB)", - c->leb_size, c->leb_size / 1024); + c->leb_size, c->leb_size >> 10); dbg_msg("data journal heads: %d", c->jhead_cnt - NONDATA_JHEADS_CNT); dbg_msg("UUID: %02X%02X%02X%02X-%02X%02X" -- cgit v0.10.2 From 8d47aef43ba166bdd11d522307c61ab23aab61c3 Mon Sep 17 00:00:00 2001 From: Hirofumi Nakagawa Date: Thu, 21 Aug 2008 17:16:40 +0300 Subject: UBIFS: remove unneeded unlikely() IS_ERR() macro already has unlikely(), so do not use constructions like 'if (unlikely(IS_ERR())'. Signed-off-by: Hirofumi Nakagawa Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c index 47814cd..717d79c 100644 --- a/fs/ubifs/find.c +++ b/fs/ubifs/find.c @@ -901,11 +901,11 @@ static int get_idx_gc_leb(struct ubifs_info *c) * it is needed now for this commit. 
*/ lp = ubifs_lpt_lookup_dirty(c, lnum); - if (unlikely(IS_ERR(lp))) + if (IS_ERR(lp)) return PTR_ERR(lp); lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, lp->flags | LPROPS_INDEX, -1); - if (unlikely(IS_ERR(lp))) + if (IS_ERR(lp)) return PTR_ERR(lp); dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, lp->free, lp->flags); diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index 02aba36..a6b633a 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c @@ -653,7 +653,7 @@ int ubifs_gc_start_commit(struct ubifs_info *c) */ while (1) { lp = ubifs_fast_find_freeable(c); - if (unlikely(IS_ERR(lp))) { + if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } @@ -665,7 +665,7 @@ int ubifs_gc_start_commit(struct ubifs_info *c) if (err) goto out; lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0); - if (unlikely(IS_ERR(lp))) { + if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } @@ -680,7 +680,7 @@ int ubifs_gc_start_commit(struct ubifs_info *c) /* Record index freeable LEBs for unmapping after commit */ while (1) { lp = ubifs_fast_find_frdi_idx(c); - if (unlikely(IS_ERR(lp))) { + if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out; } @@ -696,7 +696,7 @@ int ubifs_gc_start_commit(struct ubifs_info *c) /* Don't release the LEB until after the next commit */ flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX; lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1); - if (unlikely(IS_ERR(lp))) { + if (IS_ERR(lp)) { err = PTR_ERR(lp); kfree(idx_gc); goto out; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 7634c59..ba13c92 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -284,7 +284,7 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c, } zn = copy_znode(c, znode); - if (unlikely(IS_ERR(zn))) + if (IS_ERR(zn)) return zn; if (zbr->len) { @@ -1128,7 +1128,7 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c, ubifs_assert(znode == c->zroot.znode); znode = dirty_cow_znode(c, &c->zroot); } - if (unlikely(IS_ERR(znode)) || !p) + if (IS_ERR(znode) || !p) break; ubifs_assert(path[p - 1] >= 0); ubifs_assert(path[p - 1] < znode->child_cnt); diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 649bec7..cfd31e2 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -446,7 +446,7 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size) int type; xent = ubifs_tnc_next_ent(c, &key, &nm); - if (unlikely(IS_ERR(xent))) { + if (IS_ERR(xent)) { err = PTR_ERR(xent); break; } -- cgit v0.10.2 From 746103aca2ae2b044e32a6ab06a6536652124c99 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 27 Aug 2008 12:50:57 +0300 Subject: UBIFS: inline one-line functions 'ubifs_get_lprops()' and 'ubifs_release_lprops()' basically wrap mutex lock and unlock. We have them because we want lprops subsystem be separate and as independent as possible. And we planned better locking rules for lprops. Anyway, because they are short, it is better to inline them. Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 3659b88..f27176e 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -461,18 +461,6 @@ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops) } /** - * ubifs_get_lprops - get reference to LEB properties. - * @c: the UBIFS file-system description object - * - * This function locks lprops. Lprops have to be unlocked by - * 'ubifs_release_lprops()'. - */ -void ubifs_get_lprops(struct ubifs_info *c) -{ - mutex_lock(&c->lp_mutex); -} - -/** * calc_dark - calculate LEB dark space size. 
* @c: the UBIFS file-system description object * @spc: amount of free and dirty space in the LEB @@ -643,22 +631,6 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, } /** - * ubifs_release_lprops - release lprops lock. - * @c: the UBIFS file-system description object - * - * This function has to be called after each 'ubifs_get_lprops()' call to - * unlock lprops. - */ -void ubifs_release_lprops(struct ubifs_info *c) -{ - ubifs_assert(mutex_is_locked(&c->lp_mutex)); - ubifs_assert(c->lst.empty_lebs >= 0 && - c->lst.empty_lebs <= c->main_lebs); - - mutex_unlock(&c->lp_mutex); -} - -/** * ubifs_get_lp_stats - get lprops statistics. * @c: UBIFS file-system description object * @st: return statistics diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h index 4c12a92..4fa81d8 100644 --- a/fs/ubifs/misc.h +++ b/fs/ubifs/misc.h @@ -310,4 +310,31 @@ static inline int ubifs_tnc_lookup(struct ubifs_info *c, return ubifs_tnc_locate(c, key, node, NULL, NULL); } +/** + * ubifs_get_lprops - get reference to LEB properties. + * @c: the UBIFS file-system description object + * + * This function locks lprops. Lprops have to be unlocked by + * 'ubifs_release_lprops()'. + */ +static inline void ubifs_get_lprops(struct ubifs_info *c) +{ + mutex_lock(&c->lp_mutex); +} + +/** + * ubifs_release_lprops - release lprops lock. + * @c: the UBIFS file-system description object + * + * This function has to be called after each 'ubifs_get_lprops()' call to + * unlock lprops. + */ +static inline void ubifs_release_lprops(struct ubifs_info *c) +{ + ubifs_assert(mutex_is_locked(&c->lp_mutex)); + ubifs_assert(c->lst.empty_lebs >= 0 && + c->lst.empty_lebs <= c->main_lebs); + mutex_unlock(&c->lp_mutex); +} + #endif /* __UBIFS_MISC_H__ */ diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 17c620b..ce86549 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1586,12 +1586,10 @@ int ubifs_lpt_post_commit(struct ubifs_info *c); void ubifs_lpt_free(struct ubifs_info *c, int wr_only); /* lprops.c */ -void ubifs_get_lprops(struct ubifs_info *c); const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, const struct ubifs_lprops *lp, int free, int dirty, int flags, int idx_gc_cnt); -void ubifs_release_lprops(struct ubifs_info *c); void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *stats); void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, int cat); -- cgit v0.10.2 From a70948b564e9f6cb81115c606d46f5b74a77b7c2 Mon Sep 17 00:00:00 2001 From: Julien Brunel Date: Fri, 29 Aug 2008 11:08:32 +0200 Subject: UBIFS: use an IS_ERR test rather than a NULL test In case of error, the function kthread_create returns an ERR pointer, but never returns a NULL pointer. So a NULL test that comes before an IS_ERR test should be deleted. The semantic match that finds this problem is as follows: (http://www.emn.fr/x-info/coccinelle/) // @match_bad_null_test@ expression x, E; statement S1,S2; @@ x = kthread_create(...) ... 
when != x = E * if (x == NULL) S1 else S2 // Signed-off-by: Julien Brunel Signed-off-by: Julia Lawall Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 667c72d..d87b0cf 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1032,8 +1032,6 @@ static int mount_ubifs(struct ubifs_info *c) /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, c->bgt_name); - if (!c->bgt) - c->bgt = ERR_PTR(-EINVAL); if (IS_ERR(c->bgt)) { err = PTR_ERR(c->bgt); c->bgt = NULL; @@ -1347,8 +1345,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, c->bgt_name); - if (!c->bgt) - c->bgt = ERR_PTR(-EINVAL); if (IS_ERR(c->bgt)) { err = PTR_ERR(c->bgt); c->bgt = NULL; -- cgit v0.10.2 From 4793e7c5e1c88382ead18db5ca072bac54467318 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 2 Sep 2008 16:29:46 +0300 Subject: UBIFS: add bulk-read facility Some flash media are capable of reading sequentially at faster rates. UBIFS bulk-read facility is designed to take advantage of that, by reading in one go consecutive data nodes that are also located consecutively in the same LEB. Read speed on Arm platform with OneNAND goes from 17 MiB/s to 19 MiB/s. Signed-off-by: Adrian Hunter diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index 6a0d70a..340512c 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -86,6 +86,9 @@ norm_unmount (*) commit on unmount; the journal is committed fast_unmount do not commit on unmount; this option makes unmount faster, but the next mount slower because of the need to replay the journal. +bulk_read read more in one go to take advantage of flash + media that read faster sequentially +no_bulk_read (*) do not bulk-read Quick usage instructions diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 3d698e2..cdcfe95 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -577,8 +577,256 @@ out: return copied; } +/** + * populate_page - copy data nodes into a page for bulk-read. + * @c: UBIFS file-system description object + * @page: page + * @bu: bulk-read information + * @n: next zbranch slot + * + * This function returns %0 on success and a negative error code on failure. 
+ */ +static int populate_page(struct ubifs_info *c, struct page *page, + struct bu_info *bu, int *n) +{ + int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 1, read = 0; + struct inode *inode = page->mapping->host; + loff_t i_size = i_size_read(inode); + unsigned int page_block; + void *addr, *zaddr; + pgoff_t end_index; + + dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", + inode->i_ino, page->index, i_size, page->flags); + + addr = zaddr = kmap(page); + + end_index = (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + if (!i_size || page->index > end_index) { + memset(addr, 0, PAGE_CACHE_SIZE); + goto out_hole; + } + + page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; + while (1) { + int err, len, out_len, dlen; + + if (nn >= bu->cnt || + key_block(c, &bu->zbranch[nn].key) != page_block) + memset(addr, 0, UBIFS_BLOCK_SIZE); + else { + struct ubifs_data_node *dn; + + dn = bu->buf + (bu->zbranch[nn].offs - offs); + + ubifs_assert(dn->ch.sqnum > + ubifs_inode(inode)->creat_sqnum); + + len = le32_to_cpu(dn->size); + if (len <= 0 || len > UBIFS_BLOCK_SIZE) + goto out_err; + + dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; + out_len = UBIFS_BLOCK_SIZE; + err = ubifs_decompress(&dn->data, dlen, addr, &out_len, + le16_to_cpu(dn->compr_type)); + if (err || len != out_len) + goto out_err; + + if (len < UBIFS_BLOCK_SIZE) + memset(addr + len, 0, UBIFS_BLOCK_SIZE - len); + + nn += 1; + hole = 0; + read = (i << UBIFS_BLOCK_SHIFT) + len; + } + if (++i >= UBIFS_BLOCKS_PER_PAGE) + break; + addr += UBIFS_BLOCK_SIZE; + page_block += 1; + } + + if (end_index == page->index) { + int len = i_size & (PAGE_CACHE_SIZE - 1); + + if (len < read) + memset(zaddr + len, 0, read - len); + } + +out_hole: + if (hole) { + SetPageChecked(page); + dbg_gen("hole"); + } + + SetPageUptodate(page); + ClearPageError(page); + flush_dcache_page(page); + kunmap(page); + *n = nn; + return 0; + +out_err: + ClearPageUptodate(page); + SetPageError(page); + flush_dcache_page(page); + kunmap(page); + ubifs_err("bad data node (block %u, inode %lu)", + page_block, inode->i_ino); + return -EINVAL; +} + +/** + * ubifs_do_bulk_read - do bulk-read. + * @c: UBIFS file-system description object + * @page1: first page + * + * This function returns %1 if the bulk-read is done, otherwise %0 is returned. + */ +static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1) +{ + pgoff_t offset = page1->index, end_index; + struct address_space *mapping = page1->mapping; + struct inode *inode = mapping->host; + struct ubifs_inode *ui = ubifs_inode(inode); + struct bu_info *bu; + int err, page_idx, page_cnt, ret = 0, n = 0; + loff_t isize; + + bu = kmalloc(sizeof(struct bu_info), GFP_NOFS); + if (!bu) + return 0; + + bu->buf_len = c->bulk_read_buf_size; + bu->buf = kmalloc(bu->buf_len, GFP_NOFS); + if (!bu->buf) + goto out_free; + + data_key_init(c, &bu->key, inode->i_ino, + offset << UBIFS_BLOCKS_PER_PAGE_SHIFT); + + err = ubifs_tnc_get_bu_keys(c, bu); + if (err) + goto out_warn; + + if (bu->eof) { + /* Turn off bulk-read at the end of the file */ + ui->read_in_a_row = 1; + ui->bulk_read = 0; + } + + page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT; + if (!page_cnt) { + /* + * This happens when there are multiple blocks per page and the + * blocks for the first page we are looking for, are not + * together. If all the pages were like this, bulk-read would + * reduce performance, so we turn it off for a while. 
+ */ + ui->read_in_a_row = 0; + ui->bulk_read = 0; + goto out_free; + } + + if (bu->cnt) { + err = ubifs_tnc_bulk_read(c, bu); + if (err) + goto out_warn; + } + + err = populate_page(c, page1, bu, &n); + if (err) + goto out_warn; + + unlock_page(page1); + ret = 1; + + isize = i_size_read(inode); + if (isize == 0) + goto out_free; + end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); + + for (page_idx = 1; page_idx < page_cnt; page_idx++) { + pgoff_t page_offset = offset + page_idx; + struct page *page; + + if (page_offset > end_index) + break; + page = find_or_create_page(mapping, page_offset, + GFP_NOFS | __GFP_COLD); + if (!page) + break; + if (!PageUptodate(page)) + err = populate_page(c, page, bu, &n); + unlock_page(page); + page_cache_release(page); + if (err) + break; + } + + ui->last_page_read = offset + page_idx - 1; + +out_free: + kfree(bu->buf); + kfree(bu); + return ret; + +out_warn: + ubifs_warn("ignoring error %d and skipping bulk-read", err); + goto out_free; +} + +/** + * ubifs_bulk_read - determine whether to bulk-read and, if so, do it. + * @page: page from which to start bulk-read. + * + * Some flash media are capable of reading sequentially at faster rates. UBIFS + * bulk-read facility is designed to take advantage of that, by reading in one + * go consecutive data nodes that are also located consecutively in the same + * LEB. This function returns %1 if a bulk-read is done and %0 otherwise. + */ +static int ubifs_bulk_read(struct page *page) +{ + struct inode *inode = page->mapping->host; + struct ubifs_info *c = inode->i_sb->s_fs_info; + struct ubifs_inode *ui = ubifs_inode(inode); + pgoff_t index = page->index, last_page_read = ui->last_page_read; + int ret = 0; + + ui->last_page_read = index; + + if (!c->bulk_read) + return 0; + /* + * Bulk-read is protected by ui_mutex, but it is an optimization, so + * don't bother if we cannot lock the mutex. + */ + if (!mutex_trylock(&ui->ui_mutex)) + return 0; + if (index != last_page_read + 1) { + /* Turn off bulk-read if we stop reading sequentially */ + ui->read_in_a_row = 1; + if (ui->bulk_read) + ui->bulk_read = 0; + goto out_unlock; + } + if (!ui->bulk_read) { + ui->read_in_a_row += 1; + if (ui->read_in_a_row < 3) + goto out_unlock; + /* Three reads in a row, so switch on bulk-read */ + ui->bulk_read = 1; + } + ret = ubifs_do_bulk_read(c, page); +out_unlock: + mutex_unlock(&ui->ui_mutex); + return ret; +} + static int ubifs_readpage(struct file *file, struct page *page) { + if (ubifs_bulk_read(page)) + return 0; do_readpage(page); unlock_page(page); return 0; diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h index 8f74760..9ee6508 100644 --- a/fs/ubifs/key.h +++ b/fs/ubifs/key.h @@ -484,7 +484,7 @@ static inline void key_copy(const struct ubifs_info *c, * @key2: the second key to compare * * This function compares 2 keys and returns %-1 if @key1 is less than - * @key2, 0 if the keys are equivalent and %1 if @key1 is greater than @key2. + * @key2, %0 if the keys are equivalent and %1 if @key1 is greater than @key2. */ static inline int keys_cmp(const struct ubifs_info *c, const union ubifs_key *key1, @@ -503,6 +503,26 @@ static inline int keys_cmp(const struct ubifs_info *c, } /** + * keys_eq - determine if keys are equivalent. + * @c: UBIFS file-system description object + * @key1: the first key to compare + * @key2: the second key to compare + * + * This function compares 2 keys and returns %1 if @key1 is equal to @key2 and + * %0 if not. 
+ */ +static inline int keys_eq(const struct ubifs_info *c, + const union ubifs_key *key1, + const union ubifs_key *key2) +{ + if (key1->u32[0] != key2->u32[0]) + return 0; + if (key1->u32[1] != key2->u32[1]) + return 0; + return 1; +} + +/** * is_hash_key - is a key vulnerable to hash collisions. * @c: UBIFS file-system description object * @key: key diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index d87b0cf..b1c57e8 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -401,6 +401,11 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt) else if (c->mount_opts.unmount_mode == 1) seq_printf(s, ",norm_unmount"); + if (c->mount_opts.bulk_read == 2) + seq_printf(s, ",bulk_read"); + else if (c->mount_opts.bulk_read == 1) + seq_printf(s, ",no_bulk_read"); + return 0; } @@ -538,6 +543,18 @@ static int init_constants_early(struct ubifs_info *c) * calculations when reporting free space. */ c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; + /* Buffer size for bulk-reads */ + c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; + if (c->bulk_read_buf_size > c->leb_size) + c->bulk_read_buf_size = c->leb_size; + if (c->bulk_read_buf_size > 128 * 1024) { + /* Check if we can kmalloc more than 128KiB */ + void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL); + + kfree(try); + if (!try) + c->bulk_read_buf_size = 128 * 1024; + } return 0; } @@ -840,17 +857,23 @@ static int check_volume_empty(struct ubifs_info *c) * * Opt_fast_unmount: do not run a journal commit before un-mounting * Opt_norm_unmount: run a journal commit before un-mounting + * Opt_bulk_read: enable bulk-reads + * Opt_no_bulk_read: disable bulk-reads * Opt_err: just end of array marker */ enum { Opt_fast_unmount, Opt_norm_unmount, + Opt_bulk_read, + Opt_no_bulk_read, Opt_err, }; static match_table_t tokens = { {Opt_fast_unmount, "fast_unmount"}, {Opt_norm_unmount, "norm_unmount"}, + {Opt_bulk_read, "bulk_read"}, + {Opt_no_bulk_read, "no_bulk_read"}, {Opt_err, NULL}, }; @@ -888,6 +911,14 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, c->mount_opts.unmount_mode = 1; c->fast_unmount = 0; break; + case Opt_bulk_read: + c->mount_opts.bulk_read = 2; + c->bulk_read = 1; + break; + case Opt_no_bulk_read: + c->mount_opts.bulk_read = 1; + c->bulk_read = 0; + break; default: ubifs_err("unrecognized mount option \"%s\" " "or missing value", p); diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index ba13c92..d279012 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -1492,6 +1492,289 @@ out: } /** + * ubifs_tnc_get_bu_keys - lookup keys for bulk-read. + * @c: UBIFS file-system description object + * @bu: bulk-read parameters and results + * + * Lookup consecutive data node keys for the same inode that reside + * consecutively in the same LEB. 
+ */ +int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) +{ + int n, err = 0, lnum = -1, uninitialized_var(offs); + int uninitialized_var(len); + unsigned int block = key_block(c, &bu->key); + struct ubifs_znode *znode; + + bu->cnt = 0; + bu->blk_cnt = 0; + bu->eof = 0; + + mutex_lock(&c->tnc_mutex); + /* Find first key */ + err = ubifs_lookup_level0(c, &bu->key, &znode, &n); + if (err < 0) + goto out; + if (err) { + /* Key found */ + len = znode->zbranch[n].len; + /* The buffer must be big enough for at least 1 node */ + if (len > bu->buf_len) { + err = -EINVAL; + goto out; + } + /* Add this key */ + bu->zbranch[bu->cnt++] = znode->zbranch[n]; + bu->blk_cnt += 1; + lnum = znode->zbranch[n].lnum; + offs = ALIGN(znode->zbranch[n].offs + len, 8); + } + while (1) { + struct ubifs_zbranch *zbr; + union ubifs_key *key; + unsigned int next_block; + + /* Find next key */ + err = tnc_next(c, &znode, &n); + if (err) + goto out; + zbr = &znode->zbranch[n]; + key = &zbr->key; + /* See if there is another data key for this file */ + if (key_inum(c, key) != key_inum(c, &bu->key) || + key_type(c, key) != UBIFS_DATA_KEY) { + err = -ENOENT; + goto out; + } + if (lnum < 0) { + /* First key found */ + lnum = zbr->lnum; + offs = ALIGN(zbr->offs + zbr->len, 8); + len = zbr->len; + if (len > bu->buf_len) { + err = -EINVAL; + goto out; + } + } else { + /* + * The data nodes must be in consecutive positions in + * the same LEB. + */ + if (zbr->lnum != lnum || zbr->offs != offs) + goto out; + offs += ALIGN(zbr->len, 8); + len = ALIGN(len, 8) + zbr->len; + /* Must not exceed buffer length */ + if (len > bu->buf_len) + goto out; + } + /* Allow for holes */ + next_block = key_block(c, key); + bu->blk_cnt += (next_block - block - 1); + if (bu->blk_cnt >= UBIFS_MAX_BULK_READ) + goto out; + block = next_block; + /* Add this key */ + bu->zbranch[bu->cnt++] = *zbr; + bu->blk_cnt += 1; + /* See if we have room for more */ + if (bu->cnt >= UBIFS_MAX_BULK_READ) + goto out; + if (bu->blk_cnt >= UBIFS_MAX_BULK_READ) + goto out; + } +out: + if (err == -ENOENT) { + bu->eof = 1; + err = 0; + } + bu->gc_seq = c->gc_seq; + mutex_unlock(&c->tnc_mutex); + if (err) + return err; + /* + * An enormous hole could cause bulk-read to encompass too many + * page cache pages, so limit the number here. + */ + if (bu->blk_cnt >= UBIFS_MAX_BULK_READ) + bu->blk_cnt = UBIFS_MAX_BULK_READ; + /* + * Ensure that bulk-read covers a whole number of page cache + * pages. + */ + if (UBIFS_BLOCKS_PER_PAGE == 1 || + !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1))) + return 0; + if (bu->eof) { + /* At the end of file we can round up */ + bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1; + return 0; + } + /* Exclude data nodes that do not make up a whole page cache page */ + block = key_block(c, &bu->key) + bu->blk_cnt; + block &= ~(UBIFS_BLOCKS_PER_PAGE - 1); + while (bu->cnt) { + if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block) + break; + bu->cnt -= 1; + } + return 0; +} + +/** + * read_wbuf - bulk-read from a LEB with a wbuf. + * @wbuf: wbuf that may overlap the read + * @buf: buffer into which to read + * @len: read length + * @lnum: LEB number from which to read + * @offs: offset from which to read + * + * This functions returns %0 on success or a negative error code on failure. 
+ */ +static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum, + int offs) +{ + const struct ubifs_info *c = wbuf->c; + int rlen, overlap; + + dbg_io("LEB %d:%d, length %d", lnum, offs, len); + ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); + ubifs_assert(!(offs & 7) && offs < c->leb_size); + ubifs_assert(offs + len <= c->leb_size); + + spin_lock(&wbuf->lock); + overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs); + if (!overlap) { + /* We may safely unlock the write-buffer and read the data */ + spin_unlock(&wbuf->lock); + return ubi_read(c->ubi, lnum, buf, offs, len); + } + + /* Don't read under wbuf */ + rlen = wbuf->offs - offs; + if (rlen < 0) + rlen = 0; + + /* Copy the rest from the write-buffer */ + memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen); + spin_unlock(&wbuf->lock); + + if (rlen > 0) + /* Read everything that goes before write-buffer */ + return ubi_read(c->ubi, lnum, buf, offs, rlen); + + return 0; +} + +/** + * validate_data_node - validate data nodes for bulk-read. + * @c: UBIFS file-system description object + * @buf: buffer containing data node to validate + * @zbr: zbranch of data node to validate + * + * This functions returns %0 on success or a negative error code on failure. + */ +static int validate_data_node(struct ubifs_info *c, void *buf, + struct ubifs_zbranch *zbr) +{ + union ubifs_key key1; + struct ubifs_ch *ch = buf; + int err, len; + + if (ch->node_type != UBIFS_DATA_NODE) { + ubifs_err("bad node type (%d but expected %d)", + ch->node_type, UBIFS_DATA_NODE); + goto out_err; + } + + err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0); + if (err) { + ubifs_err("expected node type %d", UBIFS_DATA_NODE); + goto out; + } + + len = le32_to_cpu(ch->len); + if (len != zbr->len) { + ubifs_err("bad node length %d, expected %d", len, zbr->len); + goto out_err; + } + + /* Make sure the key of the read node is correct */ + key_read(c, buf + UBIFS_KEY_OFFSET, &key1); + if (!keys_eq(c, &zbr->key, &key1)) { + ubifs_err("bad key in node at LEB %d:%d", + zbr->lnum, zbr->offs); + dbg_tnc("looked for key %s found node's key %s", + DBGKEY(&zbr->key), DBGKEY1(&key1)); + goto out_err; + } + + return 0; + +out_err: + err = -EINVAL; +out: + ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs); + dbg_dump_node(c, buf); + dbg_dump_stack(); + return err; +} + +/** + * ubifs_tnc_bulk_read - read a number of data nodes in one go. + * @c: UBIFS file-system description object + * @bu: bulk-read parameters and results + * + * This functions reads and validates the data nodes that were identified by the + * 'ubifs_tnc_get_bu_keys()' function. This functions returns %0 on success, + * -EAGAIN to indicate a race with GC, or another negative error code on + * failure. 
+ */ +int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu) +{ + int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i; + struct ubifs_wbuf *wbuf; + void *buf; + + len = bu->zbranch[bu->cnt - 1].offs; + len += bu->zbranch[bu->cnt - 1].len - offs; + if (len > bu->buf_len) { + ubifs_err("buffer too small %d vs %d", bu->buf_len, len); + return -EINVAL; + } + + /* Do the read */ + wbuf = ubifs_get_wbuf(c, lnum); + if (wbuf) + err = read_wbuf(wbuf, bu->buf, len, lnum, offs); + else + err = ubi_read(c->ubi, lnum, bu->buf, offs, len); + + /* Check for a race with GC */ + if (maybe_leb_gced(c, lnum, bu->gc_seq)) + return -EAGAIN; + + if (err && err != -EBADMSG) { + ubifs_err("failed to read from LEB %d:%d, error %d", + lnum, offs, err); + dbg_dump_stack(); + dbg_tnc("key %s", DBGKEY(&bu->key)); + return err; + } + + /* Validate the nodes read */ + buf = bu->buf; + for (i = 0; i < bu->cnt; i++) { + err = validate_data_node(c, buf, &bu->zbranch[i]); + if (err) + return err; + buf = buf + ALIGN(bu->zbranch[i].len, 8); + } + + return 0; +} + +/** * do_lookup_nm- look up a "hashed" node. * @c: UBIFS file-system description object * @key: node key to lookup diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index ce86549..8513239 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -142,6 +142,9 @@ /* Maximum expected tree height for use by bottom_up_buf */ #define BOTTOM_UP_HEIGHT 64 +/* Maximum number of data nodes to bulk-read */ +#define UBIFS_MAX_BULK_READ 32 + /* * Lockdep classes for UBIFS inode @ui_mutex. */ @@ -329,8 +332,8 @@ struct ubifs_gced_idx_leb { * @dirty: non-zero if the inode is dirty * @xattr: non-zero if this is an extended attribute inode * @ui_mutex: serializes inode write-back with the rest of VFS operations, - * serializes "clean <-> dirty" state changes, protects @dirty, - * @ui_size, and @xattr_size + * serializes "clean <-> dirty" state changes, serializes bulk-read, + * protects @dirty, @ui_size, and @xattr_size * @ui_lock: protects @synced_i_size * @synced_i_size: synchronized size of inode, i.e. the value of inode size * currently stored on the flash; used only for regular file @@ -338,6 +341,9 @@ struct ubifs_gced_idx_leb { * @ui_size: inode size used by UBIFS when writing to flash * @flags: inode flags (@UBIFS_COMPR_FL, etc) * @compr_type: default compression type used for this inode + * @last_page_read: page number of last page read (for bulk read) + * @read_in_a_row: number of consecutive pages read in a row (for bulk read) + * @bulk_read: indicates whether bulk-read should be used * @data_len: length of the data attached to the inode * @data: inode's data * @@ -385,6 +391,9 @@ struct ubifs_inode { loff_t ui_size; int flags; int compr_type; + pgoff_t last_page_read; + pgoff_t read_in_a_row; + int bulk_read; int data_len; void *data; }; @@ -744,6 +753,28 @@ struct ubifs_znode { }; /** + * struct bu_info - bulk-read information + * @key: first data node key + * @zbranch: zbranches of data nodes to bulk read + * @buf: buffer to read into + * @buf_len: buffer length + * @gc_seq: GC sequence number to detect races with GC + * @cnt: number of data nodes for bulk read + * @blk_cnt: number of data blocks including holes + * @oef: end of file reached + */ +struct bu_info { + union ubifs_key key; + struct ubifs_zbranch zbranch[UBIFS_MAX_BULK_READ]; + void *buf; + int buf_len; + int gc_seq; + int cnt; + int blk_cnt; + int eof; +}; + +/** * struct ubifs_node_range - node length range description data structure. 
* @len: fixed node length * @min_len: minimum possible node length @@ -862,9 +893,11 @@ struct ubifs_orphan { /** * struct ubifs_mount_opts - UBIFS-specific mount options information. * @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast) + * @bulk_read: enable bulk-reads */ struct ubifs_mount_opts { unsigned int unmount_mode:2; + unsigned int bulk_read:2; }; /** @@ -965,6 +998,9 @@ struct ubifs_mount_opts { * @old_leb_cnt: count of logical eraseblocks before re-size * @ro_media: the underlying UBI volume is read-only * + * @bulk_read: enable bulk-reads + * @bulk_read_buf_size: buffer size for bulk-reads + * * @dirty_pg_cnt: number of dirty pages (not used) * @dirty_zn_cnt: number of dirty znodes * @clean_zn_cnt: number of clean znodes @@ -1205,6 +1241,9 @@ struct ubifs_info { int old_leb_cnt; int ro_media; + int bulk_read; + int bulk_read_buf_size; + atomic_long_t dirty_pg_cnt; atomic_long_t dirty_zn_cnt; atomic_long_t clean_zn_cnt; @@ -1490,6 +1529,8 @@ void destroy_old_idx(struct ubifs_info *c); int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level, int lnum, int offs); int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode); +int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu); +int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu); /* tnc_misc.c */ struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr, -- cgit v0.10.2 From 2953e73f1ce4b3284b409aefb9d46bbde6515c37 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 4 Sep 2008 16:26:00 +0300 Subject: UBIFS: add no_chk_data_crc mount option UBIFS read performance can be improved by skipping the CRC check when data nodes are read. This option can be used if the underlying media is considered to be highly reliable. Note that CRCs are always checked for metadata. Read speed on Arm platform with OneNAND goes from 19 MiB/s to 27 MiB/s with data CRC checking disabled. Signed-off-by: Adrian Hunter diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index 340512c..dd84ea3 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -89,6 +89,12 @@ fast_unmount do not commit on unmount; this option makes bulk_read read more in one go to take advantage of flash media that read faster sequentially no_bulk_read (*) do not bulk-read +no_chk_data_crc skip checking of CRCs on data nodes in order to + improve read performance. Use this option only + if the flash media is highly reliable. The effect + of this option is that corruption of the contents + of a file can go unnoticed. +chk_data_crc (*) do not skip checking CRCs on data nodes Quick usage instructions diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 054363f..40e2790 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -74,6 +74,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * @quiet: print no messages + * @chk_crc: indicates whether to always check the CRC * * This function checks node magic number and CRC checksum. This function also * validates node length to prevent UBIFS from becoming crazy when an attacker @@ -85,7 +86,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) * or magic. 
*/ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, - int offs, int quiet) + int offs, int quiet, int chk_crc) { int err = -EINVAL, type, node_len; uint32_t crc, node_crc, magic; @@ -121,6 +122,10 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, node_len > c->ranges[type].max_len) goto out_len; + if (!chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc) + if (c->no_chk_data_crc) + return 0; + crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); node_crc = le32_to_cpu(ch->crc); if (crc != node_crc) { @@ -722,7 +727,7 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, goto out; } - err = ubifs_check_node(c, buf, lnum, offs, 0); + err = ubifs_check_node(c, buf, lnum, offs, 0, 0); if (err) { ubifs_err("expected node type %d", type); return err; @@ -781,7 +786,7 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, goto out; } - err = ubifs_check_node(c, buf, lnum, offs, 0); + err = ubifs_check_node(c, buf, lnum, offs, 0, 0); if (err) { ubifs_err("expected node type %d", type); return err; diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index acf5c5f..0ed8247 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -87,7 +87,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, dbg_scan("scanning %s", dbg_ntype(ch->node_type)); - if (ubifs_check_node(c, buf, lnum, offs, quiet)) + if (ubifs_check_node(c, buf, lnum, offs, quiet, 1)) return SCANNED_A_CORRUPT_NODE; if (ch->node_type == UBIFS_PAD_NODE) { diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index b1c57e8..cf078b5 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -406,6 +406,11 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt) else if (c->mount_opts.bulk_read == 1) seq_printf(s, ",no_bulk_read"); + if (c->mount_opts.chk_data_crc == 2) + seq_printf(s, ",chk_data_crc"); + else if (c->mount_opts.chk_data_crc == 1) + seq_printf(s, ",no_chk_data_crc"); + return 0; } @@ -859,6 +864,8 @@ static int check_volume_empty(struct ubifs_info *c) * Opt_norm_unmount: run a journal commit before un-mounting * Opt_bulk_read: enable bulk-reads * Opt_no_bulk_read: disable bulk-reads + * Opt_chk_data_crc: check CRCs when reading data nodes + * Opt_no_chk_data_crc: do not check CRCs when reading data nodes * Opt_err: just end of array marker */ enum { @@ -866,6 +873,8 @@ enum { Opt_norm_unmount, Opt_bulk_read, Opt_no_bulk_read, + Opt_chk_data_crc, + Opt_no_chk_data_crc, Opt_err, }; @@ -874,6 +883,8 @@ static match_table_t tokens = { {Opt_norm_unmount, "norm_unmount"}, {Opt_bulk_read, "bulk_read"}, {Opt_no_bulk_read, "no_bulk_read"}, + {Opt_chk_data_crc, "chk_data_crc"}, + {Opt_no_chk_data_crc, "no_chk_data_crc"}, {Opt_err, NULL}, }; @@ -919,6 +930,14 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, c->mount_opts.bulk_read = 1; c->bulk_read = 0; break; + case Opt_chk_data_crc: + c->mount_opts.chk_data_crc = 2; + c->no_chk_data_crc = 0; + break; + case Opt_no_chk_data_crc: + c->mount_opts.chk_data_crc = 1; + c->no_chk_data_crc = 1; + break; default: ubifs_err("unrecognized mount option \"%s\" " "or missing value", p); @@ -1027,6 +1046,8 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; } + c->always_chk_crc = 1; + err = ubifs_read_superblock(c); if (err) goto out_free; @@ -1168,6 +1189,8 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_infos; + c->always_chk_crc = 0; + ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"", 
c->vi.ubi_num, c->vi.vol_id, c->vi.name); if (mounted_read_only) @@ -1313,6 +1336,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) mutex_lock(&c->umount_mutex); c->remounting_rw = 1; + c->always_chk_crc = 1; /* Check for enough free space */ if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) { @@ -1381,13 +1405,15 @@ static int ubifs_remount_rw(struct ubifs_info *c) c->bgt = NULL; ubifs_err("cannot spawn \"%s\", error %d", c->bgt_name, err); - return err; + goto out; } wake_up_process(c->bgt); c->orph_buf = vmalloc(c->leb_size); - if (!c->orph_buf) - return -ENOMEM; + if (!c->orph_buf) { + err = -ENOMEM; + goto out; + } /* Check for enough log space */ lnum = c->lhead_lnum + 1; @@ -1414,6 +1440,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) dbg_gen("re-mounted read-write"); c->vfs_sb->s_flags &= ~MS_RDONLY; c->remounting_rw = 0; + c->always_chk_crc = 0; mutex_unlock(&c->umount_mutex); return 0; @@ -1429,6 +1456,7 @@ out: c->ileb_buf = NULL; ubifs_lpt_free(c, 1); c->remounting_rw = 0; + c->always_chk_crc = 0; mutex_unlock(&c->umount_mutex); return err; } diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index d279012..66dc571 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -470,6 +470,10 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type, if (node_len != len) return 0; + if (type == UBIFS_DATA_NODE && !c->always_chk_crc) + if (c->no_chk_data_crc) + return 0; + crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); node_crc = le32_to_cpu(ch->crc); if (crc != node_crc) @@ -1687,7 +1691,7 @@ static int validate_data_node(struct ubifs_info *c, void *buf, goto out_err; } - err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0); + err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0); if (err) { ubifs_err("expected node type %d", UBIFS_DATA_NODE); goto out; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 8513239..d6ae3f7 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -894,10 +894,12 @@ struct ubifs_orphan { * struct ubifs_mount_opts - UBIFS-specific mount options information. 
* @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast) * @bulk_read: enable bulk-reads + * @chk_data_crc: check CRCs when reading data nodes */ struct ubifs_mount_opts { unsigned int unmount_mode:2; unsigned int bulk_read:2; + unsigned int chk_data_crc:2; }; /** @@ -1001,6 +1003,9 @@ struct ubifs_mount_opts { * @bulk_read: enable bulk-reads * @bulk_read_buf_size: buffer size for bulk-reads * + * @no_chk_data_crc: do not check CRCs when reading data nodes (except during + * recovery) + * * @dirty_pg_cnt: number of dirty pages (not used) * @dirty_zn_cnt: number of dirty znodes * @clean_zn_cnt: number of clean znodes @@ -1138,6 +1143,7 @@ struct ubifs_mount_opts { * @rcvrd_mst_node: recovered master node to write when mounting ro to rw * @size_tree: inode size information for recovery * @remounting_rw: set while remounting from ro to rw (sb flags have MS_RDONLY) + * @always_chk_crc: always check CRCs (while mounting and remounting rw) * @mount_opts: UBIFS-specific mount options * * @dbg_buf: a buffer of LEB size used for debugging purposes @@ -1244,6 +1250,8 @@ struct ubifs_info { int bulk_read; int bulk_read_buf_size; + int no_chk_data_crc; + atomic_long_t dirty_pg_cnt; atomic_long_t dirty_zn_cnt; atomic_long_t clean_zn_cnt; @@ -1374,6 +1382,7 @@ struct ubifs_info { struct ubifs_mst_node *rcvrd_mst_node; struct rb_root size_tree; int remounting_rw; + int always_chk_crc; struct ubifs_mount_opts mount_opts; #ifdef CONFIG_UBIFS_FS_DEBUG @@ -1416,7 +1425,7 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, int offs, int dtype); int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, - int offs, int quiet); + int offs, int quiet, int chk_crc); void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last); int ubifs_io_init(struct ubifs_info *c); -- cgit v0.10.2 From 2242c689ecc390fb4719f595751351d1ecc5c409 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 5 Sep 2008 11:56:05 +0300 Subject: UBIFS: improve znode splitting rules When inserting into a full znode it is split into two znodes. Because data node keys are usually consecutive, it is better to try to keep them together. This patch does a better job of that. Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 66dc571..e0878a4 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -1962,7 +1962,7 @@ static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode, { struct ubifs_znode *zn, *zi, *zp; int i, keep, move, appending = 0; - union ubifs_key *key = &zbr->key; + union ubifs_key *key = &zbr->key, *key1; ubifs_assert(n >= 0 && n <= c->fanout); @@ -2003,20 +2003,33 @@ again: zn->level = znode->level; /* Decide where to split */ - if (znode->level == 0 && n == c->fanout && - key_type(c, key) == UBIFS_DATA_KEY) { - union ubifs_key *key1; - - /* - * If this is an inode which is being appended - do not split - * it because no other zbranches can be inserted between - * zbranches of consecutive data nodes anyway. 
- */ - key1 = &znode->zbranch[n - 1].key; - if (key_inum(c, key1) == key_inum(c, key) && - key_type(c, key1) == UBIFS_DATA_KEY && - key_block(c, key1) == key_block(c, key) - 1) - appending = 1; + if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) { + /* Try not to split consecutive data keys */ + if (n == c->fanout) { + key1 = &znode->zbranch[n - 1].key; + if (key_inum(c, key1) == key_inum(c, key) && + key_type(c, key1) == UBIFS_DATA_KEY) + appending = 1; + } else + goto check_split; + } else if (appending && n != c->fanout) { + /* Try not to split consecutive data keys */ + appending = 0; +check_split: + if (n >= (c->fanout + 1) / 2) { + key1 = &znode->zbranch[0].key; + if (key_inum(c, key1) == key_inum(c, key) && + key_type(c, key1) == UBIFS_DATA_KEY) { + key1 = &znode->zbranch[n].key; + if (key_inum(c, key1) != key_inum(c, key) || + key_type(c, key1) != UBIFS_DATA_KEY) { + keep = n; + move = c->fanout - keep; + zi = znode; + goto do_split; + } + } + } } if (appending) { @@ -2046,6 +2059,8 @@ again: zbr->znode->parent = zn; } +do_split: + __set_bit(DIRTY_ZNODE, &zn->flags); atomic_long_inc(&c->dirty_zn_cnt); @@ -2072,14 +2087,11 @@ again: /* Insert new znode (produced by spitting) into the parent */ if (zp) { - i = n; + if (n == 0 && zi == znode && znode->iip == 0) + correct_parent_keys(c, znode); + /* Locate insertion point */ n = znode->iip + 1; - if (appending && n != c->fanout) - appending = 0; - - if (i == 0 && zi == znode && znode->iip == 0) - correct_parent_keys(c, znode); /* Tail recursion */ zbr->key = zn->zbranch[0].key; -- cgit v0.10.2 From ccb3eba72453a3b5aa37dda02e3a690449e3d229 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 8 Sep 2008 16:07:01 +0300 Subject: UBIFS: check data CRC when in error state When UBIFS switches to R/O mode because of an error, it is reasonable to enable data CRC checking. Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 40e2790..0168271 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -62,6 +62,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) { if (!c->ro_media) { c->ro_media = 1; + c->no_chk_data_crc = 0; ubifs_warn("switched to read-only mode, error %d", err); dbg_dump_stack(); } -- cgit v0.10.2 From 625bf371c1522764fc1cf2981b041c5f9a19e894 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 8 Sep 2008 16:13:38 +0300 Subject: UBIFS: use bit-fields when possible The "bulk_read" and "no_chk_data_crc" have only 2 values - 0 and 1. We already have bit-fields in corresponding data structers, so make "bulk_read" and "no_chk_data_crc" bit-fields as well. 
Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index d6ae3f7..542cbaf 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -331,6 +331,7 @@ struct ubifs_gced_idx_leb { * this inode * @dirty: non-zero if the inode is dirty * @xattr: non-zero if this is an extended attribute inode + * @bulk_read: non-zero if bulk-read should be used * @ui_mutex: serializes inode write-back with the rest of VFS operations, * serializes "clean <-> dirty" state changes, serializes bulk-read, * protects @dirty, @ui_size, and @xattr_size @@ -343,7 +344,6 @@ struct ubifs_gced_idx_leb { * @compr_type: default compression type used for this inode * @last_page_read: page number of last page read (for bulk read) * @read_in_a_row: number of consecutive pages read in a row (for bulk read) - * @bulk_read: indicates whether bulk-read should be used * @data_len: length of the data attached to the inode * @data: inode's data * @@ -385,6 +385,7 @@ struct ubifs_inode { unsigned int xattr_names; unsigned int dirty:1; unsigned int xattr:1; + unsigned int bulk_read:1; struct mutex ui_mutex; spinlock_t ui_lock; loff_t synced_i_size; @@ -393,7 +394,6 @@ struct ubifs_inode { int compr_type; pgoff_t last_page_read; pgoff_t read_in_a_row; - int bulk_read; int data_len; void *data; }; @@ -940,6 +940,7 @@ struct ubifs_mount_opts { * @cmt_state: commit state * @cs_lock: commit state lock * @cmt_wq: wait queue to sleep on if the log is full and a commit is running + * * @fast_unmount: do not run journal commit before un-mounting * @big_lpt: flag that LPT is too big to write whole during commit * @check_lpt_free: flag that indicates LPT GC may be needed @@ -947,6 +948,9 @@ struct ubifs_mount_opts { * optimization) * @nospace_rp: the same as @nospace, but additionally means that even reserved * pool is full + * @no_chk_data_crc: do not check CRCs when reading data nodes (except during + * recovery) + * @bulk_read: enable bulk-reads * * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and * @calc_idx_sz @@ -970,6 +974,7 @@ struct ubifs_mount_opts { * @mst_node: master node * @mst_offs: offset of valid master node * @mst_mutex: protects the master node area, @mst_node, and @mst_offs + * @bulk_read_buf_size: buffer size for bulk-reads * * @log_lebs: number of logical eraseblocks in the log * @log_bytes: log size in bytes @@ -1000,12 +1005,6 @@ struct ubifs_mount_opts { * @old_leb_cnt: count of logical eraseblocks before re-size * @ro_media: the underlying UBI volume is read-only * - * @bulk_read: enable bulk-reads - * @bulk_read_buf_size: buffer size for bulk-reads - * - * @no_chk_data_crc: do not check CRCs when reading data nodes (except during - * recovery) - * * @dirty_pg_cnt: number of dirty pages (not used) * @dirty_zn_cnt: number of dirty znodes * @clean_zn_cnt: number of clean znodes @@ -1188,11 +1187,14 @@ struct ubifs_info { int cmt_state; spinlock_t cs_lock; wait_queue_head_t cmt_wq; + unsigned int fast_unmount:1; unsigned int big_lpt:1; unsigned int check_lpt_free:1; unsigned int nospace:1; unsigned int nospace_rp:1; + unsigned int no_chk_data_crc:1; + unsigned int bulk_read:1; struct mutex tnc_mutex; struct ubifs_zbranch zroot; @@ -1217,6 +1219,7 @@ struct ubifs_info { struct ubifs_mst_node *mst_node; int mst_offs; struct mutex mst_mutex; + int bulk_read_buf_size; int log_lebs; long long log_bytes; @@ -1247,11 +1250,6 @@ struct ubifs_info { int old_leb_cnt; int ro_media; - int bulk_read; - int bulk_read_buf_size; - - int no_chk_data_crc; - atomic_long_t dirty_pg_cnt; 
atomic_long_t dirty_zn_cnt; atomic_long_t clean_zn_cnt; -- cgit v0.10.2 From 2094c334fdebbcceddf21f97cb16b144707af56e Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 5 Sep 2008 15:20:04 +0300 Subject: UBIFS: correct key comparison The comparison was working, but more by accident than design. Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c index a25c1cc..b48db99 100644 --- a/fs/ubifs/tnc_misc.c +++ b/fs/ubifs/tnc_misc.c @@ -480,8 +480,8 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, } /* Make sure the key of the read node is correct */ - key_read(c, key, &key1); - if (memcmp(node + UBIFS_KEY_OFFSET, &key1, c->key_len)) { + key_read(c, node + UBIFS_KEY_OFFSET, &key1); + if (!keys_eq(c, key, &key1)) { ubifs_err("bad key in node at LEB %d:%d", zbr->lnum, zbr->offs); dbg_tnc("looked for key %s found node's key %s", -- cgit v0.10.2 From ed382d5898ccfc3d7ba775be2f1596f6a1547935 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 5 Sep 2008 16:17:42 +0300 Subject: UBIFS: ensure data read beyond i_size is zeroed out correctly Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index cdcfe95..2f20a49 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -147,6 +147,12 @@ static int do_readpage(struct page *page) err = ret; if (err != -ENOENT) break; + } else if (block + 1 == beyond) { + int dlen = le32_to_cpu(dn->size); + int ilen = i_size & (UBIFS_BLOCK_SIZE - 1); + + if (ilen && ilen < dlen) + memset(addr + ilen, 0, dlen - ilen); } } if (++i >= UBIFS_BLOCKS_PER_PAGE) @@ -601,7 +607,7 @@ static int populate_page(struct ubifs_info *c, struct page *page, addr = zaddr = kmap(page); - end_index = (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; if (!i_size || page->index > end_index) { memset(addr, 0, PAGE_CACHE_SIZE); goto out_hole; @@ -649,7 +655,7 @@ static int populate_page(struct ubifs_info *c, struct page *page, if (end_index == page->index) { int len = i_size & (PAGE_CACHE_SIZE - 1); - if (len < read) + if (len && len < read) memset(zaddr + len, 0, read - len); } diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h index a9ecbd9..0b37804 100644 --- a/fs/ubifs/ubifs-media.h +++ b/fs/ubifs/ubifs-media.h @@ -75,7 +75,6 @@ */ #define UBIFS_BLOCK_SIZE 4096 #define UBIFS_BLOCK_SHIFT 12 -#define UBIFS_BLOCK_MASK 0x00000FFF /* UBIFS padding byte pattern (must not be first or last byte of node magic) */ #define UBIFS_PADDING_BYTE 0xCE -- cgit v0.10.2 From ba60ecabf067c8ecbde47af4d99b74ee57234d8e Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 8 Sep 2008 16:38:01 +0300 Subject: UBIFS: fix races in bit-fields We cannot store bit-fields together if the processes which change them may race, unless we serialize them. Thus, move the nospc and nospc_rp bit-fields eway from the mount option/constant bit-fields, to avoid races. Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 542cbaf..c3ac5a8 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -334,7 +334,7 @@ struct ubifs_gced_idx_leb { * @bulk_read: non-zero if bulk-read should be used * @ui_mutex: serializes inode write-back with the rest of VFS operations, * serializes "clean <-> dirty" state changes, serializes bulk-read, - * protects @dirty, @ui_size, and @xattr_size + * protects @dirty, @bulk_read, @ui_size, and @xattr_size * @ui_lock: protects @synced_i_size * @synced_i_size: synchronized size of inode, i.e. 
the value of inode size * currently stored on the flash; used only for regular file @@ -944,10 +944,6 @@ struct ubifs_mount_opts { * @fast_unmount: do not run journal commit before un-mounting * @big_lpt: flag that LPT is too big to write whole during commit * @check_lpt_free: flag that indicates LPT GC may be needed - * @nospace: non-zero if the file-system does not have flash space (used as - * optimization) - * @nospace_rp: the same as @nospace, but additionally means that even reserved - * pool is full * @no_chk_data_crc: do not check CRCs when reading data nodes (except during * recovery) * @bulk_read: enable bulk-reads @@ -1017,12 +1013,17 @@ struct ubifs_mount_opts { * but which still have to be taken into account because * the index has not been committed so far * @space_lock: protects @budg_idx_growth, @budg_data_growth, @budg_dd_growth, - * @budg_uncommited_idx, @min_idx_lebs, @old_idx_sz, and @lst; + * @budg_uncommited_idx, @min_idx_lebs, @old_idx_sz, @lst, + * @nospace, and @nospace_rp; * @min_idx_lebs: minimum number of LEBs required for the index * @old_idx_sz: size of index on flash * @calc_idx_sz: temporary variable which is used to calculate new index size * (contains accurate new index size at end of TNC commit start) * @lst: lprops statistics + * @nospace: non-zero if the file-system does not have flash space (used as + * optimization) + * @nospace_rp: the same as @nospace, but additionally means that even reserved + * pool is full * * @page_budget: budget for a page * @inode_budget: budget for an inode @@ -1191,8 +1192,6 @@ struct ubifs_info { unsigned int fast_unmount:1; unsigned int big_lpt:1; unsigned int check_lpt_free:1; - unsigned int nospace:1; - unsigned int nospace_rp:1; unsigned int no_chk_data_crc:1; unsigned int bulk_read:1; @@ -1263,6 +1262,8 @@ struct ubifs_info { unsigned long long old_idx_sz; unsigned long long calc_idx_sz; struct ubifs_lp_stats lst; + unsigned int nospace:1; + unsigned int nospace_rp:1; int page_budget; int inode_budget; -- cgit v0.10.2 From be61678b1d97c9b47bb839beb3c3f48da36af072 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 8 Sep 2008 18:08:39 +0300 Subject: UBIFS: fix commentary Znode may refer both data nodes and indexing nodes Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index c3ac5a8..49b06c9 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -707,8 +707,8 @@ struct ubifs_jhead { * struct ubifs_zbranch - key/coordinate/length branch stored in znodes. * @key: key * @znode: znode address in memory - * @lnum: LEB number of the indexing node - * @offs: offset of the indexing node within @lnum + * @lnum: LEB number of the target node (indexing node or data node) + * @offs: target node offset within @lnum * @len: target node length */ struct ubifs_zbranch { -- cgit v0.10.2 From b5e426e9a4a8d4ccc65a6849420d47f87c080d5d Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 9 Sep 2008 11:20:35 +0300 Subject: UBIFS: update dbg_dump_inode 'dbg_dump_inode()' is quite outdated and does not print all the fileds. 
Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index d7f7645..32071ec 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -222,30 +222,38 @@ void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode) { const struct ubifs_inode *ui = ubifs_inode(inode); - printk(KERN_DEBUG "inode %lu\n", inode->i_ino); - printk(KERN_DEBUG "size %llu\n", + printk(KERN_DEBUG "Dump in-memory inode:"); + printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino); + printk(KERN_DEBUG "\tsize %llu\n", (unsigned long long)i_size_read(inode)); - printk(KERN_DEBUG "nlink %u\n", inode->i_nlink); - printk(KERN_DEBUG "uid %u\n", (unsigned int)inode->i_uid); - printk(KERN_DEBUG "gid %u\n", (unsigned int)inode->i_gid); - printk(KERN_DEBUG "atime %u.%u\n", + printk(KERN_DEBUG "\tnlink %u\n", inode->i_nlink); + printk(KERN_DEBUG "\tuid %u\n", (unsigned int)inode->i_uid); + printk(KERN_DEBUG "\tgid %u\n", (unsigned int)inode->i_gid); + printk(KERN_DEBUG "\tatime %u.%u\n", (unsigned int)inode->i_atime.tv_sec, (unsigned int)inode->i_atime.tv_nsec); - printk(KERN_DEBUG "mtime %u.%u\n", + printk(KERN_DEBUG "\tmtime %u.%u\n", (unsigned int)inode->i_mtime.tv_sec, (unsigned int)inode->i_mtime.tv_nsec); - printk(KERN_DEBUG "ctime %u.%u\n", + printk(KERN_DEBUG "\tctime %u.%u\n", (unsigned int)inode->i_ctime.tv_sec, (unsigned int)inode->i_ctime.tv_nsec); - printk(KERN_DEBUG "creat_sqnum %llu\n", ui->creat_sqnum); - printk(KERN_DEBUG "xattr_size %u\n", ui->xattr_size); - printk(KERN_DEBUG "xattr_cnt %u\n", ui->xattr_cnt); - printk(KERN_DEBUG "xattr_names %u\n", ui->xattr_names); - printk(KERN_DEBUG "dirty %u\n", ui->dirty); - printk(KERN_DEBUG "xattr %u\n", ui->xattr); - printk(KERN_DEBUG "flags %d\n", ui->flags); - printk(KERN_DEBUG "compr_type %d\n", ui->compr_type); - printk(KERN_DEBUG "data_len %d\n", ui->data_len); + printk(KERN_DEBUG "\tcreat_sqnum %llu\n", ui->creat_sqnum); + printk(KERN_DEBUG "\txattr_size %u\n", ui->xattr_size); + printk(KERN_DEBUG "\txattr_cnt %u\n", ui->xattr_cnt); + printk(KERN_DEBUG "\txattr_names %u\n", ui->xattr_names); + printk(KERN_DEBUG "\tdirty %u\n", ui->dirty); + printk(KERN_DEBUG "\txattr %u\n", ui->xattr); + printk(KERN_DEBUG "\tbulk_read %u\n", ui->xattr); + printk(KERN_DEBUG "\tsynced_i_size %llu\n", + (unsigned long long)ui->synced_i_size); + printk(KERN_DEBUG "\tui_size %llu\n", + (unsigned long long)ui->ui_size); + printk(KERN_DEBUG "\tflags %d\n", ui->flags); + printk(KERN_DEBUG "\tcompr_type %d\n", ui->compr_type); + printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read); + printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row); + printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len); } void dbg_dump_node(const struct ubifs_info *c, const void *node) -- cgit v0.10.2 From af2eb5637b88f7b8edf295ad3880706c5c30c324 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 9 Sep 2008 12:23:50 +0300 Subject: UBIFS: correct comment for commit_on_unmount Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index cf078b5..dae1c62 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1465,12 +1465,9 @@ out: * commit_on_unmount - commit the journal when un-mounting. * @c: UBIFS file-system description object * - * This function is called during un-mounting and it commits the journal unless - * the "fast unmount" mode is enabled. It also avoids committing the journal if - * it contains too few data. 
- * - * Sometimes recovery requires the journal to be committed at least once, and - * this function takes care about this. + * This function is called during un-mounting and re-mounting, and it commits + * the journal unless the "fast unmount" mode is enabled. It also avoids + * committing the journal if it contains too few data. */ static void commit_on_unmount(struct ubifs_info *c) { -- cgit v0.10.2 From 403e12ab30ab160e1015bd998f0abc1865c574e0 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 9 Sep 2008 12:31:37 +0300 Subject: UBIFS: commit on sync_fs Commit the journal when the FS is sync'ed. This will make statfs provide better free space report. And we anyway advice our users to sync the FS if they want better statfs report. Signed-off-by: Artem Bityutskiy diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index dae1c62..7e1f3ef 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -418,6 +418,7 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) { struct ubifs_info *c = sb->s_fs_info; int i, ret = 0, err; + long long bud_bytes; if (c->jheads) for (i = 0; i < c->jhead_cnt; i++) { @@ -425,6 +426,17 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) if (err && !ret) ret = err; } + + /* Commit the journal unless it has too few data */ + spin_lock(&c->buds_lock); + bud_bytes = c->bud_bytes; + spin_unlock(&c->buds_lock); + if (bud_bytes > c->leb_size) { + err = ubifs_run_commit(c); + if (err) + return err; + } + /* * We ought to call sync for c->ubi but it does not have one. If it had * it would in turn call mtd->sync, however mtd operations are -- cgit v0.10.2 From bed79935de9a658678f44b88a097367d3b26429f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 11 Sep 2008 14:25:44 +0300 Subject: UBIFS: allow for sync_fs when read-only sync_fs can be called even if the file system is mounted read-only. Ensure the commit is not run in that case. Reported-by: Zoltan Sogor Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 7e1f3ef..7fd759d 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -420,21 +420,22 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) int i, ret = 0, err; long long bud_bytes; - if (c->jheads) + if (c->jheads) { for (i = 0; i < c->jhead_cnt; i++) { err = ubifs_wbuf_sync(&c->jheads[i].wbuf); if (err && !ret) ret = err; } - /* Commit the journal unless it has too few data */ - spin_lock(&c->buds_lock); - bud_bytes = c->bud_bytes; - spin_unlock(&c->buds_lock); - if (bud_bytes > c->leb_size) { - err = ubifs_run_commit(c); - if (err) - return err; + /* Commit the journal unless it has too little data */ + spin_lock(&c->buds_lock); + bud_bytes = c->bud_bytes; + spin_unlock(&c->buds_lock); + if (bud_bytes > c->leb_size) { + err = ubifs_run_commit(c); + if (err) + return err; + } } /* -- cgit v0.10.2 From 46773be497a05010a2873e9ad96d739fb352c1e4 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 11 Sep 2008 12:57:49 +0300 Subject: UBIFS: improve garbage collection Make garbage collection try to keep data nodes from the same inode together and in ascending order. This improves performance when reading those nodes especially when bulk-read is used. Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index a6b633a..0bef650 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c @@ -96,6 +96,48 @@ static int switch_gc_head(struct ubifs_info *c) } /** + * joinup - bring data nodes for an inode together. 
+ * @c: UBIFS file-system description object + * @sleb: describes scanned LEB + * @inum: inode number + * @blk: block number + * @data: list to which to add data nodes + * + * This function looks at the first few nodes in the scanned LEB @sleb and adds + * them to @data if they are data nodes from @inum and have a larger block + * number than @blk. This function returns %0 on success and a negative error + * code on failure. + */ +static int joinup(struct ubifs_info *c, struct ubifs_scan_leb *sleb, ino_t inum, + unsigned int blk, struct list_head *data) +{ + int err, cnt = 6, lnum = sleb->lnum, offs; + struct ubifs_scan_node *snod, *tmp; + union ubifs_key *key; + + list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) { + key = &snod->key; + if (key_inum(c, key) == inum && + key_type(c, key) == UBIFS_DATA_KEY && + key_block(c, key) > blk) { + offs = snod->offs; + err = ubifs_tnc_has_node(c, key, 0, lnum, offs, 0); + if (err < 0) + return err; + list_del(&snod->list); + if (err) { + list_add_tail(&snod->list, data); + blk = key_block(c, key); + } else + kfree(snod); + cnt = 6; + } else if (--cnt == 0) + break; + } + return 0; +} + +/** * move_nodes - move nodes. * @c: UBIFS file-system description object * @sleb: describes nodes to move @@ -116,16 +158,21 @@ static int switch_gc_head(struct ubifs_info *c) static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) { struct ubifs_scan_node *snod, *tmp; - struct list_head large, medium, small; + struct list_head data, large, medium, small; struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; int avail, err, min = INT_MAX; + unsigned int blk = 0; + ino_t inum = 0; + INIT_LIST_HEAD(&data); INIT_LIST_HEAD(&large); INIT_LIST_HEAD(&medium); INIT_LIST_HEAD(&small); - list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) { - struct list_head *lst; + while (!list_empty(&sleb->nodes)) { + struct list_head *lst = sleb->nodes.next; + + snod = list_entry(lst, struct ubifs_scan_node, list); ubifs_assert(snod->type != UBIFS_IDX_NODE); ubifs_assert(snod->type != UBIFS_REF_NODE); @@ -136,7 +183,6 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) if (err < 0) goto out; - lst = &snod->list; list_del(lst); if (!err) { /* The node is obsolete, remove it from the list */ @@ -145,15 +191,30 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) } /* - * Sort the list of nodes so that large nodes go first, and - * small nodes go last. + * Sort the list of nodes so that data nodes go first, large + * nodes go second, and small nodes go last. */ - if (snod->len > MEDIUM_NODE_WM) - list_add(lst, &large); + if (key_type(c, &snod->key) == UBIFS_DATA_KEY) { + if (inum != key_inum(c, &snod->key)) { + if (inum) { + /* + * Try to move data nodes from the same + * inode together. + */ + err = joinup(c, sleb, inum, blk, &data); + if (err) + goto out; + } + inum = key_inum(c, &snod->key); + blk = key_block(c, &snod->key); + } + list_add_tail(lst, &data); + } else if (snod->len > MEDIUM_NODE_WM) + list_add_tail(lst, &large); else if (snod->len > SMALL_NODE_WM) - list_add(lst, &medium); + list_add_tail(lst, &medium); else - list_add(lst, &small); + list_add_tail(lst, &small); /* And find the smallest node */ if (snod->len < min) @@ -164,6 +225,7 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) * Join the tree lists so that we'd have one roughly sorted list * ('large' will be the head of the joined list). 
*/ + list_splice(&data, &large); list_splice(&medium, large.prev); list_splice(&small, large.prev); -- cgit v0.10.2 From 5c0013c16bd2ee08ffef1a1365622556a57218f5 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 12 Sep 2008 10:34:51 +0300 Subject: UBIFS: fix bulk-read handling uptodate pages Bulk-read skips uptodate pages but this was putting its array index out and causing it to treat subsequent pages as holes. Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 2f20a49..51cf511 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -595,7 +595,7 @@ out: static int populate_page(struct ubifs_info *c, struct page *page, struct bu_info *bu, int *n) { - int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 1, read = 0; + int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0; struct inode *inode = page->mapping->host; loff_t i_size = i_size_read(inode); unsigned int page_block; @@ -609,6 +609,7 @@ static int populate_page(struct ubifs_info *c, struct page *page, end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; if (!i_size || page->index > end_index) { + hole = 1; memset(addr, 0, PAGE_CACHE_SIZE); goto out_hole; } @@ -617,10 +618,10 @@ static int populate_page(struct ubifs_info *c, struct page *page, while (1) { int err, len, out_len, dlen; - if (nn >= bu->cnt || - key_block(c, &bu->zbranch[nn].key) != page_block) + if (nn >= bu->cnt) { + hole = 1; memset(addr, 0, UBIFS_BLOCK_SIZE); - else { + } else if (key_block(c, &bu->zbranch[nn].key) == page_block) { struct ubifs_data_node *dn; dn = bu->buf + (bu->zbranch[nn].offs - offs); @@ -643,8 +644,13 @@ static int populate_page(struct ubifs_info *c, struct page *page, memset(addr + len, 0, UBIFS_BLOCK_SIZE - len); nn += 1; - hole = 0; read = (i << UBIFS_BLOCK_SHIFT) + len; + } else if (key_block(c, &bu->zbranch[nn].key) < page_block) { + nn += 1; + continue; + } else { + hole = 1; + memset(addr, 0, UBIFS_BLOCK_SIZE); } if (++i >= UBIFS_BLOCKS_PER_PAGE) break; -- cgit v0.10.2 From 73944a6de048c2c49422e9063e57198256efd23e Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 12 Sep 2008 18:13:31 +0300 Subject: UBIFS: add more debugging messages for LPT Also add debugging checks for LPT size and separate out c->check_lpt_free from unrelated bitfields. 
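To make the new size check easier to follow, here is a minimal stand-alone sketch of the accounting idea (illustrative only; it does not reproduce the dbg_chk_lpt_sz() interface added below, and the "lpt_chk" type is invented for the example): every chunk written during the LPT commit is added to a running total, padding lost at the end of each LEB is tracked as wastage, and at the end the useful bytes are compared against the precomputed worst-case LPT size.

#include <stdio.h>

/* Hypothetical accounting state for one LPT commit (illustration only). */
struct lpt_chk {
	long long written;	/* bytes written so far, including padding */
	long long wastage;	/* padding lost at the ends of LEBs        */
	long long lpt_sz;	/* precomputed worst-case LPT size         */
};

static void chk_add_node(struct lpt_chk *c, int len)
{
	c->written += len;
}

static void chk_add_waste(struct lpt_chk *c, int len)
{
	c->written += len;
	c->wastage += len;
}

/* Returns 0 if the commit stayed within the budget, -1 otherwise. */
static int chk_finish(const struct lpt_chk *c)
{
	if (c->written - c->wastage > c->lpt_sz) {
		fprintf(stderr, "wrote %lld (waste %lld), budget %lld\n",
			c->written, c->wastage, c->lpt_sz);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct lpt_chk c = { .lpt_sz = 4096 };

	chk_add_node(&c, 1000);
	chk_add_waste(&c, 24);	/* alignment padding at the end of a LEB */
	chk_add_node(&c, 2000);
	return chk_finish(&c) ? 1 : 0;
}

The check added by the patch goes further: it is driven by an action code so that the layout pass and the actual write pass can be cross-checked against each other as well as against c->lpt_sz.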
Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 32071ec..7186400 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -655,6 +655,43 @@ void dbg_dump_lprops(struct ubifs_info *c) } } +void dbg_dump_lpt_info(struct ubifs_info *c) +{ + int i; + + spin_lock(&dbg_lock); + printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz); + printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz); + printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz); + printk(KERN_DEBUG "\tltab_sz: %d\n", c->ltab_sz); + printk(KERN_DEBUG "\tlsave_sz: %d\n", c->lsave_sz); + printk(KERN_DEBUG "\tbig_lpt: %d\n", c->big_lpt); + printk(KERN_DEBUG "\tlpt_hght: %d\n", c->lpt_hght); + printk(KERN_DEBUG "\tpnode_cnt: %d\n", c->pnode_cnt); + printk(KERN_DEBUG "\tnnode_cnt: %d\n", c->nnode_cnt); + printk(KERN_DEBUG "\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt); + printk(KERN_DEBUG "\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt); + printk(KERN_DEBUG "\tlsave_cnt: %d\n", c->lsave_cnt); + printk(KERN_DEBUG "\tspace_bits: %d\n", c->space_bits); + printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits); + printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits); + printk(KERN_DEBUG "\tlpt_spc_bits: %d\n", c->lpt_spc_bits); + printk(KERN_DEBUG "\tpcnt_bits: %d\n", c->pcnt_bits); + printk(KERN_DEBUG "\tlnum_bits: %d\n", c->lnum_bits); + printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs); + printk(KERN_DEBUG "\tLPT head is at %d:%d\n", + c->nhead_lnum, c->nhead_offs); + printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs); + if (c->big_lpt) + printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n", + c->lsave_lnum, c->lsave_offs); + for (i = 0; i < c->lpt_lebs; i++) + printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d " + "cmt %d\n", i + c->lpt_first, c->ltab[i].free, + c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt); + spin_unlock(&dbg_lock); +} + void dbg_dump_leb(const struct ubifs_info *c, int lnum) { struct ubifs_scan_leb *sleb; diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 50315fc..33d6b95 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -224,6 +224,7 @@ void dbg_dump_lstats(const struct ubifs_lp_stats *lst); void dbg_dump_budg(struct ubifs_info *c); void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp); void dbg_dump_lprops(struct ubifs_info *c); +void dbg_dump_lpt_info(struct ubifs_info *c); void dbg_dump_leb(const struct ubifs_info *c, int lnum); void dbg_dump_znode(const struct ubifs_info *c, const struct ubifs_znode *znode); @@ -249,6 +250,8 @@ int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot); int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot); int dbg_check_cats(struct ubifs_info *c); int dbg_check_ltab(struct ubifs_info *c); +int dbg_chk_lpt_free_spc(struct ubifs_info *c); +int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len); int dbg_check_synced_i_size(struct inode *inode); int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir); int dbg_check_tnc(struct ubifs_info *c, int extra); @@ -367,6 +370,7 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define dbg_dump_budg(c) ({}) #define dbg_dump_lprop(c, lp) ({}) #define dbg_dump_lprops(c) ({}) +#define dbg_dump_lpt_info(c) ({}) #define dbg_dump_leb(c, lnum) ({}) #define dbg_dump_znode(c, znode) ({}) #define dbg_dump_heap(c, heap, cat) ({}) @@ -379,6 +383,8 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define dbg_check_old_index(c, zroot) 0 #define 
dbg_check_cats(c) 0 #define dbg_check_ltab(c) 0 +#define dbg_chk_lpt_free_spc(c) 0 +#define dbg_chk_lpt_sz(c, action, len) 0 #define dbg_check_synced_i_size(inode) 0 #define dbg_check_dir_size(c, dir) 0 #define dbg_check_tnc(c, x) 0 diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 9ff2463..cd11b23 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -109,7 +109,8 @@ static void do_calc_lpt_geom(struct ubifs_info *c) c->lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; c->lpt_sz += (long long)c->nnode_cnt * c->nnode_sz; c->lpt_sz += c->ltab_sz; - c->lpt_sz += c->lsave_sz; + if (c->big_lpt) + c->lpt_sz += c->lsave_sz; /* Add wastage */ sz = c->lpt_sz; diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 5f0b83e..8546865 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -177,8 +177,6 @@ static int alloc_lpt_leb(struct ubifs_info *c, int *lnum) return 0; } } - dbg_err("last LEB %d", *lnum); - dump_stack(); return -ENOSPC; } @@ -193,6 +191,9 @@ static int layout_cnodes(struct ubifs_info *c) int lnum, offs, len, alen, done_lsave, done_ltab, err; struct ubifs_cnode *cnode; + err = dbg_chk_lpt_sz(c, 0, 0); + if (err) + return err; cnode = c->lpt_cnext; if (!cnode) return 0; @@ -206,6 +207,7 @@ static int layout_cnodes(struct ubifs_info *c) c->lsave_lnum = lnum; c->lsave_offs = offs; offs += c->lsave_sz; + dbg_chk_lpt_sz(c, 1, c->lsave_sz); } if (offs + c->ltab_sz <= c->leb_size) { @@ -213,6 +215,7 @@ static int layout_cnodes(struct ubifs_info *c) c->ltab_lnum = lnum; c->ltab_offs = offs; offs += c->ltab_sz; + dbg_chk_lpt_sz(c, 1, c->ltab_sz); } do { @@ -226,9 +229,10 @@ static int layout_cnodes(struct ubifs_info *c) while (offs + len > c->leb_size) { alen = ALIGN(offs, c->min_io_size); upd_ltab(c, lnum, c->leb_size - alen, alen - offs); + dbg_chk_lpt_sz(c, 2, alen - offs); err = alloc_lpt_leb(c, &lnum); if (err) - return err; + goto no_space; offs = 0; ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); @@ -238,6 +242,7 @@ static int layout_cnodes(struct ubifs_info *c) c->lsave_lnum = lnum; c->lsave_offs = offs; offs += c->lsave_sz; + dbg_chk_lpt_sz(c, 1, c->lsave_sz); continue; } if (!done_ltab) { @@ -245,6 +250,7 @@ static int layout_cnodes(struct ubifs_info *c) c->ltab_lnum = lnum; c->ltab_offs = offs; offs += c->ltab_sz; + dbg_chk_lpt_sz(c, 1, c->ltab_sz); continue; } break; @@ -257,6 +263,7 @@ static int layout_cnodes(struct ubifs_info *c) c->lpt_offs = offs; } offs += len; + dbg_chk_lpt_sz(c, 1, len); cnode = cnode->cnext; } while (cnode && cnode != c->lpt_cnext); @@ -265,9 +272,10 @@ static int layout_cnodes(struct ubifs_info *c) if (offs + c->lsave_sz > c->leb_size) { alen = ALIGN(offs, c->min_io_size); upd_ltab(c, lnum, c->leb_size - alen, alen - offs); + dbg_chk_lpt_sz(c, 2, alen - offs); err = alloc_lpt_leb(c, &lnum); if (err) - return err; + goto no_space; offs = 0; ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); @@ -276,6 +284,7 @@ static int layout_cnodes(struct ubifs_info *c) c->lsave_lnum = lnum; c->lsave_offs = offs; offs += c->lsave_sz; + dbg_chk_lpt_sz(c, 1, c->lsave_sz); } /* Make sure to place LPT's own lprops table */ @@ -283,9 +292,10 @@ static int layout_cnodes(struct ubifs_info *c) if (offs + c->ltab_sz > c->leb_size) { alen = ALIGN(offs, c->min_io_size); upd_ltab(c, lnum, c->leb_size - alen, alen - offs); + dbg_chk_lpt_sz(c, 2, alen - offs); err = alloc_lpt_leb(c, &lnum); if (err) - return err; + goto no_space; offs = 0; ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); @@ -294,11 +304,23 @@ static int 
layout_cnodes(struct ubifs_info *c) c->ltab_lnum = lnum; c->ltab_offs = offs; offs += c->ltab_sz; + dbg_chk_lpt_sz(c, 1, c->ltab_sz); } alen = ALIGN(offs, c->min_io_size); upd_ltab(c, lnum, c->leb_size - alen, alen - offs); + dbg_chk_lpt_sz(c, 4, alen - offs); + err = dbg_chk_lpt_sz(c, 3, alen); + if (err) + return err; return 0; + +no_space: + ubifs_err("LPT out of space"); + dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " + "done_lsave %d", lnum, offs, len, done_ltab, done_lsave); + dbg_dump_lpt_info(c); + return err; } /** @@ -333,8 +355,6 @@ static int realloc_lpt_leb(struct ubifs_info *c, int *lnum) *lnum = i + c->lpt_first; return 0; } - dbg_err("last LEB %d", *lnum); - dump_stack(); return -ENOSPC; } @@ -369,12 +389,14 @@ static int write_cnodes(struct ubifs_info *c) done_lsave = 1; ubifs_pack_lsave(c, buf + offs, c->lsave); offs += c->lsave_sz; + dbg_chk_lpt_sz(c, 1, c->lsave_sz); } if (offs + c->ltab_sz <= c->leb_size) { done_ltab = 1; ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); offs += c->ltab_sz; + dbg_chk_lpt_sz(c, 1, c->ltab_sz); } /* Loop for each cnode */ @@ -392,10 +414,12 @@ static int write_cnodes(struct ubifs_info *c) alen, UBI_SHORTTERM); if (err) return err; + dbg_chk_lpt_sz(c, 4, alen - wlen); } + dbg_chk_lpt_sz(c, 2, 0); err = realloc_lpt_leb(c, &lnum); if (err) - return err; + goto no_space; offs = 0; from = 0; ubifs_assert(lnum >= c->lpt_first && @@ -408,12 +432,14 @@ static int write_cnodes(struct ubifs_info *c) done_lsave = 1; ubifs_pack_lsave(c, buf + offs, c->lsave); offs += c->lsave_sz; + dbg_chk_lpt_sz(c, 1, c->lsave_sz); continue; } if (!done_ltab) { done_ltab = 1; ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); offs += c->ltab_sz; + dbg_chk_lpt_sz(c, 1, c->ltab_sz); continue; } break; @@ -435,6 +461,7 @@ static int write_cnodes(struct ubifs_info *c) clear_bit(COW_ZNODE, &cnode->flags); smp_mb__after_clear_bit(); offs += len; + dbg_chk_lpt_sz(c, 1, len); cnode = cnode->cnext; } while (cnode && cnode != c->lpt_cnext); @@ -448,9 +475,10 @@ static int write_cnodes(struct ubifs_info *c) UBI_SHORTTERM); if (err) return err; + dbg_chk_lpt_sz(c, 2, alen - wlen); err = realloc_lpt_leb(c, &lnum); if (err) - return err; + goto no_space; offs = 0; ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); @@ -461,6 +489,7 @@ static int write_cnodes(struct ubifs_info *c) done_lsave = 1; ubifs_pack_lsave(c, buf + offs, c->lsave); offs += c->lsave_sz; + dbg_chk_lpt_sz(c, 1, c->lsave_sz); } /* Make sure to place LPT's own lprops table */ @@ -473,9 +502,10 @@ static int write_cnodes(struct ubifs_info *c) UBI_SHORTTERM); if (err) return err; + dbg_chk_lpt_sz(c, 2, alen - wlen); err = realloc_lpt_leb(c, &lnum); if (err) - return err; + goto no_space; offs = 0; ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); @@ -486,6 +516,7 @@ static int write_cnodes(struct ubifs_info *c) done_ltab = 1; ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); offs += c->ltab_sz; + dbg_chk_lpt_sz(c, 1, c->ltab_sz); } /* Write remaining data in buffer */ @@ -495,6 +526,12 @@ static int write_cnodes(struct ubifs_info *c) err = ubifs_leb_write(c, lnum, buf + from, from, alen, UBI_SHORTTERM); if (err) return err; + + dbg_chk_lpt_sz(c, 4, alen - wlen); + err = dbg_chk_lpt_sz(c, 3, ALIGN(offs, c->min_io_size)); + if (err) + return err; + c->nhead_lnum = lnum; c->nhead_offs = ALIGN(offs, c->min_io_size); @@ -503,7 +540,15 @@ static int write_cnodes(struct ubifs_info *c) dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs); if (c->big_lpt) dbg_lp("LPT lsave is at %d:%d", 
c->lsave_lnum, c->lsave_offs); + return 0; + +no_space: + ubifs_err("LPT out of space mismatch"); + dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " + "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); + dbg_dump_lpt_info(c); + return err; } /** @@ -1156,6 +1201,9 @@ int ubifs_lpt_start_commit(struct ubifs_info *c) dbg_lp(""); mutex_lock(&c->lp_mutex); + err = dbg_chk_lpt_free_spc(c); + if (err) + goto out; err = dbg_check_ltab(c); if (err) goto out; @@ -1645,4 +1693,121 @@ int dbg_check_ltab(struct ubifs_info *c) return 0; } +/** + * dbg_chk_lpt_free_spc - check LPT free space is enough to write entire LPT. + * @c: the UBIFS file-system description object + * + * This function returns %0 on success and a negative error code on failure. + */ +int dbg_chk_lpt_free_spc(struct ubifs_info *c) +{ + long long free = 0; + int i; + + for (i = 0; i < c->lpt_lebs; i++) { + if (c->ltab[i].tgc || c->ltab[i].cmt) + continue; + if (i + c->lpt_first == c->nhead_lnum) + free += c->leb_size - c->nhead_offs; + else if (c->ltab[i].free == c->leb_size) + free += c->leb_size; + } + if (free < c->lpt_sz) { + dbg_err("LPT space error: free %lld lpt_sz %lld", + free, c->lpt_sz); + dbg_dump_lpt_info(c); + return -EINVAL; + } + return 0; +} + +/** + * dbg_chk_lpt_sz - check LPT does not write more than LPT size. + * @c: the UBIFS file-system description object + * @action: action + * @len: length written + * + * This function returns %0 on success and a negative error code on failure. + */ +int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) +{ + long long chk_lpt_sz, lpt_sz; + int err = 0; + + switch (action) { + case 0: + c->chk_lpt_sz = 0; + c->chk_lpt_sz2 = 0; + c->chk_lpt_lebs = 0; + c->chk_lpt_wastage = 0; + if (c->dirty_pn_cnt > c->pnode_cnt) { + dbg_err("dirty pnodes %d exceed max %d", + c->dirty_pn_cnt, c->pnode_cnt); + err = -EINVAL; + } + if (c->dirty_nn_cnt > c->nnode_cnt) { + dbg_err("dirty nnodes %d exceed max %d", + c->dirty_nn_cnt, c->nnode_cnt); + err = -EINVAL; + } + return err; + case 1: + c->chk_lpt_sz += len; + return 0; + case 2: + c->chk_lpt_sz += len; + c->chk_lpt_wastage += len; + c->chk_lpt_lebs += 1; + return 0; + case 3: + chk_lpt_sz = c->leb_size; + chk_lpt_sz *= c->chk_lpt_lebs; + chk_lpt_sz += len - c->nhead_offs; + if (c->chk_lpt_sz != chk_lpt_sz) { + dbg_err("LPT wrote %lld but space used was %lld", + c->chk_lpt_sz, chk_lpt_sz); + err = -EINVAL; + } + if (c->chk_lpt_sz > c->lpt_sz) { + dbg_err("LPT wrote %lld but lpt_sz is %lld", + c->chk_lpt_sz, c->lpt_sz); + err = -EINVAL; + } + if (c->chk_lpt_sz2 && c->chk_lpt_sz != c->chk_lpt_sz2) { + dbg_err("LPT layout size %lld but wrote %lld", + c->chk_lpt_sz, c->chk_lpt_sz2); + err = -EINVAL; + } + if (c->chk_lpt_sz2 && c->new_nhead_offs != len) { + dbg_err("LPT new nhead offs: expected %d was %d", + c->new_nhead_offs, len); + err = -EINVAL; + } + lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; + lpt_sz += (long long)c->nnode_cnt * c->nnode_sz; + lpt_sz += c->ltab_sz; + if (c->big_lpt) + lpt_sz += c->lsave_sz; + if (c->chk_lpt_sz - c->chk_lpt_wastage > lpt_sz) { + dbg_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld", + c->chk_lpt_sz, c->chk_lpt_wastage, lpt_sz); + err = -EINVAL; + } + if (err) + dbg_dump_lpt_info(c); + c->chk_lpt_sz2 = c->chk_lpt_sz; + c->chk_lpt_sz = 0; + c->chk_lpt_wastage = 0; + c->chk_lpt_lebs = 0; + c->new_nhead_offs = len; + return err; + case 4: + c->chk_lpt_sz += len; + c->chk_lpt_wastage += len; + return 0; + default: + return -EINVAL; + } +} + #endif /* 
CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 49b06c9..a7bd32f 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -943,7 +943,6 @@ struct ubifs_mount_opts { * * @fast_unmount: do not run journal commit before un-mounting * @big_lpt: flag that LPT is too big to write whole during commit - * @check_lpt_free: flag that indicates LPT GC may be needed * @no_chk_data_crc: do not check CRCs when reading data nodes (except during * recovery) * @bulk_read: enable bulk-reads @@ -1102,6 +1101,7 @@ struct ubifs_mount_opts { * @lpt_drty_flgs: dirty flags for LPT special nodes e.g. ltab * @dirty_nn_cnt: number of dirty nnodes * @dirty_pn_cnt: number of dirty pnodes + * @check_lpt_free: flag that indicates LPT GC may be needed * @lpt_sz: LPT size * @lpt_nod_buf: buffer for an on-flash nnode or pnode * @lpt_buf: buffer of LEB size used by LPT @@ -1191,7 +1191,6 @@ struct ubifs_info { unsigned int fast_unmount:1; unsigned int big_lpt:1; - unsigned int check_lpt_free:1; unsigned int no_chk_data_crc:1; unsigned int bulk_read:1; @@ -1340,6 +1339,7 @@ struct ubifs_info { int lpt_drty_flgs; int dirty_nn_cnt; int dirty_pn_cnt; + int check_lpt_free; long long lpt_sz; void *lpt_nod_buf; void *lpt_buf; @@ -1394,6 +1394,12 @@ struct ubifs_info { unsigned long fail_timeout; unsigned int fail_cnt; unsigned int fail_cnt_max; + long long chk_lpt_sz; + long long chk_lpt_sz2; + long long chk_lpt_wastage; + int chk_lpt_lebs; + int new_nhead_lnum; + int new_nhead_offs; #endif }; -- cgit v0.10.2 From 63c300b68fd93a9fadc5e317d4d001b7a6985486 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 17 Sep 2008 12:11:13 +0300 Subject: UBIFS: correct condition to eliminate unecessary assignment Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index e0878a4..d27fd91 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -1600,7 +1600,7 @@ out: * An enormous hole could cause bulk-read to encompass too many * page cache pages, so limit the number here. */ - if (bu->blk_cnt >= UBIFS_MAX_BULK_READ) + if (bu->blk_cnt > UBIFS_MAX_BULK_READ) bu->blk_cnt = UBIFS_MAX_BULK_READ; /* * Ensure that bulk-read covers a whole number of page cache -- cgit v0.10.2 From be2f6bd62d0d4246a9227dacbe2469e1f0eccf26 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 26 Sep 2008 12:52:21 +0300 Subject: UBIFS: check buffer length when scanning for LPT nodes 'is_a_node()' function was reading from a buffer before checking the buffer length, resulting in an OOPS as follows: BUG: unable to handle kernel paging request at f8f74002 IP: [] :ubifs:ubifs_unpack_bits+0xca/0x233 *pde = 19e95067 *pte = 00000000 Oops: 0000 [#1] PREEMPT SMP Modules linked in: ubifs ubi mtdchar bio2mtd mtd brd video output [last unloaded: mtd] Pid: 6414, comm: integck Not tainted (2.6.27-rc6ubifs34 #23) EIP: 0060:[] EFLAGS: 00010246 CPU: 0 EIP is at ubifs_unpack_bits+0xca/0x233 [ubifs] EAX: 00000000 EBX: f6090630 ECX: d9badcfc EDX: 00000000 ESI: 00000004 EDI: f8f74002 EBP: d9badcec ESP: d9badcc0 DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068 Process integck (pid: 6414, ti=d9bac000 task=f727dae0 task.ti=d9bac000) Stack: 00000006 f7306240 00000002 00000000 d9badcfc d9badd00 0000001c 00000000 f6090630 f6090630 f8f74000 d9badd10 f8fa1cc9 00000000 f8f74002 00000000 f8f74002 f60fe128 f6090630 f8f74000 d9badd68 f8fa1e46 00000000 0001e000 Call Trace: [] ? is_a_node+0x30/0x90 [ubifs] [] ? dbg_check_ltab+0x11d/0x5bd [ubifs] [] ? ubifs_lpt_start_commit+0x42/0xed3 [ubifs] [] ? mutex_unlock+0x8/0xa [] ? 
ubifs_tnc_start_commit+0x1c8/0xedb [ubifs] [] ? do_commit+0x187/0x523 [ubifs] [] ? mutex_unlock+0x8/0xa [] ? bud_wbuf_callback+0x22/0x28 [ubifs] [] ? ubifs_run_commit+0x76/0xc0 [ubifs] [] ? ubifs_sync_fs+0xd2/0xe6 [ubifs] [] ? vfs_quota_sync+0x0/0x17e [] ? quota_sync_sb+0x26/0xbb [] ? vfs_quota_sync+0x0/0x17e [] ? sync_dquots+0x22/0x12c [] ? __fsync_super+0x19/0x68 [] ? fsync_super+0xb/0x19 [] ? generic_shutdown_super+0x22/0xe7 [] ? vfs_quota_off+0x0/0x5fd [] ? ubifs_kill_sb+0x31/0x35 [ubifs] [] ? deactivate_super+0x5e/0x71 [] ? mntput_no_expire+0x82/0xe4 [] ? sys_umount+0x4c/0x2f6 [] ? sys_oldumount+0x19/0x1b [] ? sysenter_do_call+0x12/0x25 ======================= Code: c1 f8 03 8d 04 07 8b 4d e8 89 01 8b 45 e4 89 10 89 d8 89 f1 d3 e8 85 c0 74 07 29 d6 83 fe 20 75 2a 89 d8 83 c4 20 5b 5e 5f 5d EIP: [] ubifs_unpack_bits+0xca/0x233 [ubifs] SS:ESP 0068:d9badcc0 ---[ end trace 1f02572436518c13 ]--- Signed-off-by: Adrian Hunter diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 8546865..eed5a00 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1089,6 +1089,8 @@ static int is_a_node(struct ubifs_info *c, uint8_t *buf, int len) int pos = 0, node_type, node_len; uint16_t crc, calc_crc; + if (len < UBIFS_LPT_CRC_BYTES + (UBIFS_LPT_TYPE_BITS + 7) / 8) + return 0; node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS); if (node_type == UBIFS_LPT_NOT_A_NODE) return 0; -- cgit v0.10.2 From 64c9627c2628bc3bd3291710b8ee6f8335883f8b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 1 Oct 2008 15:12:27 +0900 Subject: sh: Fix up the __raw_read/writeX() definitions. These were doing largely bogus things and using the wrong typing for the address. Bring these in line with the ARM definitions. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 1857666..d9e794e 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -101,44 +101,33 @@ #define outsw __outsw #define outsl __outsl -#define __raw_readb(a) __readb((void __iomem *)(a)) -#define __raw_readw(a) __readw((void __iomem *)(a)) -#define __raw_readl(a) __readl((void __iomem *)(a)) -#define __raw_writeb(v, a) __writeb(v, (void __iomem *)(a)) -#define __raw_writew(v, a) __writew(v, (void __iomem *)(a)) -#define __raw_writel(v, a) __writel(v, (void __iomem *)(a)) +#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)) +#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)) +#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)) -void __raw_writesl(unsigned long addr, const void *data, int longlen); -void __raw_readsl(unsigned long addr, void *data, int longlen); +#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a)) +#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) +#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a)) + +void __raw_writesl(void __iomem *addr, const void *data, int longlen); +void __raw_readsl(const void __iomem *addr, void *data, int longlen); /* * The platform header files may define some of these macros to use * the inlined versions where appropriate. These macros may also be * redefined by userlevel programs. 
*/ -#ifdef __readb -# define readb(a) ({ unsigned int r_ = __raw_readb(a); mb(); r_; }) -#endif -#ifdef __raw_readw -# define readw(a) ({ unsigned int r_ = __raw_readw(a); mb(); r_; }) -#endif -#ifdef __raw_readl -# define readl(a) ({ unsigned int r_ = __raw_readl(a); mb(); r_; }) -#endif +#define readb(a) ({ unsigned int r_ = __readb(a); mb(); r_; }) +#define readw(a) ({ unsigned int r_ = __readw(a); mb(); r_; }) +#define readl(a) ({ unsigned int r_ = __readl(a); mb(); r_; }) -#ifdef __raw_writeb -# define writeb(v,a) ({ __raw_writeb((v),(a)); mb(); }) -#endif -#ifdef __raw_writew -# define writew(v,a) ({ __raw_writew((v),(a)); mb(); }) -#endif -#ifdef __raw_writel -# define writel(v,a) ({ __raw_writel((v),(a)); mb(); }) -#endif +#define writeb(v,a) ({ __writeb((v),(a)); mb(); }) +#define writew(v,a) ({ __writew((v),(a)); mb(); }) +#define writel(v,a) ({ __writel((v),(a)); mb(); }) #define __BUILD_MEMORY_STRING(bwlq, type) \ \ -static inline void writes##bwlq(volatile void __iomem *mem, \ +static inline void __raw_writes##bwlq(volatile void __iomem *mem, \ const void *addr, unsigned int count) \ { \ const volatile type *__addr = addr; \ @@ -149,8 +138,8 @@ static inline void writes##bwlq(volatile void __iomem *mem, \ } \ } \ \ -static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \ - unsigned int count) \ +static inline void __raw_reads##bwlq(volatile void __iomem *mem, \ + void *addr, unsigned int count) \ { \ volatile type *__addr = addr; \ \ @@ -162,7 +151,13 @@ static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \ __BUILD_MEMORY_STRING(b, u8) __BUILD_MEMORY_STRING(w, u16) + +#define writesb __raw_writesb +#define writesw __raw_writesw #define writesl __raw_writesl + +#define readsb __raw_readsb +#define readsw __raw_readsw #define readsl __raw_readsl #define readb_relaxed(a) readb(a) @@ -170,25 +165,25 @@ __BUILD_MEMORY_STRING(w, u16) #define readl_relaxed(a) readl(a) /* Simple MMIO */ -#define ioread8(a) readb(a) -#define ioread16(a) readw(a) +#define ioread8(a) __raw_readb(a) +#define ioread16(a) __raw_readw(a) #define ioread16be(a) be16_to_cpu(__raw_readw((a))) -#define ioread32(a) readl(a) +#define ioread32(a) __raw_readl(a) #define ioread32be(a) be32_to_cpu(__raw_readl((a))) -#define iowrite8(v,a) writeb((v),(a)) -#define iowrite16(v,a) writew((v),(a)) +#define iowrite8(v,a) __raw_writeb((v),(a)) +#define iowrite16(v,a) __raw_writew((v),(a)) #define iowrite16be(v,a) __raw_writew(cpu_to_be16((v)),(a)) -#define iowrite32(v,a) writel((v),(a)) +#define iowrite32(v,a) __raw_writel((v),(a)) #define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a)) -#define ioread8_rep(a, d, c) readsb((a), (d), (c)) -#define ioread16_rep(a, d, c) readsw((a), (d), (c)) -#define ioread32_rep(a, d, c) readsl((a), (d), (c)) +#define ioread8_rep(a, d, c) __raw_readsb((a), (d), (c)) +#define ioread16_rep(a, d, c) __raw_readsw((a), (d), (c)) +#define ioread32_rep(a, d, c) __raw_readsl((a), (d), (c)) -#define iowrite8_rep(a, s, c) writesb((a), (s), (c)) -#define iowrite16_rep(a, s, c) writesw((a), (s), (c)) -#define iowrite32_rep(a, s, c) writesl((a), (s), (c)) +#define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c)) +#define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c)) +#define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c)) #define mmiowb() wmb() /* synco on SH-4A, otherwise a nop */ diff --git a/arch/sh/lib/io.c b/arch/sh/lib/io.c index 4f54ec4..88dfe6e 100644 --- a/arch/sh/lib/io.c +++ b/arch/sh/lib/io.c @@ -14,12 +14,12 @@ #include #include -void 
__raw_readsl(unsigned long addr, void *datap, int len) +void __raw_readsl(const void __iomem *addr, void *datap, int len) { u32 *data; for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--) - *data++ = ctrl_inl(addr); + *data++ = __raw_readl(addr); if (likely(len >= (0x20 >> 2))) { int tmp2, tmp3, tmp4, tmp5, tmp6; @@ -59,11 +59,11 @@ void __raw_readsl(unsigned long addr, void *datap, int len) } for (; len != 0; len--) - *data++ = ctrl_inl(addr); + *data++ = __raw_readl(addr); } EXPORT_SYMBOL(__raw_readsl); -void __raw_writesl(unsigned long addr, const void *data, int len) +void __raw_writesl(void __iomem *addr, const void *data, int len) { if (likely(len != 0)) { int tmp1; -- cgit v0.10.2 From 62429e03644833693e6f94afe537f252e2d3b475 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 1 Oct 2008 15:19:10 +0900 Subject: sh: Use __raw_xxx() I/O accessors for INTC and IPR. Signed-off-by: Paul Mundt diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c index 94536d3..138efa4 100644 --- a/arch/sh/kernel/cpu/irq/intc.c +++ b/arch/sh/kernel/cpu/irq/intc.c @@ -86,24 +86,24 @@ static inline unsigned int set_field(unsigned int value, static void write_8(unsigned long addr, unsigned long h, unsigned long data) { - ctrl_outb(set_field(0, data, h), addr); + __raw_writeb(set_field(0, data, h), addr); } static void write_16(unsigned long addr, unsigned long h, unsigned long data) { - ctrl_outw(set_field(0, data, h), addr); + __raw_writew(set_field(0, data, h), addr); } static void write_32(unsigned long addr, unsigned long h, unsigned long data) { - ctrl_outl(set_field(0, data, h), addr); + __raw_writel(set_field(0, data, h), addr); } static void modify_8(unsigned long addr, unsigned long h, unsigned long data) { unsigned long flags; local_irq_save(flags); - ctrl_outb(set_field(ctrl_inb(addr), data, h), addr); + __raw_writeb(set_field(__raw_readb(addr), data, h), addr); local_irq_restore(flags); } @@ -111,7 +111,7 @@ static void modify_16(unsigned long addr, unsigned long h, unsigned long data) { unsigned long flags; local_irq_save(flags); - ctrl_outw(set_field(ctrl_inw(addr), data, h), addr); + __raw_writew(set_field(__raw_readw(addr), data, h), addr); local_irq_restore(flags); } @@ -119,7 +119,7 @@ static void modify_32(unsigned long addr, unsigned long h, unsigned long data) { unsigned long flags; local_irq_save(flags); - ctrl_outl(set_field(ctrl_inl(addr), data, h), addr); + __raw_writel(set_field(__raw_readl(addr), data, h), addr); local_irq_restore(flags); } @@ -246,16 +246,16 @@ static void intc_mask_ack(unsigned int irq) addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); switch (_INTC_FN(handle)) { case REG_FN_MODIFY_BASE + 0: /* 8bit */ - ctrl_inb(addr); - ctrl_outb(0xff ^ set_field(0, 1, handle), addr); + __raw_readb(addr); + __raw_writeb(0xff ^ set_field(0, 1, handle), addr); break; case REG_FN_MODIFY_BASE + 1: /* 16bit */ - ctrl_inw(addr); - ctrl_outw(0xffff ^ set_field(0, 1, handle), addr); + __raw_readw(addr); + __raw_writew(0xffff ^ set_field(0, 1, handle), addr); break; case REG_FN_MODIFY_BASE + 3: /* 32bit */ - ctrl_inl(addr); - ctrl_outl(0xffffffff ^ set_field(0, 1, handle), addr); + __raw_readl(addr); + __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr); break; default: BUG(); diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c index 56ea7b2..3eb17ee 100644 --- a/arch/sh/kernel/cpu/irq/ipr.c +++ b/arch/sh/kernel/cpu/irq/ipr.c @@ -33,7 +33,7 @@ static void disable_ipr_irq(unsigned int irq) struct ipr_data *p = 
get_irq_chip_data(irq); unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx]; /* Set the priority in IPR to 0 */ - ctrl_outw(ctrl_inw(addr) & (0xffff ^ (0xf << p->shift)), addr); + __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr); } static void enable_ipr_irq(unsigned int irq) @@ -41,7 +41,7 @@ static void enable_ipr_irq(unsigned int irq) struct ipr_data *p = get_irq_chip_data(irq); unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx]; /* Set priority in IPR back to original value */ - ctrl_outw(ctrl_inw(addr) | (p->priority << p->shift), addr); + __raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr); } /* -- cgit v0.10.2 From 7ff731aeba1cdac473c818a9884eb94ddad18e7f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 1 Oct 2008 15:46:58 +0900 Subject: serial: sh-sci: Handle the general UPF_IOREMAP case. Presently we don't do much with UPF_IOREMAP other than special case it for SH-5's onchip_remap() on the early console. Tie this in generically for platforms that need the remap. Signed-off-by: Paul Mundt diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index f5aebc9..ac658a7 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -3,7 +3,7 @@ * * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) * - * Copyright (C) 2002 - 2006 Paul Mundt + * Copyright (C) 2002 - 2008 Paul Mundt * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). * * based off of the old drivers/char/sh-sci.c by: @@ -46,6 +46,7 @@ #include #include #include +#include #ifdef CONFIG_SUPERH #include @@ -1145,12 +1146,16 @@ static void sci_config_port(struct uart_port *port, int flags) break; } -#if defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) - if (port->mapbase == 0) + if (port->flags & UPF_IOREMAP && !port->membase) { +#if defined(CONFIG_SUPERH64) port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF"); - - port->membase = (void __iomem *)port->mapbase; + port->membase = (void __iomem *)port->mapbase; +#else + port->membase = ioremap_nocache(port->mapbase, 0x40); #endif + + printk(KERN_ERR "sci: can't remap port#%d\n", port->line); + } } static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) @@ -1436,7 +1441,7 @@ static struct uart_driver sci_uart_driver = { static int __devinit sci_probe(struct platform_device *dev) { struct plat_sci_port *p = dev->dev.platform_data; - int i; + int i, ret = -EINVAL; for (i = 0; p && p->flags != 0; p++, i++) { struct sci_port *sciport = &sci_ports[i]; @@ -1453,12 +1458,22 @@ static int __devinit sci_probe(struct platform_device *dev) sciport->port.mapbase = p->mapbase; - /* - * For the simple (and majority of) cases where we don't need - * to do any remapping, just cast the cookie directly. - */ - if (p->mapbase && !p->membase && !(p->flags & UPF_IOREMAP)) - p->membase = (void __iomem *)p->mapbase; + if (p->mapbase && !p->membase) { + if (p->flags & UPF_IOREMAP) { + p->membase = ioremap_nocache(p->mapbase, 0x40); + if (IS_ERR(p->membase)) { + ret = PTR_ERR(p->membase); + goto err_unreg; + } + } else { + /* + * For the simple (and majority of) cases + * where we don't need to do any remapping, + * just cast the cookie directly. 
+ */ + p->membase = (void __iomem *)p->mapbase; + } + } sciport->port.membase = p->membase; @@ -1489,6 +1504,12 @@ static int __devinit sci_probe(struct platform_device *dev) #endif return 0; + +err_unreg: + for (i = i - 1; i >= 0; i--) + uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port); + + return ret; } static int __devexit sci_remove(struct platform_device *dev) diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 8a0749e..2b4c1df 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -320,18 +320,16 @@ #define SCI_EVENT_WRITE_WAKEUP 0 #define SCI_IN(size, offset) \ - unsigned int addr = port->mapbase + (offset); \ if ((size) == 8) { \ - return ctrl_inb(addr); \ + return ioread8(port->membase + (offset)); \ } else { \ - return ctrl_inw(addr); \ + return ioread16(port->membase + (offset)); \ } #define SCI_OUT(size, offset, value) \ - unsigned int addr = port->mapbase + (offset); \ if ((size) == 8) { \ - ctrl_outb(value, addr); \ + iowrite8(value, port->membase + (offset)); \ } else if ((size) == 16) { \ - ctrl_outw(value, addr); \ + iowrite16(value, port->membase + (offset)); \ } #define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\ -- cgit v0.10.2 From bbfbd8b151fe35c9a1180a7f5254c5d6b8387cc0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 1 Oct 2008 16:13:54 +0900 Subject: sh: Move the shared INTC code out to drivers/sh/ The INTC code will be re-used across different architectures, so move this out to drivers/sh/ and include/linux/sh_intc.h respectively. Signed-off-by: Paul Mundt diff --git a/arch/sh/include/asm/hw_irq.h b/arch/sh/include/asm/hw_irq.h index d557b00..603cdde 100644 --- a/arch/sh/include/asm/hw_irq.h +++ b/arch/sh/include/asm/hw_irq.h @@ -2,6 +2,7 @@ #define __ASM_SH_HW_IRQ_H #include +#include #include extern atomic_t irq_err_count; @@ -23,101 +24,12 @@ struct ipr_desc { void register_ipr_controller(struct ipr_desc *); -typedef unsigned char intc_enum; - -struct intc_vect { - intc_enum enum_id; - unsigned short vect; -}; - -#define INTC_VECT(enum_id, vect) { enum_id, vect } -#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq)) - -struct intc_group { - intc_enum enum_id; - intc_enum enum_ids[32]; -}; - -#define INTC_GROUP(enum_id, ids...) 
{ enum_id, { ids } } - -struct intc_mask_reg { - unsigned long set_reg, clr_reg, reg_width; - intc_enum enum_ids[32]; -#ifdef CONFIG_SMP - unsigned long smp; -#endif -}; - -struct intc_prio_reg { - unsigned long set_reg, clr_reg, reg_width, field_width; - intc_enum enum_ids[16]; -#ifdef CONFIG_SMP - unsigned long smp; -#endif -}; - -struct intc_sense_reg { - unsigned long reg, reg_width, field_width; - intc_enum enum_ids[16]; -}; - -#ifdef CONFIG_SMP -#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8) -#else -#define INTC_SMP(stride, nr) -#endif - -struct intc_desc { - struct intc_vect *vectors; - unsigned int nr_vectors; - struct intc_group *groups; - unsigned int nr_groups; - struct intc_mask_reg *mask_regs; - unsigned int nr_mask_regs; - struct intc_prio_reg *prio_regs; - unsigned int nr_prio_regs; - struct intc_sense_reg *sense_regs; - unsigned int nr_sense_regs; - char *name; -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) - struct intc_mask_reg *ack_regs; - unsigned int nr_ack_regs; -#endif -}; - -#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) -#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \ - mask_regs, prio_regs, sense_regs) \ -struct intc_desc symbol __initdata = { \ - _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ - _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ - _INTC_ARRAY(sense_regs), \ - chipname, \ -} - -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) -#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \ - mask_regs, prio_regs, sense_regs, ack_regs) \ -struct intc_desc symbol __initdata = { \ - _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ - _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ - _INTC_ARRAY(sense_regs), \ - chipname, \ - _INTC_ARRAY(ack_regs), \ -} -#endif - -void __init register_intc_controller(struct intc_desc *desc); -int intc_set_priority(unsigned int irq, unsigned int prio); - void __init plat_irq_setup(void); -#ifdef CONFIG_CPU_SH3 void __init plat_irq_setup_sh3(void); -#endif +void __init plat_irq_setup_pins(int mode); enum { IRQ_MODE_IRQ, IRQ_MODE_IRQ7654, IRQ_MODE_IRQ3210, IRQ_MODE_IRL7654_MASK, IRQ_MODE_IRL3210_MASK, IRQ_MODE_IRL7654, IRQ_MODE_IRL3210 }; -void __init plat_irq_setup_pins(int mode); #endif /* __ASM_SH_HW_IRQ_H */ diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile index 462a8f6..f0c7025 100644 --- a/arch/sh/kernel/cpu/irq/Makefile +++ b/arch/sh/kernel/cpu/irq/Makefile @@ -1,8 +1,6 @@ # # Makefile for the Linux/SuperH CPU-specifc IRQ handlers. # -obj-y += intc.o - obj-$(CONFIG_SUPERH32) += imask.o obj-$(CONFIG_CPU_SH5) += intc-sh5.o obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c deleted file mode 100644 index 138efa4..0000000 --- a/arch/sh/kernel/cpu/irq/intc.c +++ /dev/null @@ -1,712 +0,0 @@ -/* - * Shared interrupt handling code for IPR and INTC2 types of IRQs. - * - * Copyright (C) 2007, 2008 Magnus Damm - * - * Based on intc2.c and ipr.c - * - * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi - * Copyright (C) 2000 Kazumoto Kojima - * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) - * Copyright (C) 2003 Takashi Kusuda - * Copyright (C) 2005, 2006 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#include -#include -#include -#include -#include -#include - -#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ - ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ - ((addr_e) << 16) | ((addr_d << 24))) - -#define _INTC_SHIFT(h) (h & 0x1f) -#define _INTC_WIDTH(h) ((h >> 5) & 0xf) -#define _INTC_FN(h) ((h >> 9) & 0xf) -#define _INTC_MODE(h) ((h >> 13) & 0x7) -#define _INTC_ADDR_E(h) ((h >> 16) & 0xff) -#define _INTC_ADDR_D(h) ((h >> 24) & 0xff) - -struct intc_handle_int { - unsigned int irq; - unsigned long handle; -}; - -struct intc_desc_int { - unsigned long *reg; -#ifdef CONFIG_SMP - unsigned long *smp; -#endif - unsigned int nr_reg; - struct intc_handle_int *prio; - unsigned int nr_prio; - struct intc_handle_int *sense; - unsigned int nr_sense; - struct irq_chip chip; -}; - -#ifdef CONFIG_SMP -#define IS_SMP(x) x.smp -#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) -#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1) -#else -#define IS_SMP(x) 0 -#define INTC_REG(d, x, c) (d->reg[(x)]) -#define SMP_NR(d, x) 1 -#endif - -static unsigned int intc_prio_level[NR_IRQS]; /* for now */ -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) -static unsigned long ack_handle[NR_IRQS]; -#endif - -static inline struct intc_desc_int *get_intc_desc(unsigned int irq) -{ - struct irq_chip *chip = get_irq_chip(irq); - return (void *)((char *)chip - offsetof(struct intc_desc_int, chip)); -} - -static inline unsigned int set_field(unsigned int value, - unsigned int field_value, - unsigned int handle) -{ - unsigned int width = _INTC_WIDTH(handle); - unsigned int shift = _INTC_SHIFT(handle); - - value &= ~(((1 << width) - 1) << shift); - value |= field_value << shift; - return value; -} - -static void write_8(unsigned long addr, unsigned long h, unsigned long data) -{ - __raw_writeb(set_field(0, data, h), addr); -} - -static void write_16(unsigned long addr, unsigned long h, unsigned long data) -{ - __raw_writew(set_field(0, data, h), addr); -} - -static void write_32(unsigned long addr, unsigned long h, unsigned long data) -{ - __raw_writel(set_field(0, data, h), addr); -} - -static void modify_8(unsigned long addr, unsigned long h, unsigned long data) -{ - unsigned long flags; - local_irq_save(flags); - __raw_writeb(set_field(__raw_readb(addr), data, h), addr); - local_irq_restore(flags); -} - -static void modify_16(unsigned long addr, unsigned long h, unsigned long data) -{ - unsigned long flags; - local_irq_save(flags); - __raw_writew(set_field(__raw_readw(addr), data, h), addr); - local_irq_restore(flags); -} - -static void modify_32(unsigned long addr, unsigned long h, unsigned long data) -{ - unsigned long flags; - local_irq_save(flags); - __raw_writel(set_field(__raw_readl(addr), data, h), addr); - local_irq_restore(flags); -} - -enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 }; - -static void (*intc_reg_fns[])(unsigned long addr, - unsigned long h, - unsigned long data) = { - [REG_FN_WRITE_BASE + 0] = write_8, - [REG_FN_WRITE_BASE + 1] = write_16, - [REG_FN_WRITE_BASE + 3] = write_32, - [REG_FN_MODIFY_BASE + 0] = modify_8, - [REG_FN_MODIFY_BASE + 1] = modify_16, - [REG_FN_MODIFY_BASE + 3] = modify_32, -}; - -enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */ - MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */ - MODE_DUAL_REG, /* Two registers, set bit to enable / disable */ - MODE_PRIO_REG, /* Priority value written to enable interrupt */ - MODE_PCLR_REG, /* Above plus all bits set to disable 
interrupt */ -}; - -static void intc_mode_field(unsigned long addr, - unsigned long handle, - void (*fn)(unsigned long, - unsigned long, - unsigned long), - unsigned int irq) -{ - fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1)); -} - -static void intc_mode_zero(unsigned long addr, - unsigned long handle, - void (*fn)(unsigned long, - unsigned long, - unsigned long), - unsigned int irq) -{ - fn(addr, handle, 0); -} - -static void intc_mode_prio(unsigned long addr, - unsigned long handle, - void (*fn)(unsigned long, - unsigned long, - unsigned long), - unsigned int irq) -{ - fn(addr, handle, intc_prio_level[irq]); -} - -static void (*intc_enable_fns[])(unsigned long addr, - unsigned long handle, - void (*fn)(unsigned long, - unsigned long, - unsigned long), - unsigned int irq) = { - [MODE_ENABLE_REG] = intc_mode_field, - [MODE_MASK_REG] = intc_mode_zero, - [MODE_DUAL_REG] = intc_mode_field, - [MODE_PRIO_REG] = intc_mode_prio, - [MODE_PCLR_REG] = intc_mode_prio, -}; - -static void (*intc_disable_fns[])(unsigned long addr, - unsigned long handle, - void (*fn)(unsigned long, - unsigned long, - unsigned long), - unsigned int irq) = { - [MODE_ENABLE_REG] = intc_mode_zero, - [MODE_MASK_REG] = intc_mode_field, - [MODE_DUAL_REG] = intc_mode_field, - [MODE_PRIO_REG] = intc_mode_zero, - [MODE_PCLR_REG] = intc_mode_field, -}; - -static inline void _intc_enable(unsigned int irq, unsigned long handle) -{ - struct intc_desc_int *d = get_intc_desc(irq); - unsigned long addr; - unsigned int cpu; - - for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { - addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); - intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ - [_INTC_FN(handle)], irq); - } -} - -static void intc_enable(unsigned int irq) -{ - _intc_enable(irq, (unsigned long)get_irq_chip_data(irq)); -} - -static void intc_disable(unsigned int irq) -{ - struct intc_desc_int *d = get_intc_desc(irq); - unsigned long handle = (unsigned long) get_irq_chip_data(irq); - unsigned long addr; - unsigned int cpu; - - for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { - addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); - intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\ - [_INTC_FN(handle)], irq); - } -} - -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) -static void intc_mask_ack(unsigned int irq) -{ - struct intc_desc_int *d = get_intc_desc(irq); - unsigned long handle = ack_handle[irq]; - unsigned long addr; - - intc_disable(irq); - - /* read register and write zero only to the assocaited bit */ - - if (handle) { - addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); - switch (_INTC_FN(handle)) { - case REG_FN_MODIFY_BASE + 0: /* 8bit */ - __raw_readb(addr); - __raw_writeb(0xff ^ set_field(0, 1, handle), addr); - break; - case REG_FN_MODIFY_BASE + 1: /* 16bit */ - __raw_readw(addr); - __raw_writew(0xffff ^ set_field(0, 1, handle), addr); - break; - case REG_FN_MODIFY_BASE + 3: /* 32bit */ - __raw_readl(addr); - __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr); - break; - default: - BUG(); - break; - } - } -} -#endif - -static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, - unsigned int nr_hp, - unsigned int irq) -{ - int i; - - /* this doesn't scale well, but... - * - * this function should only be used for cerain uncommon - * operations such as intc_set_priority() and intc_set_sense() - * and in those rare cases performance doesn't matter that much. - * keeping the memory footprint low is more important. 
- * - * one rather simple way to speed this up and still keep the - * memory footprint down is to make sure the array is sorted - * and then perform a bisect to lookup the irq. - */ - - for (i = 0; i < nr_hp; i++) { - if ((hp + i)->irq != irq) - continue; - - return hp + i; - } - - return NULL; -} - -int intc_set_priority(unsigned int irq, unsigned int prio) -{ - struct intc_desc_int *d = get_intc_desc(irq); - struct intc_handle_int *ihp; - - if (!intc_prio_level[irq] || prio <= 1) - return -EINVAL; - - ihp = intc_find_irq(d->prio, d->nr_prio, irq); - if (ihp) { - if (prio >= (1 << _INTC_WIDTH(ihp->handle))) - return -EINVAL; - - intc_prio_level[irq] = prio; - - /* - * only set secondary masking method directly - * primary masking method is using intc_prio_level[irq] - * priority level will be set during next enable() - */ - - if (_INTC_FN(ihp->handle) != REG_FN_ERR) - _intc_enable(irq, ihp->handle); - } - return 0; -} - -#define VALID(x) (x | 0x80) - -static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = { - [IRQ_TYPE_EDGE_FALLING] = VALID(0), - [IRQ_TYPE_EDGE_RISING] = VALID(1), - [IRQ_TYPE_LEVEL_LOW] = VALID(2), - /* SH7706, SH7707 and SH7709 do not support high level triggered */ -#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \ - !defined(CONFIG_CPU_SUBTYPE_SH7707) && \ - !defined(CONFIG_CPU_SUBTYPE_SH7709) - [IRQ_TYPE_LEVEL_HIGH] = VALID(3), -#endif -}; - -static int intc_set_sense(unsigned int irq, unsigned int type) -{ - struct intc_desc_int *d = get_intc_desc(irq); - unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK]; - struct intc_handle_int *ihp; - unsigned long addr; - - if (!value) - return -EINVAL; - - ihp = intc_find_irq(d->sense, d->nr_sense, irq); - if (ihp) { - addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0); - intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value); - } - return 0; -} - -static unsigned int __init intc_get_reg(struct intc_desc_int *d, - unsigned long address) -{ - unsigned int k; - - for (k = 0; k < d->nr_reg; k++) { - if (d->reg[k] == address) - return k; - } - - BUG(); - return 0; -} - -static intc_enum __init intc_grp_id(struct intc_desc *desc, - intc_enum enum_id) -{ - struct intc_group *g = desc->groups; - unsigned int i, j; - - for (i = 0; g && enum_id && i < desc->nr_groups; i++) { - g = desc->groups + i; - - for (j = 0; g->enum_ids[j]; j++) { - if (g->enum_ids[j] != enum_id) - continue; - - return g->enum_id; - } - } - - return 0; -} - -static unsigned int __init intc_mask_data(struct intc_desc *desc, - struct intc_desc_int *d, - intc_enum enum_id, int do_grps) -{ - struct intc_mask_reg *mr = desc->mask_regs; - unsigned int i, j, fn, mode; - unsigned long reg_e, reg_d; - - for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) { - mr = desc->mask_regs + i; - - for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { - if (mr->enum_ids[j] != enum_id) - continue; - - if (mr->set_reg && mr->clr_reg) { - fn = REG_FN_WRITE_BASE; - mode = MODE_DUAL_REG; - reg_e = mr->clr_reg; - reg_d = mr->set_reg; - } else { - fn = REG_FN_MODIFY_BASE; - if (mr->set_reg) { - mode = MODE_ENABLE_REG; - reg_e = mr->set_reg; - reg_d = mr->set_reg; - } else { - mode = MODE_MASK_REG; - reg_e = mr->clr_reg; - reg_d = mr->clr_reg; - } - } - - fn += (mr->reg_width >> 3) - 1; - return _INTC_MK(fn, mode, - intc_get_reg(d, reg_e), - intc_get_reg(d, reg_d), - 1, - (mr->reg_width - 1) - j); - } - } - - if (do_grps) - return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0); - - return 0; -} - -static unsigned int __init intc_prio_data(struct 
intc_desc *desc, - struct intc_desc_int *d, - intc_enum enum_id, int do_grps) -{ - struct intc_prio_reg *pr = desc->prio_regs; - unsigned int i, j, fn, mode, bit; - unsigned long reg_e, reg_d; - - for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) { - pr = desc->prio_regs + i; - - for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) { - if (pr->enum_ids[j] != enum_id) - continue; - - if (pr->set_reg && pr->clr_reg) { - fn = REG_FN_WRITE_BASE; - mode = MODE_PCLR_REG; - reg_e = pr->set_reg; - reg_d = pr->clr_reg; - } else { - fn = REG_FN_MODIFY_BASE; - mode = MODE_PRIO_REG; - if (!pr->set_reg) - BUG(); - reg_e = pr->set_reg; - reg_d = pr->set_reg; - } - - fn += (pr->reg_width >> 3) - 1; - - BUG_ON((j + 1) * pr->field_width > pr->reg_width); - - bit = pr->reg_width - ((j + 1) * pr->field_width); - - return _INTC_MK(fn, mode, - intc_get_reg(d, reg_e), - intc_get_reg(d, reg_d), - pr->field_width, bit); - } - } - - if (do_grps) - return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0); - - return 0; -} - -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) -static unsigned int __init intc_ack_data(struct intc_desc *desc, - struct intc_desc_int *d, - intc_enum enum_id) -{ - struct intc_mask_reg *mr = desc->ack_regs; - unsigned int i, j, fn, mode; - unsigned long reg_e, reg_d; - - for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) { - mr = desc->ack_regs + i; - - for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { - if (mr->enum_ids[j] != enum_id) - continue; - - fn = REG_FN_MODIFY_BASE; - mode = MODE_ENABLE_REG; - reg_e = mr->set_reg; - reg_d = mr->set_reg; - - fn += (mr->reg_width >> 3) - 1; - return _INTC_MK(fn, mode, - intc_get_reg(d, reg_e), - intc_get_reg(d, reg_d), - 1, - (mr->reg_width - 1) - j); - } - } - - return 0; -} -#endif - -static unsigned int __init intc_sense_data(struct intc_desc *desc, - struct intc_desc_int *d, - intc_enum enum_id) -{ - struct intc_sense_reg *sr = desc->sense_regs; - unsigned int i, j, fn, bit; - - for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) { - sr = desc->sense_regs + i; - - for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { - if (sr->enum_ids[j] != enum_id) - continue; - - fn = REG_FN_MODIFY_BASE; - fn += (sr->reg_width >> 3) - 1; - - BUG_ON((j + 1) * sr->field_width > sr->reg_width); - - bit = sr->reg_width - ((j + 1) * sr->field_width); - - return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg), - 0, sr->field_width, bit); - } - } - - return 0; -} - -static void __init intc_register_irq(struct intc_desc *desc, - struct intc_desc_int *d, - intc_enum enum_id, - unsigned int irq) -{ - struct intc_handle_int *hp; - unsigned int data[2], primary; - - /* Prefer single interrupt source bitmap over other combinations: - * 1. bitmap, single interrupt source - * 2. priority, single interrupt source - * 3. bitmap, multiple interrupt sources (groups) - * 4. priority, multiple interrupt sources (groups) - */ - - data[0] = intc_mask_data(desc, d, enum_id, 0); - data[1] = intc_prio_data(desc, d, enum_id, 0); - - primary = 0; - if (!data[0] && data[1]) - primary = 1; - - data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1); - data[1] = data[1] ? 
data[1] : intc_prio_data(desc, d, enum_id, 1); - - if (!data[primary]) - primary ^= 1; - - BUG_ON(!data[primary]); /* must have primary masking method */ - - disable_irq_nosync(irq); - set_irq_chip_and_handler_name(irq, &d->chip, - handle_level_irq, "level"); - set_irq_chip_data(irq, (void *)data[primary]); - - /* set priority level - * - this needs to be at least 2 for 5-bit priorities on 7780 - */ - intc_prio_level[irq] = 2; - - /* enable secondary masking method if present */ - if (data[!primary]) - _intc_enable(irq, data[!primary]); - - /* add irq to d->prio list if priority is available */ - if (data[1]) { - hp = d->prio + d->nr_prio; - hp->irq = irq; - hp->handle = data[1]; - - if (primary) { - /* - * only secondary priority should access registers, so - * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority() - */ - - hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0); - hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0); - } - d->nr_prio++; - } - - /* add irq to d->sense list if sense is available */ - data[0] = intc_sense_data(desc, d, enum_id); - if (data[0]) { - (d->sense + d->nr_sense)->irq = irq; - (d->sense + d->nr_sense)->handle = data[0]; - d->nr_sense++; - } - - /* irq should be disabled by default */ - d->chip.mask(irq); - -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) - if (desc->ack_regs) - ack_handle[irq] = intc_ack_data(desc, d, enum_id); -#endif -} - -static unsigned int __init save_reg(struct intc_desc_int *d, - unsigned int cnt, - unsigned long value, - unsigned int smp) -{ - if (value) { - d->reg[cnt] = value; -#ifdef CONFIG_SMP - d->smp[cnt] = smp; -#endif - return 1; - } - - return 0; -} - - -void __init register_intc_controller(struct intc_desc *desc) -{ - unsigned int i, k, smp; - struct intc_desc_int *d; - - d = alloc_bootmem(sizeof(*d)); - - d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0; - d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0; - d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0; - -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) - d->nr_reg += desc->ack_regs ? 
desc->nr_ack_regs : 0; -#endif - d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg)); -#ifdef CONFIG_SMP - d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp)); -#endif - k = 0; - - if (desc->mask_regs) { - for (i = 0; i < desc->nr_mask_regs; i++) { - smp = IS_SMP(desc->mask_regs[i]); - k += save_reg(d, k, desc->mask_regs[i].set_reg, smp); - k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp); - } - } - - if (desc->prio_regs) { - d->prio = alloc_bootmem(desc->nr_vectors * sizeof(*d->prio)); - - for (i = 0; i < desc->nr_prio_regs; i++) { - smp = IS_SMP(desc->prio_regs[i]); - k += save_reg(d, k, desc->prio_regs[i].set_reg, smp); - k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp); - } - } - - if (desc->sense_regs) { - d->sense = alloc_bootmem(desc->nr_vectors * sizeof(*d->sense)); - - for (i = 0; i < desc->nr_sense_regs; i++) { - k += save_reg(d, k, desc->sense_regs[i].reg, 0); - } - } - - d->chip.name = desc->name; - d->chip.mask = intc_disable; - d->chip.unmask = intc_enable; - d->chip.mask_ack = intc_disable; - d->chip.set_type = intc_set_sense; - -#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) - if (desc->ack_regs) { - for (i = 0; i < desc->nr_ack_regs; i++) - k += save_reg(d, k, desc->ack_regs[i].set_reg, 0); - - d->chip.mask_ack = intc_mask_ack; - } -#endif - - BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ - - for (i = 0; i < desc->nr_vectors; i++) { - struct intc_vect *vect = desc->vectors + i; - - intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect)); - } -} diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index a96f4a8..6a025ce 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile @@ -1,6 +1,6 @@ # # Makefile for the SuperH specific drivers. # - obj-$(CONFIG_SUPERHYWAY) += superhyway/ obj-$(CONFIG_MAPLE) += maple/ +obj-y += intc.o diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c new file mode 100644 index 0000000..58d24c5 --- /dev/null +++ b/drivers/sh/intc.c @@ -0,0 +1,713 @@ +/* + * Shared interrupt handling code for IPR and INTC2 types of IRQs. + * + * Copyright (C) 2007, 2008 Magnus Damm + * + * Based on intc2.c and ipr.c + * + * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi + * Copyright (C) 2000 Kazumoto Kojima + * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) + * Copyright (C) 2003 Takashi Kusuda + * Copyright (C) 2005, 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include + +#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ + ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ + ((addr_e) << 16) | ((addr_d << 24))) + +#define _INTC_SHIFT(h) (h & 0x1f) +#define _INTC_WIDTH(h) ((h >> 5) & 0xf) +#define _INTC_FN(h) ((h >> 9) & 0xf) +#define _INTC_MODE(h) ((h >> 13) & 0x7) +#define _INTC_ADDR_E(h) ((h >> 16) & 0xff) +#define _INTC_ADDR_D(h) ((h >> 24) & 0xff) + +struct intc_handle_int { + unsigned int irq; + unsigned long handle; +}; + +struct intc_desc_int { + unsigned long *reg; +#ifdef CONFIG_SMP + unsigned long *smp; +#endif + unsigned int nr_reg; + struct intc_handle_int *prio; + unsigned int nr_prio; + struct intc_handle_int *sense; + unsigned int nr_sense; + struct irq_chip chip; +}; + +#ifdef CONFIG_SMP +#define IS_SMP(x) x.smp +#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) +#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? 
(d->smp[(x)] >> 8) : 1) +#else +#define IS_SMP(x) 0 +#define INTC_REG(d, x, c) (d->reg[(x)]) +#define SMP_NR(d, x) 1 +#endif + +static unsigned int intc_prio_level[NR_IRQS]; /* for now */ +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) +static unsigned long ack_handle[NR_IRQS]; +#endif + +static inline struct intc_desc_int *get_intc_desc(unsigned int irq) +{ + struct irq_chip *chip = get_irq_chip(irq); + return (void *)((char *)chip - offsetof(struct intc_desc_int, chip)); +} + +static inline unsigned int set_field(unsigned int value, + unsigned int field_value, + unsigned int handle) +{ + unsigned int width = _INTC_WIDTH(handle); + unsigned int shift = _INTC_SHIFT(handle); + + value &= ~(((1 << width) - 1) << shift); + value |= field_value << shift; + return value; +} + +static void write_8(unsigned long addr, unsigned long h, unsigned long data) +{ + __raw_writeb(set_field(0, data, h), addr); +} + +static void write_16(unsigned long addr, unsigned long h, unsigned long data) +{ + __raw_writew(set_field(0, data, h), addr); +} + +static void write_32(unsigned long addr, unsigned long h, unsigned long data) +{ + __raw_writel(set_field(0, data, h), addr); +} + +static void modify_8(unsigned long addr, unsigned long h, unsigned long data) +{ + unsigned long flags; + local_irq_save(flags); + __raw_writeb(set_field(__raw_readb(addr), data, h), addr); + local_irq_restore(flags); +} + +static void modify_16(unsigned long addr, unsigned long h, unsigned long data) +{ + unsigned long flags; + local_irq_save(flags); + __raw_writew(set_field(__raw_readw(addr), data, h), addr); + local_irq_restore(flags); +} + +static void modify_32(unsigned long addr, unsigned long h, unsigned long data) +{ + unsigned long flags; + local_irq_save(flags); + __raw_writel(set_field(__raw_readl(addr), data, h), addr); + local_irq_restore(flags); +} + +enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 }; + +static void (*intc_reg_fns[])(unsigned long addr, + unsigned long h, + unsigned long data) = { + [REG_FN_WRITE_BASE + 0] = write_8, + [REG_FN_WRITE_BASE + 1] = write_16, + [REG_FN_WRITE_BASE + 3] = write_32, + [REG_FN_MODIFY_BASE + 0] = modify_8, + [REG_FN_MODIFY_BASE + 1] = modify_16, + [REG_FN_MODIFY_BASE + 3] = modify_32, +}; + +enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */ + MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */ + MODE_DUAL_REG, /* Two registers, set bit to enable / disable */ + MODE_PRIO_REG, /* Priority value written to enable interrupt */ + MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */ +}; + +static void intc_mode_field(unsigned long addr, + unsigned long handle, + void (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) +{ + fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1)); +} + +static void intc_mode_zero(unsigned long addr, + unsigned long handle, + void (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) +{ + fn(addr, handle, 0); +} + +static void intc_mode_prio(unsigned long addr, + unsigned long handle, + void (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) +{ + fn(addr, handle, intc_prio_level[irq]); +} + +static void (*intc_enable_fns[])(unsigned long addr, + unsigned long handle, + void (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) = { + [MODE_ENABLE_REG] = intc_mode_field, + [MODE_MASK_REG] = intc_mode_zero, + [MODE_DUAL_REG] = intc_mode_field, + [MODE_PRIO_REG] = intc_mode_prio, + [MODE_PCLR_REG] = 
intc_mode_prio, +}; + +static void (*intc_disable_fns[])(unsigned long addr, + unsigned long handle, + void (*fn)(unsigned long, + unsigned long, + unsigned long), + unsigned int irq) = { + [MODE_ENABLE_REG] = intc_mode_zero, + [MODE_MASK_REG] = intc_mode_field, + [MODE_DUAL_REG] = intc_mode_field, + [MODE_PRIO_REG] = intc_mode_zero, + [MODE_PCLR_REG] = intc_mode_field, +}; + +static inline void _intc_enable(unsigned int irq, unsigned long handle) +{ + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long addr; + unsigned int cpu; + + for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { + addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); + intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ + [_INTC_FN(handle)], irq); + } +} + +static void intc_enable(unsigned int irq) +{ + _intc_enable(irq, (unsigned long)get_irq_chip_data(irq)); +} + +static void intc_disable(unsigned int irq) +{ + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long handle = (unsigned long) get_irq_chip_data(irq); + unsigned long addr; + unsigned int cpu; + + for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { + addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); + intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\ + [_INTC_FN(handle)], irq); + } +} + +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) +static void intc_mask_ack(unsigned int irq) +{ + struct intc_desc_int *d = get_intc_desc(irq); + unsigned long handle = ack_handle[irq]; + unsigned long addr; + + intc_disable(irq); + + /* read register and write zero only to the assocaited bit */ + + if (handle) { + addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); + switch (_INTC_FN(handle)) { + case REG_FN_MODIFY_BASE + 0: /* 8bit */ + __raw_readb(addr); + __raw_writeb(0xff ^ set_field(0, 1, handle), addr); + break; + case REG_FN_MODIFY_BASE + 1: /* 16bit */ + __raw_readw(addr); + __raw_writew(0xffff ^ set_field(0, 1, handle), addr); + break; + case REG_FN_MODIFY_BASE + 3: /* 32bit */ + __raw_readl(addr); + __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr); + break; + default: + BUG(); + break; + } + } +} +#endif + +static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, + unsigned int nr_hp, + unsigned int irq) +{ + int i; + + /* this doesn't scale well, but... + * + * this function should only be used for cerain uncommon + * operations such as intc_set_priority() and intc_set_sense() + * and in those rare cases performance doesn't matter that much. + * keeping the memory footprint low is more important. + * + * one rather simple way to speed this up and still keep the + * memory footprint down is to make sure the array is sorted + * and then perform a bisect to lookup the irq. 
+ */ + + for (i = 0; i < nr_hp; i++) { + if ((hp + i)->irq != irq) + continue; + + return hp + i; + } + + return NULL; +} + +int intc_set_priority(unsigned int irq, unsigned int prio) +{ + struct intc_desc_int *d = get_intc_desc(irq); + struct intc_handle_int *ihp; + + if (!intc_prio_level[irq] || prio <= 1) + return -EINVAL; + + ihp = intc_find_irq(d->prio, d->nr_prio, irq); + if (ihp) { + if (prio >= (1 << _INTC_WIDTH(ihp->handle))) + return -EINVAL; + + intc_prio_level[irq] = prio; + + /* + * only set secondary masking method directly + * primary masking method is using intc_prio_level[irq] + * priority level will be set during next enable() + */ + + if (_INTC_FN(ihp->handle) != REG_FN_ERR) + _intc_enable(irq, ihp->handle); + } + return 0; +} + +#define VALID(x) (x | 0x80) + +static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = { + [IRQ_TYPE_EDGE_FALLING] = VALID(0), + [IRQ_TYPE_EDGE_RISING] = VALID(1), + [IRQ_TYPE_LEVEL_LOW] = VALID(2), + /* SH7706, SH7707 and SH7709 do not support high level triggered */ +#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \ + !defined(CONFIG_CPU_SUBTYPE_SH7707) && \ + !defined(CONFIG_CPU_SUBTYPE_SH7709) + [IRQ_TYPE_LEVEL_HIGH] = VALID(3), +#endif +}; + +static int intc_set_sense(unsigned int irq, unsigned int type) +{ + struct intc_desc_int *d = get_intc_desc(irq); + unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK]; + struct intc_handle_int *ihp; + unsigned long addr; + + if (!value) + return -EINVAL; + + ihp = intc_find_irq(d->sense, d->nr_sense, irq); + if (ihp) { + addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0); + intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value); + } + return 0; +} + +static unsigned int __init intc_get_reg(struct intc_desc_int *d, + unsigned long address) +{ + unsigned int k; + + for (k = 0; k < d->nr_reg; k++) { + if (d->reg[k] == address) + return k; + } + + BUG(); + return 0; +} + +static intc_enum __init intc_grp_id(struct intc_desc *desc, + intc_enum enum_id) +{ + struct intc_group *g = desc->groups; + unsigned int i, j; + + for (i = 0; g && enum_id && i < desc->nr_groups; i++) { + g = desc->groups + i; + + for (j = 0; g->enum_ids[j]; j++) { + if (g->enum_ids[j] != enum_id) + continue; + + return g->enum_id; + } + } + + return 0; +} + +static unsigned int __init intc_mask_data(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, int do_grps) +{ + struct intc_mask_reg *mr = desc->mask_regs; + unsigned int i, j, fn, mode; + unsigned long reg_e, reg_d; + + for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) { + mr = desc->mask_regs + i; + + for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { + if (mr->enum_ids[j] != enum_id) + continue; + + if (mr->set_reg && mr->clr_reg) { + fn = REG_FN_WRITE_BASE; + mode = MODE_DUAL_REG; + reg_e = mr->clr_reg; + reg_d = mr->set_reg; + } else { + fn = REG_FN_MODIFY_BASE; + if (mr->set_reg) { + mode = MODE_ENABLE_REG; + reg_e = mr->set_reg; + reg_d = mr->set_reg; + } else { + mode = MODE_MASK_REG; + reg_e = mr->clr_reg; + reg_d = mr->clr_reg; + } + } + + fn += (mr->reg_width >> 3) - 1; + return _INTC_MK(fn, mode, + intc_get_reg(d, reg_e), + intc_get_reg(d, reg_d), + 1, + (mr->reg_width - 1) - j); + } + } + + if (do_grps) + return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0); + + return 0; +} + +static unsigned int __init intc_prio_data(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, int do_grps) +{ + struct intc_prio_reg *pr = desc->prio_regs; + unsigned int i, j, fn, mode, bit; + unsigned long 
reg_e, reg_d; + + for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) { + pr = desc->prio_regs + i; + + for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) { + if (pr->enum_ids[j] != enum_id) + continue; + + if (pr->set_reg && pr->clr_reg) { + fn = REG_FN_WRITE_BASE; + mode = MODE_PCLR_REG; + reg_e = pr->set_reg; + reg_d = pr->clr_reg; + } else { + fn = REG_FN_MODIFY_BASE; + mode = MODE_PRIO_REG; + if (!pr->set_reg) + BUG(); + reg_e = pr->set_reg; + reg_d = pr->set_reg; + } + + fn += (pr->reg_width >> 3) - 1; + + BUG_ON((j + 1) * pr->field_width > pr->reg_width); + + bit = pr->reg_width - ((j + 1) * pr->field_width); + + return _INTC_MK(fn, mode, + intc_get_reg(d, reg_e), + intc_get_reg(d, reg_d), + pr->field_width, bit); + } + } + + if (do_grps) + return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0); + + return 0; +} + +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) +static unsigned int __init intc_ack_data(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id) +{ + struct intc_mask_reg *mr = desc->ack_regs; + unsigned int i, j, fn, mode; + unsigned long reg_e, reg_d; + + for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) { + mr = desc->ack_regs + i; + + for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { + if (mr->enum_ids[j] != enum_id) + continue; + + fn = REG_FN_MODIFY_BASE; + mode = MODE_ENABLE_REG; + reg_e = mr->set_reg; + reg_d = mr->set_reg; + + fn += (mr->reg_width >> 3) - 1; + return _INTC_MK(fn, mode, + intc_get_reg(d, reg_e), + intc_get_reg(d, reg_d), + 1, + (mr->reg_width - 1) - j); + } + } + + return 0; +} +#endif + +static unsigned int __init intc_sense_data(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id) +{ + struct intc_sense_reg *sr = desc->sense_regs; + unsigned int i, j, fn, bit; + + for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) { + sr = desc->sense_regs + i; + + for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { + if (sr->enum_ids[j] != enum_id) + continue; + + fn = REG_FN_MODIFY_BASE; + fn += (sr->reg_width >> 3) - 1; + + BUG_ON((j + 1) * sr->field_width > sr->reg_width); + + bit = sr->reg_width - ((j + 1) * sr->field_width); + + return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg), + 0, sr->field_width, bit); + } + } + + return 0; +} + +static void __init intc_register_irq(struct intc_desc *desc, + struct intc_desc_int *d, + intc_enum enum_id, + unsigned int irq) +{ + struct intc_handle_int *hp; + unsigned int data[2], primary; + + /* Prefer single interrupt source bitmap over other combinations: + * 1. bitmap, single interrupt source + * 2. priority, single interrupt source + * 3. bitmap, multiple interrupt sources (groups) + * 4. priority, multiple interrupt sources (groups) + */ + + data[0] = intc_mask_data(desc, d, enum_id, 0); + data[1] = intc_prio_data(desc, d, enum_id, 0); + + primary = 0; + if (!data[0] && data[1]) + primary = 1; + + data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1); + data[1] = data[1] ? 
data[1] : intc_prio_data(desc, d, enum_id, 1); + + if (!data[primary]) + primary ^= 1; + + BUG_ON(!data[primary]); /* must have primary masking method */ + + disable_irq_nosync(irq); + set_irq_chip_and_handler_name(irq, &d->chip, + handle_level_irq, "level"); + set_irq_chip_data(irq, (void *)data[primary]); + + /* set priority level + * - this needs to be at least 2 for 5-bit priorities on 7780 + */ + intc_prio_level[irq] = 2; + + /* enable secondary masking method if present */ + if (data[!primary]) + _intc_enable(irq, data[!primary]); + + /* add irq to d->prio list if priority is available */ + if (data[1]) { + hp = d->prio + d->nr_prio; + hp->irq = irq; + hp->handle = data[1]; + + if (primary) { + /* + * only secondary priority should access registers, so + * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority() + */ + + hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0); + hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0); + } + d->nr_prio++; + } + + /* add irq to d->sense list if sense is available */ + data[0] = intc_sense_data(desc, d, enum_id); + if (data[0]) { + (d->sense + d->nr_sense)->irq = irq; + (d->sense + d->nr_sense)->handle = data[0]; + d->nr_sense++; + } + + /* irq should be disabled by default */ + d->chip.mask(irq); + +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) + if (desc->ack_regs) + ack_handle[irq] = intc_ack_data(desc, d, enum_id); +#endif +} + +static unsigned int __init save_reg(struct intc_desc_int *d, + unsigned int cnt, + unsigned long value, + unsigned int smp) +{ + if (value) { + d->reg[cnt] = value; +#ifdef CONFIG_SMP + d->smp[cnt] = smp; +#endif + return 1; + } + + return 0; +} + + +void __init register_intc_controller(struct intc_desc *desc) +{ + unsigned int i, k, smp; + struct intc_desc_int *d; + + d = alloc_bootmem(sizeof(*d)); + + d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0; + d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0; + d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0; + +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) + d->nr_reg += desc->ack_regs ? 
desc->nr_ack_regs : 0; +#endif + d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg)); +#ifdef CONFIG_SMP + d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp)); +#endif + k = 0; + + if (desc->mask_regs) { + for (i = 0; i < desc->nr_mask_regs; i++) { + smp = IS_SMP(desc->mask_regs[i]); + k += save_reg(d, k, desc->mask_regs[i].set_reg, smp); + k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp); + } + } + + if (desc->prio_regs) { + d->prio = alloc_bootmem(desc->nr_vectors * sizeof(*d->prio)); + + for (i = 0; i < desc->nr_prio_regs; i++) { + smp = IS_SMP(desc->prio_regs[i]); + k += save_reg(d, k, desc->prio_regs[i].set_reg, smp); + k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp); + } + } + + if (desc->sense_regs) { + d->sense = alloc_bootmem(desc->nr_vectors * sizeof(*d->sense)); + + for (i = 0; i < desc->nr_sense_regs; i++) { + k += save_reg(d, k, desc->sense_regs[i].reg, 0); + } + } + + d->chip.name = desc->name; + d->chip.mask = intc_disable; + d->chip.unmask = intc_enable; + d->chip.mask_ack = intc_disable; + d->chip.set_type = intc_set_sense; + +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) + if (desc->ack_regs) { + for (i = 0; i < desc->nr_ack_regs; i++) + k += save_reg(d, k, desc->ack_regs[i].set_reg, 0); + + d->chip.mask_ack = intc_mask_ack; + } +#endif + + BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ + + for (i = 0; i < desc->nr_vectors; i++) { + struct intc_vect *vect = desc->vectors + i; + + intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect)); + } +} diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h new file mode 100644 index 0000000..68e212f --- /dev/null +++ b/include/linux/sh_intc.h @@ -0,0 +1,91 @@ +#ifndef __SH_INTC_H +#define __SH_INTC_H + +typedef unsigned char intc_enum; + +struct intc_vect { + intc_enum enum_id; + unsigned short vect; +}; + +#define INTC_VECT(enum_id, vect) { enum_id, vect } +#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq)) + +struct intc_group { + intc_enum enum_id; + intc_enum enum_ids[32]; +}; + +#define INTC_GROUP(enum_id, ids...) 
{ enum_id, { ids } } + +struct intc_mask_reg { + unsigned long set_reg, clr_reg, reg_width; + intc_enum enum_ids[32]; +#ifdef CONFIG_SMP + unsigned long smp; +#endif +}; + +struct intc_prio_reg { + unsigned long set_reg, clr_reg, reg_width, field_width; + intc_enum enum_ids[16]; +#ifdef CONFIG_SMP + unsigned long smp; +#endif +}; + +struct intc_sense_reg { + unsigned long reg, reg_width, field_width; + intc_enum enum_ids[16]; +}; + +#ifdef CONFIG_SMP +#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8) +#else +#define INTC_SMP(stride, nr) +#endif + +struct intc_desc { + struct intc_vect *vectors; + unsigned int nr_vectors; + struct intc_group *groups; + unsigned int nr_groups; + struct intc_mask_reg *mask_regs; + unsigned int nr_mask_regs; + struct intc_prio_reg *prio_regs; + unsigned int nr_prio_regs; + struct intc_sense_reg *sense_regs; + unsigned int nr_sense_regs; + char *name; +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) + struct intc_mask_reg *ack_regs; + unsigned int nr_ack_regs; +#endif +}; + +#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) +#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \ + mask_regs, prio_regs, sense_regs) \ +struct intc_desc symbol __initdata = { \ + _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ + _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ + _INTC_ARRAY(sense_regs), \ + chipname, \ +} + +#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) +#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \ + mask_regs, prio_regs, sense_regs, ack_regs) \ +struct intc_desc symbol __initdata = { \ + _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \ + _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \ + _INTC_ARRAY(sense_regs), \ + chipname, \ + _INTC_ARRAY(ack_regs), \ +} +#endif + +void __init register_intc_controller(struct intc_desc *desc); +int intc_set_priority(unsigned int irq, unsigned int prio); + +#endif /* __SH_INTC_H */ -- cgit v0.10.2 From 225c9a8d1da274bf23efec43ec28b1c9e45e12f8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 1 Oct 2008 16:24:32 +0900 Subject: video: sh_mobile_lcdcfb: Support HAVE_CLK=n configurations. This provides a workaround for users of sh_mobile_lcdcfb that don't define HAVE_CLK and have otherwise sane clock initialization. At the same time, move the sh_mobile_lcdc.h header to include/video/. Signed-off-by: Paul Mundt diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c index 00e632f..7ae8dcd 100644 --- a/arch/sh/boards/board-ap325rxa.c +++ b/arch/sh/boards/board-ap325rxa.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include
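
For readers unfamiliar with the interface that the relocated include/linux/sh_intc.h exports above, the sketch below shows how a CPU setup file could describe an interrupt controller with those macros and hand it to register_intc_controller(). It is a minimal, hypothetical example only: the enum IDs, vector numbers and register addresses are invented for illustration and do not correspond to any real SuperH SoC; actual values live in the per-CPU setup files under arch/sh/kernel/cpu/.

/*
 * Hypothetical example only -- all addresses, vectors and IDs are made up.
 */
#include <linux/init.h>
#include <linux/sh_intc.h>

enum {
	UNUSED = 0,

	/* interrupt sources (hypothetical) */
	IRQ0, SCIF0, SCIF1, TMU0,

	/* interrupt group (hypothetical) */
	SCIF,
};

static struct intc_vect vectors[] __initdata = {
	INTC_VECT(IRQ0,  0x600), INTC_VECT(TMU0,  0x400),
	INTC_VECT(SCIF0, 0xc00), INTC_VECT(SCIF1, 0xc20),
};

static struct intc_group groups[] __initdata = {
	INTC_GROUP(SCIF, SCIF0, SCIF1),
};

/* one 8-bit mask register pair: writing set_reg masks, clr_reg unmasks */
static struct intc_mask_reg mask_registers[] __initdata = {
	{ 0xffd00044, 0xffd00064, 8,
	  { 0, 0, 0, 0, 0, 0, SCIF1, SCIF0 } },
};

/* one 16-bit priority register carved into 4-bit fields */
static struct intc_prio_reg prio_registers[] __initdata = {
	{ 0xffd00004, 0, 16, 4,
	  { TMU0, SCIF, IRQ0, 0 } },
};

/* one 16-bit sense register with 2-bit edge/level fields */
static struct intc_sense_reg sense_registers[] __initdata = {
	{ 0xffd0001c, 16, 2,
	  { IRQ0, 0, 0, 0, 0, 0, 0, 0 } },
};

static DECLARE_INTC_DESC(intc_desc, "example", vectors, groups,
			 mask_registers, prio_registers, sense_registers);

void __init plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}

With a descriptor like this, register_intc_controller() walks the vectors array and, as the moved code above shows, binds each vector to the controller's irq_chip with handle_level_irq, leaving the line masked until a driver enables it.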