From 5cd0c3442021fbf39c7152b341a952aa24054be9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 3 May 2005 12:18:46 +0100 Subject: [PATCH] ARM: decompressor: use platform debug macros Rather than duplicate the assembly for debug macros in the decompressor head.S, use asm/arch/debug-macros.S instead. Signed-off-by: Russell King diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index c0e7aff..7c7f475 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -18,48 +18,30 @@ * Please select one of the following when turning on debugging. */ #ifdef DEBUG -#if defined(CONFIG_DEBUG_DC21285_PORT) - .macro loadsp, rb - mov \rb, #0x42000000 - .endm - .macro writeb, rb - str \rb, [r3, #0x160] - .endm -#elif defined(CONFIG_DEBUG_ICEDCC) + +#include + +#if defined(CONFIG_DEBUG_ICEDCC) .macro loadsp, rb .endm - .macro writeb, rb - mcr p14, 0, \rb, c0, c1, 0 - .endm -#elif defined(CONFIG_FOOTBRIDGE) - .macro loadsp, rb - mov \rb, #0x7c000000 + .macro writeb, ch, rb + mcr p14, 0, \ch, c0, c1, 0 .endm - .macro writeb, rb - strb \rb, [r3, #0x3f8] +#else + .macro writeb, ch, rb + senduart \ch, \rb .endm -#elif defined(CONFIG_ARCH_RPC) + +#if defined(CONFIG_FOOTBRIDGE) || \ + defined(CONFIG_ARCH_RPC) || \ + defined(CONFIG_ARCH_INTEGRATOR) || \ + defined(CONFIG_ARCH_PXA) || \ + defined(CONFIG_ARCH_IXP4XX) || \ + defined(CONFIG_ARCH_IXP2000) || \ + defined(CONFIG_ARCH_LH7A40X) || \ + defined(CONFIG_ARCH_OMAP) .macro loadsp, rb - mov \rb, #0x03000000 - orr \rb, \rb, #0x00010000 - .endm - .macro writeb, rb - strb \rb, [r3, #0x3f8 << 2] - .endm -#elif defined(CONFIG_ARCH_INTEGRATOR) - .macro loadsp, rb - mov \rb, #0x16000000 - .endm - .macro writeb, rb - strb \rb, [r3, #0] - .endm -#elif defined(CONFIG_ARCH_PXA) /* Xscale-type */ - .macro loadsp, rb - mov \rb, #0x40000000 - orr \rb, \rb, #0x00100000 - .endm - .macro writeb, rb - strb \rb, [r3, #0] + addruart \rb .endm #elif defined(CONFIG_ARCH_SA1100) .macro loadsp, rb @@ -70,65 +52,22 @@ add \rb, \rb, #0x00010000 @ Ser1 # endif .endm - .macro writeb, rb - str \rb, [r3, #0x14] @ UTDR - .endm -#elif defined(CONFIG_ARCH_IXP4XX) - .macro loadsp, rb - mov \rb, #0xc8000000 - .endm - .macro writeb, rb - str \rb, [r3, #0] -#elif defined(CONFIG_ARCH_IXP2000) - .macro loadsp, rb - mov \rb, #0xc0000000 - orr \rb, \rb, #0x00030000 - .endm - .macro writeb, rb - str \rb, [r3, #0] - .endm -#elif defined(CONFIG_ARCH_LH7A40X) - .macro loadsp, rb - ldr \rb, =0x80000700 @ UART2 UARTBASE - .endm - .macro writeb, rb - strb \rb, [r3, #0] - .endm -#elif defined(CONFIG_ARCH_OMAP) - .macro loadsp, rb - mov \rb, #0xff000000 @ physical base address - add \rb, \rb, #0x00fb0000 -#if defined(CONFIG_OMAP_LL_DEBUG_UART2) || defined(CONFIG_OMAP_LL_DEBUG_UART3) - add \rb, \rb, #0x00000800 -#endif -#ifdef CONFIG_OMAP_LL_DEBUG_UART3 - add \rb, \rb, #0x00009000 -#endif - .endm - .macro writeb, rb - strb \rb, [r3] - .endm #elif defined(CONFIG_ARCH_IOP331) .macro loadsp, rb mov \rb, #0xff000000 orr \rb, \rb, #0x00ff0000 orr \rb, \rb, #0x0000f700 @ location of the UART .endm - .macro writeb, rb - str \rb, [r3, #0] - .endm #elif defined(CONFIG_ARCH_S3C2410) - .macro loadsp, rb + .macro loadsp, rb mov \rb, #0x50000000 add \rb, \rb, #0x4000 * CONFIG_S3C2410_LOWLEVEL_UART_PORT .endm - .macro writeb, rb - strb \rb, [r3, #0x20] - .endm #else #error no serial architecture defined #endif #endif +#endif .macro kputc,val mov r0, \val @@ -734,7 +673,7 @@ puts: loadsp r3 1: ldrb r2, [r0], #1 teq r2, #0 moveq pc, lr -2: writeb r2 +2: writeb r2, r3 mov 
r1, #0x00020000 3: subs r1, r1, #1 bne 3b -- cgit v0.10.2 From 5c3073e691b56dabbdec60dda4258b4e50d64872 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 3 May 2005 12:20:29 +0100 Subject: [PATCH] ARM: cleanup vmalloc start/offset macros VMALLOC_START and VMALLOC_OFFSET are common between all ARM machine classes. Move them into include/asm-arm/pgtable.h, but allow a machine class to override them if required. Signed-off-by: Russell King diff --git a/include/asm-arm/arch-cl7500/vmalloc.h b/include/asm-arm/arch-cl7500/vmalloc.h index 91883de..ba8d7a8 100644 --- a/include/asm-arm/arch-cl7500/vmalloc.h +++ b/include/asm-arm/arch-cl7500/vmalloc.h @@ -1,15 +1,4 @@ /* * linux/include/asm-arm/arch-cl7500/vmalloc.h */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x1c000000) diff --git a/include/asm-arm/arch-clps711x/vmalloc.h b/include/asm-arm/arch-clps711x/vmalloc.h index 42571ed..a5dfe96 100644 --- a/include/asm-arm/arch-clps711x/vmalloc.h +++ b/include/asm-arm/arch-clps711x/vmalloc.h @@ -17,15 +17,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) diff --git a/include/asm-arm/arch-ebsa110/vmalloc.h b/include/asm-arm/arch-ebsa110/vmalloc.h index 759659b..26674ba 100644 --- a/include/asm-arm/arch-ebsa110/vmalloc.h +++ b/include/asm-arm/arch-ebsa110/vmalloc.h @@ -7,15 +7,4 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. 
;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x1f000000) diff --git a/include/asm-arm/arch-ebsa285/vmalloc.h b/include/asm-arm/arch-ebsa285/vmalloc.h index def705a..d1ca955 100644 --- a/include/asm-arm/arch-ebsa285/vmalloc.h +++ b/include/asm-arm/arch-ebsa285/vmalloc.h @@ -8,17 +8,6 @@ #include -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) - #ifdef CONFIG_ARCH_FOOTBRIDGE #define VMALLOC_END (PAGE_OFFSET + 0x30000000) #else diff --git a/include/asm-arm/arch-epxa10db/vmalloc.h b/include/asm-arm/arch-epxa10db/vmalloc.h index d31ef85..546fb7d 100644 --- a/include/asm-arm/arch-epxa10db/vmalloc.h +++ b/include/asm-arm/arch-epxa10db/vmalloc.h @@ -17,15 +17,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) diff --git a/include/asm-arm/arch-h720x/vmalloc.h b/include/asm-arm/arch-h720x/vmalloc.h index 4af523a..b4693cb 100644 --- a/include/asm-arm/arch-h720x/vmalloc.h +++ b/include/asm-arm/arch-h720x/vmalloc.h @@ -5,17 +5,6 @@ #ifndef __ARCH_ARM_VMALLOC_H #define __ARCH_ARM_VMALLOC_H -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) #endif diff --git a/include/asm-arm/arch-imx/vmalloc.h b/include/asm-arm/arch-imx/vmalloc.h index 252038f..cb61691 100644 --- a/include/asm-arm/arch-imx/vmalloc.h +++ b/include/asm-arm/arch-imx/vmalloc.h @@ -17,16 +17,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. 
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) diff --git a/include/asm-arm/arch-integrator/vmalloc.h b/include/asm-arm/arch-integrator/vmalloc.h index 50e9aee..170ccce 100644 --- a/include/asm-arm/arch-integrator/vmalloc.h +++ b/include/asm-arm/arch-integrator/vmalloc.h @@ -17,15 +17,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) diff --git a/include/asm-arm/arch-iop3xx/vmalloc.h b/include/asm-arm/arch-iop3xx/vmalloc.h index dc1d2a9..0f2f684 100644 --- a/include/asm-arm/arch-iop3xx/vmalloc.h +++ b/include/asm-arm/arch-iop3xx/vmalloc.h @@ -10,9 +10,6 @@ * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) //#define VMALLOC_END (0xe8000000) /* increase usable physical RAM to ~992M per RMK */ #define VMALLOC_END (0xfe000000) diff --git a/include/asm-arm/arch-ixp2000/vmalloc.h b/include/asm-arm/arch-ixp2000/vmalloc.h index 2e4bcbc..473dff4 100644 --- a/include/asm-arm/arch-ixp2000/vmalloc.h +++ b/include/asm-arm/arch-ixp2000/vmalloc.h @@ -17,7 +17,4 @@ * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END 0xfaffefff diff --git a/include/asm-arm/arch-ixp4xx/vmalloc.h b/include/asm-arm/arch-ixp4xx/vmalloc.h index da46e56..050d46e 100644 --- a/include/asm-arm/arch-ixp4xx/vmalloc.h +++ b/include/asm-arm/arch-ixp4xx/vmalloc.h @@ -1,17 +1,5 @@ /* * linux/include/asm-arm/arch-ixp4xx/vmalloc.h */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. 
;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END (0xFF000000) diff --git a/include/asm-arm/arch-l7200/vmalloc.h b/include/asm-arm/arch-l7200/vmalloc.h index edeaebe..816231e 100644 --- a/include/asm-arm/arch-l7200/vmalloc.h +++ b/include/asm-arm/arch-l7200/vmalloc.h @@ -1,15 +1,4 @@ /* * linux/include/asm-arm/arch-l7200/vmalloc.h */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) diff --git a/include/asm-arm/arch-lh7a40x/vmalloc.h b/include/asm-arm/arch-lh7a40x/vmalloc.h index 5ac6079..8163e45 100644 --- a/include/asm-arm/arch-lh7a40x/vmalloc.h +++ b/include/asm-arm/arch-lh7a40x/vmalloc.h @@ -7,15 +7,4 @@ * version 2 as published by the Free Software Foundation. * */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after - * the physical memory until the kernel virtual memory starts. That - * means that any out-of-bounds memory accesses will hopefully be - * caught. The vmalloc() routines leaves a hole of 4kB (one page) - * between each vmalloced area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (0xe8000000) diff --git a/include/asm-arm/arch-omap/vmalloc.h b/include/asm-arm/arch-omap/vmalloc.h index c6a8358..5b8bd8d 100644 --- a/include/asm-arm/arch-omap/vmalloc.h +++ b/include/asm-arm/arch-omap/vmalloc.h @@ -17,17 +17,5 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) diff --git a/include/asm-arm/arch-pxa/vmalloc.h b/include/asm-arm/arch-pxa/vmalloc.h index 3381af6..5bb450c 100644 --- a/include/asm-arm/arch-pxa/vmalloc.h +++ b/include/asm-arm/arch-pxa/vmalloc.h @@ -8,15 +8,4 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. 
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (0xe8000000) diff --git a/include/asm-arm/arch-rpc/vmalloc.h b/include/asm-arm/arch-rpc/vmalloc.h index a13c27f..077046b 100644 --- a/include/asm-arm/arch-rpc/vmalloc.h +++ b/include/asm-arm/arch-rpc/vmalloc.h @@ -7,15 +7,4 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x1c000000) diff --git a/include/asm-arm/arch-s3c2410/vmalloc.h b/include/asm-arm/arch-s3c2410/vmalloc.h index 5fe72ad..33963cd 100644 --- a/include/asm-arm/arch-s3c2410/vmalloc.h +++ b/include/asm-arm/arch-s3c2410/vmalloc.h @@ -19,18 +19,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ - -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END (0xE0000000) #endif /* __ASM_ARCH_VMALLOC_H */ diff --git a/include/asm-arm/arch-sa1100/vmalloc.h b/include/asm-arm/arch-sa1100/vmalloc.h index 135bc94..2fb1c6f 100644 --- a/include/asm-arm/arch-sa1100/vmalloc.h +++ b/include/asm-arm/arch-sa1100/vmalloc.h @@ -1,15 +1,4 @@ /* * linux/include/asm-arm/arch-sa1100/vmalloc.h */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (0xe8000000) diff --git a/include/asm-arm/arch-shark/vmalloc.h b/include/asm-arm/arch-shark/vmalloc.h index 1cc2009..10db5d1 100644 --- a/include/asm-arm/arch-shark/vmalloc.h +++ b/include/asm-arm/arch-shark/vmalloc.h @@ -1,15 +1,4 @@ /* * linux/include/asm-arm/arch-rpc/vmalloc.h */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. 
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_END (PAGE_OFFSET + 0x10000000) diff --git a/include/asm-arm/arch-versatile/vmalloc.h b/include/asm-arm/arch-versatile/vmalloc.h index adfb3482..ac780df 100644 --- a/include/asm-arm/arch-versatile/vmalloc.h +++ b/include/asm-arm/arch-versatile/vmalloc.h @@ -18,16 +18,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_END (PAGE_OFFSET + 0x18000000) diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h index 2df4eac..a9892eb 100644 --- a/include/asm-arm/pgtable.h +++ b/include/asm-arm/pgtable.h @@ -17,6 +17,23 @@ #include /* + * Just any arbitrary offset to the start of the vmalloc VM area: the + * current 8MB value just means that there will be a 8MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced + * area for the same reason. ;) + * + * Note that platforms may override VMALLOC_START, but they must provide + * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space, + * which may not overlap IO space. + */ +#ifndef VMALLOC_START +#define VMALLOC_OFFSET (8*1024*1024) +#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) +#endif + +/* * Hardware-wise, we have a two level page table structure, where the first * level has 4096 entries, and the second level has 256 entries. Each entry * is one 32-bit word. 
Most of the bits in the second level entry are used -- cgit v0.10.2 From 1f9c381fa3e0b9b9042e310c69df87eaf9b46ea4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 3 May 2005 12:22:19 +0100 Subject: [PATCH] ARM: Clean up commenting/spacing for Integrator Signed-off-by: Russell King diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 86c50c3..bd17b51 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -216,7 +216,9 @@ integrator_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) write_seqlock(&xtime_lock); - // ...clear the interrupt + /* + * clear the interrupt + */ timer1->TimerClear = 1; timer_tick(regs); @@ -264,7 +266,7 @@ void __init integrator_time_init(unsigned long reload, unsigned int ctrl) timer1->TimerValue = timer_reload; timer1->TimerControl = timer_ctrl; - /* + /* * Make irqs happen for the system timer */ setup_irq(IRQ_TIMERINT1, &integrator_timer_irq); diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c index 9d182b7..d2c0ab2 100644 --- a/arch/arm/mach-integrator/leds.c +++ b/arch/arm/mach-integrator/leds.c @@ -37,7 +37,7 @@ static void integrator_leds_event(led_event_t ledevt) unsigned long flags; const unsigned int dbg_base = IO_ADDRESS(INTEGRATOR_DBG_BASE); unsigned int update_alpha_leds; - + // yup, change the LEDs local_irq_save(flags); update_alpha_leds = 0; diff --git a/include/asm-arm/arch-integrator/platform.h b/include/asm-arm/arch-integrator/platform.h index 6b67e41..bd364f5 100644 --- a/include/asm-arm/arch-integrator/platform.h +++ b/include/asm-arm/arch-integrator/platform.h @@ -20,14 +20,14 @@ * * Copyright © ARM Limited 1998. All rights reserved. * ***********************************************************************/ /* ************************************************************************ - * + * * Integrator address map - * + * * NOTE: This is a multi-hosted header file for use with uHAL and * supported debuggers. - * + * * $Id: platform.s,v 1.32 2000/02/18 10:51:39 asims Exp $ - * + * * ***********************************************************************/ #ifndef __address_h @@ -40,22 +40,22 @@ * Memory definitions * ------------------------------------------------------------------------ * Integrator memory map - * + * */ #define INTEGRATOR_BOOT_ROM_LO 0x00000000 #define INTEGRATOR_BOOT_ROM_HI 0x20000000 #define INTEGRATOR_BOOT_ROM_BASE INTEGRATOR_BOOT_ROM_HI /* Normal position */ #define INTEGRATOR_BOOT_ROM_SIZE SZ_512K -/* +/* * New Core Modules have different amounts of SSRAM, the amount of SSRAM * fitted can be found in HDR_STAT. - * + * * The symbol INTEGRATOR_SSRAM_SIZE is kept, however this now refers to * the minimum amount of SSRAM fitted on any core module. - * + * * New Core Modules also alias the SSRAM. - * + * */ #define INTEGRATOR_SSRAM_BASE 0x00000000 #define INTEGRATOR_SSRAM_ALIAS_BASE 0x10800000 @@ -67,9 +67,9 @@ #define INTEGRATOR_MBRD_SSRAM_BASE 0x28000000 #define INTEGRATOR_MBRD_SSRAM_SIZE SZ_512K -/* +/* * SDRAM is a SIMM therefore the size is not known. 
- * + * */ #define INTEGRATOR_SDRAM_BASE 0x00040000 @@ -79,9 +79,9 @@ #define INTEGRATOR_HDR2_SDRAM_BASE 0xA0000000 #define INTEGRATOR_HDR3_SDRAM_BASE 0xB0000000 -/* +/* * Logic expansion modules - * + * */ #define INTEGRATOR_LOGIC_MODULES_BASE 0xC0000000 #define INTEGRATOR_LOGIC_MODULE0_BASE 0xC0000000 @@ -92,7 +92,7 @@ /* ------------------------------------------------------------------------ * Integrator header card registers * ------------------------------------------------------------------------ - * + * */ #define INTEGRATOR_HDR_ID_OFFSET 0x00 #define INTEGRATOR_HDR_PROC_OFFSET 0x04 @@ -185,12 +185,12 @@ /* ------------------------------------------------------------------------ * Integrator system registers * ------------------------------------------------------------------------ - * + * */ -/* +/* * System Controller - * + * */ #define INTEGRATOR_SC_ID_OFFSET 0x00 #define INTEGRATOR_SC_OSC_OFFSET 0x04 @@ -230,11 +230,11 @@ #define INTEGRATOR_SC_CTRL_URTS1 (1 << 6) #define INTEGRATOR_SC_CTRL_UDTR1 (1 << 7) -/* +/* * External Bus Interface - * + * */ -#define INTEGRATOR_EBI_BASE 0x12000000 +#define INTEGRATOR_EBI_BASE 0x12000000 #define INTEGRATOR_EBI_CSR0_OFFSET 0x00 #define INTEGRATOR_EBI_CSR1_OFFSET 0x04 @@ -279,9 +279,9 @@ #define INTEGRATOR_KBD_BASE 0x18000000 /* Keyboard */ #define INTEGRATOR_MOUSE_BASE 0x19000000 /* Mouse */ -/* +/* * LED's & Switches - * + * */ #define INTEGRATOR_DBG_ALPHA_OFFSET 0x00 #define INTEGRATOR_DBG_LEDS_OFFSET 0x04 @@ -300,7 +300,7 @@ * ------------------------------------------------------------------------ */ /* PS2 Keyboard interface */ -#define KMI0_BASE INTEGRATOR_KBD_BASE +#define KMI0_BASE INTEGRATOR_KBD_BASE /* PS2 Mouse interface */ #define KMI1_BASE INTEGRATOR_MOUSE_BASE @@ -313,7 +313,7 @@ * This represents a fairly liberal usage of address space. Even though * the V3 only has two windows (therefore we need to map stuff on the fly), * we maintain the same addresses, even if they're not mapped. - * + * */ #define PHYS_PCI_MEM_BASE 0x40000000 /* 512M to xxx */ /* unused 256M from A0000000-AFFFFFFF might be used for I2O ??? @@ -326,7 +326,7 @@ */ #define PHYS_PCI_V3_BASE 0x62000000 -#define PCI_DRAMSIZE INTEGRATOR_SSRAM_SIZE +#define PCI_DRAMSIZE INTEGRATOR_SSRAM_SIZE /* 'export' these to UHAL */ #define UHAL_PCI_IO PCI_IO_BASE @@ -334,7 +334,7 @@ #define UHAL_PCI_ALLOC_IO_BASE 0x00004000 #define UHAL_PCI_ALLOC_MEM_BASE PCI_MEM_BASE #define UHAL_PCI_MAX_SLOT 20 - + /* ======================================================================== * Start of uHAL definitions * ======================================================================== @@ -343,17 +343,17 @@ /* ------------------------------------------------------------------------ * Integrator Interrupt Controllers * ------------------------------------------------------------------------ - * - * Offsets from interrupt controller base - * + * + * Offsets from interrupt controller base + * * System Controller interrupt controller base is - * + * * INTEGRATOR_IC_BASE + (header_number << 6) - * + * * Core Module interrupt controller base is - * - * INTEGRATOR_HDR_IC - * + * + * INTEGRATOR_HDR_IC + * */ #define IRQ_STATUS 0 #define IRQ_RAW_STATUS 0x04 @@ -374,22 +374,22 @@ /* ------------------------------------------------------------------------ * Interrupts * ------------------------------------------------------------------------ - * - * + * + * * Each Core Module has two interrupts controllers, one on the core module * itself and one in the system controller on the motherboard. 
The * READ_INT macro in target.s reads both interrupt controllers and returns * a 32 bit bitmask, bits 0 to 23 are interrupts from the system controller * and bits 24 to 31 are from the core module. - * + * * The following definitions relate to the bitmask returned by READ_INT. - * + * */ /* ------------------------------------------------------------------------ * LED's - The header LED is not accessible via the uHAL API * ------------------------------------------------------------------------ - * + * */ #define GREEN_LED 0x01 #define YELLOW_LED 0x02 @@ -399,44 +399,44 @@ #define LED_BANK INTEGRATOR_DBG_LEDS -/* +/* * Memory definitions - run uHAL out of SSRAM. - * + * */ #define uHAL_MEMORY_SIZE INTEGRATOR_SSRAM_SIZE -/* +/* * Application Flash - * + * */ #define FLASH_BASE INTEGRATOR_FLASH_BASE #define FLASH_SIZE INTEGRATOR_FLASH_SIZE #define FLASH_END (FLASH_BASE + FLASH_SIZE - 1) #define FLASH_BLOCK_SIZE SZ_128K -/* +/* * Boot Flash - * + * */ #define EPROM_BASE INTEGRATOR_BOOT_ROM_HI #define EPROM_SIZE INTEGRATOR_BOOT_ROM_SIZE #define EPROM_END (EPROM_BASE + EPROM_SIZE - 1) -/* +/* * Clean base - dummy - * + * */ #define CLEAN_BASE EPROM_BASE -/* +/* * Timer definitions - * + * * Only use timer 1 & 2 * (both run at 24MHz and will need the clock divider set to 16). - * + * * Timer 0 runs at bus frequency and therefore could vary and currently * uHAL can't handle that. - * + * */ #define INTEGRATOR_TIMER0_BASE INTEGRATOR_CT_BASE @@ -447,9 +447,9 @@ #define MAX_PERIOD 699050 #define TICKS_PER_uSEC 24 -/* - * These are useconds NOT ticks. - * +/* + * These are useconds NOT ticks. + * */ #define mSEC_1 1000 #define mSEC_5 (mSEC_1 * 5) -- cgit v0.10.2 From eca02b0c1dc1da374216128157747d8ed994e5ef Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 3 May 2005 12:23:56 +0100 Subject: [PATCH] ARM: Cleanup kmalloc in cyber2000fb We use one kmalloc to allocate two structures needlessly. Combine these two structures into one. Signed-off-by: Russell King diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c index 8b1b7c6..3894b2a 100644 --- a/drivers/video/cyber2000fb.c +++ b/drivers/video/cyber2000fb.c @@ -90,6 +90,8 @@ struct cfb_info { */ u_char ramdac_ctrl; u_char ramdac_powerdown; + + u32 pseudo_palette[16]; }; static char *default_font = "Acorn8x8"; @@ -1223,9 +1225,7 @@ cyberpro_alloc_fb_info(unsigned int id, char *name) { struct cfb_info *cfb; - cfb = kmalloc(sizeof(struct cfb_info) + - sizeof(u32) * 16, GFP_KERNEL); - + cfb = kmalloc(sizeof(struct cfb_info), GFP_KERNEL); if (!cfb) return NULL; @@ -1281,7 +1281,7 @@ cyberpro_alloc_fb_info(unsigned int id, char *name) cfb->fb.fbops = &cyber2000fb_ops; cfb->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; - cfb->fb.pseudo_palette = (void *)(cfb + 1); + cfb->fb.pseudo_palette = cfb->pseudo_palette; fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0); -- cgit v0.10.2 From 52292c9b8c16aa9024a65aaeeca434bb8fec7d24 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 3 May 2005 15:34:58 +1000 Subject: [PATCH] ppc64: fix gcc 4.0 vs CONFIG_ALTIVEC gcc-4.0 generates altivec code implicitly when -mcpu indicates an altivec capable CPU which is not suitable for the kernel. However, we used to set -mcpu=970 when CONFIG_ALTIVEC was set because a gcc-3.x bug prevented from using -maltivec along with -mcpu=power4, thus prevented building the RAID6 altivec code. This patch fixes all of this by testing for the gcc version. 
If 4.0 or later, just normally use -mcpu=power4 and let the RAID6 code add -maltivec to the few files it needs to be compiled with altivec support. For 3.x, we still use -mcpu=970 to work around the above problem, which is fine as 3.x will never implicitly generate altivec code. The Makefile hackery may not be the most lovely, I welcome anybody more skilled than me to improve it. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Linus Torvalds diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile index d33e20b..691f300 100644 --- a/arch/ppc64/Makefile +++ b/arch/ppc64/Makefile @@ -56,13 +56,20 @@ LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD) CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \ -mcall-aixdesc +GCC_VERSION := $(call cc-version) +GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;) + ifeq ($(CONFIG_POWER4_ONLY),y) ifeq ($(CONFIG_ALTIVEC),y) +ifeq ($(GCC_BROKEN_VEC),y) CFLAGS += $(call cc-option,-mcpu=970) else CFLAGS += $(call cc-option,-mcpu=power4) endif else + CFLAGS += $(call cc-option,-mcpu=power4) +endif +else CFLAGS += $(call cc-option,-mtune=power4) endif -- cgit v0.10.2 From 7a9bdd8842d0847f3b15068550a3632a2d259057 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Tue, 5 Apr 2005 18:05:00 -0700 Subject: [IA64] Add config SCHED_SMT Now that we have MC/MT detection patches in, appended patch allows us to configure MT scheduler optimizations. For now, we will this option off by default. There is some discussion going on lkml about setting up sched-domains which are absolutely needed (like for example, we shouldn't setup SMT domain for non MT processors). Once that patch goes in, we can enable this option by default. Signed-off-by: Suresh Siddha Signed-off-by: Tony Luck diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 468dbe8..cad8346 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -261,6 +261,15 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. +config SCHED_SMT + bool "SMT scheduler support" + depends on SMP + default off + help + Improves the CPU scheduler's decision making when dealing with + Intel IA64 chips with MultiThreading at a cost of slightly increased + overhead in some places. If unsure say N here. + config PREEMPT bool "Preemptible Kernel" help -- cgit v0.10.2 From 512f64295f2f0049515dcc4e97c1f1ae0df9629c Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Tue, 26 Apr 2005 23:00:00 -0700 Subject: [IA64] Fix memcpy_mck.S for current binutils The current ia64 assembler complains about mismatching .proc/.endp pairs. (Same patch also sent by H.J. Lu) Signed-off-by: Andreas Schwab Signed-off-by: Tony Luck diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S index 3c2cd2f..6f308e6 100644 --- a/arch/ia64/lib/memcpy_mck.S +++ b/arch/ia64/lib/memcpy_mck.S @@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy) mov f6=f0 br.cond.sptk .common_code ;; +END(memcpy) GLOBAL_ENTRY(__copy_user) .prologue // check dest alignment @@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ #undef B #undef C #undef D -END(memcpy) /* * Due to lack of local tag support in gcc 2.x assembler, it is not clear which -- cgit v0.10.2 From 9df6f705c0934a49d0f0a3468a5b5044c8aec4f1 Mon Sep 17 00:00:00 2001 From: David Mosberger-Tang Date: Fri, 25 Mar 2005 00:16:00 -0700 Subject: [IA64] fix typos caught by new assembler Patch below fixes 3 trivial typos which are caught by the new assembler (v2.169.90). Please apply. 
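For reference, the ordering that the newer assembler enforces looks roughly like this (a minimal, hypothetical sketch, not taken from any of the files touched below; "example" is only a placeholder symbol): unwind annotations such as .save describe the prologue, so they must appear after .prologue but before .body, and each GLOBAL_ENTRY() must be closed by an END() naming the same symbol.

GLOBAL_ENTRY(example)
	.prologue
	.save	ar.lc, r3		// annotate where ar.lc is preserved
	mov.i	r3 = ar.lc		// ...and actually save it
	.body				// unwind directives go above this point
	// ... function body would go here ...
	mov.i	ar.lc = r3		// restore ar.lc before returning
	br.ret.sptk.many rp
END(example)				// must match GLOBAL_ENTRY(example)
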
[Note: fix to memcpy that was also part of this patch was separately applied from patches by H.J. and Andreas ... so the delta here only has the other two fixes. -Tony] Signed-off-by: David Mosberger-Tang Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index d3f0938..81c45d4 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit .mem.offset 8,0 st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit -END(ia64_ret_from_ia32_execve_syscall) +END(ia64_ret_from_ia32_execve) // fall through #endif /* CONFIG_IA32_SUPPORT */ GLOBAL_ENTRY(ia64_leave_kernel) diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S index bd8cf90..f26c16a 100644 --- a/arch/ia64/lib/memset.S +++ b/arch/ia64/lib/memset.S @@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset) { .mmi .prologue alloc tmp = ar.pfs, 3, 0, 0, 0 - .body lfetch.nt1 [dest] // .save ar.lc, save_lc mov.i save_lc = ar.lc + .body } { .mmi mov ret0 = dest // return value cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero -- cgit v0.10.2 From 6adc4cc0eead0c1897d7f1416d749c8e7c91e0bc Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 16 Feb 2005 16:38:00 -0700 Subject: [IA64-SGI] remove unused sn header files This patch makes Jes' patch (which also contains the removal of fetchop.h) a bit smaller, and removes two other unused files at the same time, sndrv.h and sn_fru.h. Signed-off-by: Jesse Barnes Signed-off-by: Tony Luck diff --git a/include/asm-ia64/sn/fetchop.h b/include/asm-ia64/sn/fetchop.h deleted file mode 100644 index 5f4ad8f..0000000 --- a/include/asm-ia64/sn/fetchop.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved. - */ - -#ifndef _ASM_IA64_SN_FETCHOP_H -#define _ASM_IA64_SN_FETCHOP_H - -#include - -#define FETCHOP_BASENAME "sgi_fetchop" -#define FETCHOP_FULLNAME "/dev/sgi_fetchop" - - - -#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */ - -#define FETCHOP_LOAD 0 -#define FETCHOP_INCREMENT 8 -#define FETCHOP_DECREMENT 16 -#define FETCHOP_CLEAR 24 - -#define FETCHOP_STORE 0 -#define FETCHOP_AND 24 -#define FETCHOP_OR 32 - -#define FETCHOP_CLEAR_CACHE 56 - -#define FETCHOP_LOAD_OP(addr, op) ( \ - *(volatile long *)((char*) (addr) + (op))) - -#define FETCHOP_STORE_OP(addr, op, x) ( \ - *(volatile long *)((char*) (addr) + (op)) = (long) (x)) - -#ifdef __KERNEL__ - -/* - * Convert a region 6 (kaddr) address to the address of the fetchop variable - */ -#define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr) - - -/* - * Each Atomic Memory Operation (AMO formerly known as fetchop) - * variable is 64 bytes long. The first 8 bytes are used. The - * remaining 56 bytes are unaddressable due to the operation taking - * that portion of the address. - * - * NOTE: The AMO_t _MUST_ be placed in either the first or second half - * of the cache line. The cache line _MUST NOT_ be used for anything - * other than additional AMO_t entries. This is because there are two - * addresses which reference the same physical cache line. One will - * be a cached entry with the memory type bits all set. This address - * may be loaded into processor cache. 
The AMO_t will be referenced - * uncached via the memory special memory type. If any portion of the - * cached cache-line is modified, when that line is flushed, it will - * overwrite the uncached value in physical memory and lead to - * inconsistency. - */ -typedef struct { - u64 variable; - u64 unused[7]; -} AMO_t; - - -/* - * The following APIs are externalized to the kernel to allocate/free pages of - * fetchop variables. - * fetchop_kalloc_page - Allocate/initialize 1 fetchop page on the - * specified cnode. - * fetchop_kfree_page - Free a previously allocated fetchop page - */ - -unsigned long fetchop_kalloc_page(int nid); -void fetchop_kfree_page(unsigned long maddr); - - -#endif /* __KERNEL__ */ - -#endif /* _ASM_IA64_SN_FETCHOP_H */ - diff --git a/include/asm-ia64/sn/sn_fru.h b/include/asm-ia64/sn/sn_fru.h deleted file mode 100644 index 8c21ac3..0000000 --- a/include/asm-ia64/sn/sn_fru.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved. - */ -#ifndef _ASM_IA64_SN_SN_FRU_H -#define _ASM_IA64_SN_SN_FRU_H - -#define MAX_DIMMS 8 /* max # of dimm banks */ -#define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */ - -typedef unsigned char confidence_t; - -typedef struct kf_mem_s { - confidence_t km_confidence; /* confidence level that the memory is bad - * is this necessary ? - */ - confidence_t km_dimm[MAX_DIMMS]; - /* confidence level that dimm[i] is bad - *I think this is the right number - */ - -} kf_mem_t; - -typedef struct kf_cpu_s { - confidence_t kc_confidence; /* confidence level that cpu is bad */ - confidence_t kc_icache; /* confidence level that instr. cache is bad */ - confidence_t kc_dcache; /* confidence level that data cache is bad */ - confidence_t kc_scache; /* confidence level that sec. cache is bad */ - confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */ -} kf_cpu_t; - - -typedef struct kf_pci_bus_s { - confidence_t kpb_belief; /* confidence level that the pci bus is bad */ - confidence_t kpb_pcidev_belief[MAX_PCIDEV]; - /* confidence level that the pci dev is bad */ -} kf_pci_bus_t; - - -#endif /* _ASM_IA64_SN_SN_FRU_H */ - diff --git a/include/asm-ia64/sn/sndrv.h b/include/asm-ia64/sn/sndrv.h deleted file mode 100644 index aa00d42..0000000 --- a/include/asm-ia64/sn/sndrv.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2002-2004 Silicon Graphics, Inc. All Rights Reserved. 
- */ - -#ifndef _ASM_IA64_SN_SNDRV_H -#define _ASM_IA64_SN_SNDRV_H - -/* ioctl commands */ -#define SNDRV_GET_ROUTERINFO 1 -#define SNDRV_GET_INFOSIZE 2 -#define SNDRV_GET_HUBINFO 3 -#define SNDRV_GET_FLASHLOGSIZE 4 -#define SNDRV_SET_FLASHSYNC 5 -#define SNDRV_GET_FLASHLOGDATA 6 -#define SNDRV_GET_FLASHLOGALL 7 - -#define SNDRV_SET_HISTOGRAM_TYPE 14 - -#define SNDRV_ELSC_COMMAND 19 -#define SNDRV_CLEAR_LOG 20 -#define SNDRV_INIT_LOG 21 -#define SNDRV_GET_PIMM_PSC 22 -#define SNDRV_SET_PARTITION 23 -#define SNDRV_GET_PARTITION 24 - -/* see synergy_perf_ioctl() */ -#define SNDRV_GET_SYNERGY_VERSION 30 -#define SNDRV_GET_SYNERGY_STATUS 31 -#define SNDRV_GET_SYNERGYINFO 32 -#define SNDRV_SYNERGY_APPEND 33 -#define SNDRV_SYNERGY_ENABLE 34 -#define SNDRV_SYNERGY_FREQ 35 - -/* Devices */ -#define SNDRV_UKNOWN_DEVICE -1 -#define SNDRV_ROUTER_DEVICE 1 -#define SNDRV_HUB_DEVICE 2 -#define SNDRV_ELSC_NVRAM_DEVICE 3 -#define SNDRV_ELSC_CONTROLLER_DEVICE 4 -#define SNDRV_SYSCTL_SUBCH 5 -#define SNDRV_SYNERGY_DEVICE 6 - -#endif /* _ASM_IA64_SN_SNDRV_H */ -- cgit v0.10.2 From 43cc67251882f60c796c8729cefc0e05b550976c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Thu, 17 Feb 2005 09:41:00 -0700 Subject: [IA64-SGI] cleanup shubio.h This patch cleans up include/asm/sn/shubio.h by removing a ton of whitespaces and running it through Lindent, reducing it's size by almost 30KB. No actual content has been changed. Signed-off-by: Jes Sorensen Signed-off-by: Tony Luck diff --git a/include/asm-ia64/sn/shubio.h b/include/asm-ia64/sn/shubio.h index fbd880e..831b721 100644 --- a/include/asm-ia64/sn/shubio.h +++ b/include/asm-ia64/sn/shubio.h @@ -3,292 +3,287 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. */ #ifndef _ASM_IA64_SN_SHUBIO_H #define _ASM_IA64_SN_SHUBIO_H -#define HUB_WIDGET_ID_MAX 0xf -#define IIO_NUM_ITTES 7 -#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) - -#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */ - /* This register is also accessible from - * Crosstalk at address 0x0. 
*/ -#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */ -#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */ -#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */ -#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */ -#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */ -#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */ -#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */ -#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */ -#define IIO_ILLR 0x00400130 /* IO LLP Log Register */ -#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */ - -#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */ -#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */ - -#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */ -#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */ - -#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */ -#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */ -#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */ -#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */ -#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */ -#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */ -#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */ - -#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */ -#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */ -#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */ -#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */ -#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */ -#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */ -#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */ -#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */ -#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */ - -#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */ -#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */ -#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */ -#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */ -#define IIO_IBCR 0x00400200 /* IO BTE Control Register */ - -#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */ -#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */ - -#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */ - -#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */ -#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */ - - -#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */ -#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */ - -#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */ -#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */ -#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */ -#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */ -#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */ - -#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */ - -#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */ -#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */ -#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */ -#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */ -#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */ -#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */ -#define IIO_IPRTE6_A 0x00400338 
/* IO PIO Read Address Table Entry 6, Part A */ -#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */ - -#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */ -#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */ -#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */ -#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */ -#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */ -#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */ -#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */ -#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */ - -#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */ -#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */ -#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */ -#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */ -#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */ -#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */ -#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */ -#define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */ - -#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */ -#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */ -#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */ -#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */ -#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */ - -#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */ -#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */ -#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */ -#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */ -#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */ - -#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */ -#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */ -#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */ -#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ -#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ - -#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ -#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ -#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ -#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ -#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ - -#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ -#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ -#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ -#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ -#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ - -#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ -#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ -#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ -#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ -#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ - -#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ -#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ -#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ -#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ -#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ - -#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ -#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ -#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ -#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ -#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ - -#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ -#define 
IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ -#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ -#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ -#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ - -#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ -#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ -#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ -#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ -#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ - -#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ -#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ -#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ -#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ -#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */ - -#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ -#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ -#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ -#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ -#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ - -#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ -#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ -#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ -#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ -#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ - -#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ -#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ -#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */ -#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ -#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ - -#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ -#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ -#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ -#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ -#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ - -#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ -#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */ -#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ - -#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ - -#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ -#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ -#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ -#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ -#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ -#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ -#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ -#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ -#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ -#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ -#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ -#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ - -#define IIO_IPCR 0x00430000 /* IO Performance Control */ -#define IIO_IPPR 0x00430008 /* IO Performance Profiling */ - - -/************************************************************************ - * * +#define HUB_WIDGET_ID_MAX 0xf +#define IIO_NUM_ITTES 7 +#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) + +#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */ + /* This register is also accessible from + * Crosstalk at address 0x0. 
*/ +#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */ +#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */ +#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */ +#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */ +#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */ +#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */ +#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */ +#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */ +#define IIO_ILLR 0x00400130 /* IO LLP Log Register */ +#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */ + +#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */ +#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */ + +#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */ +#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */ + +#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */ +#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */ +#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */ +#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */ +#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */ +#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */ +#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */ + +#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */ +#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */ +#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */ +#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */ +#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */ +#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */ +#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */ +#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */ +#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */ + +#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */ +#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */ +#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */ +#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */ +#define IIO_IBCR 0x00400200 /* IO BTE Control Register */ + +#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */ +#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */ + +#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */ + +#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */ +#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */ + +#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */ +#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */ + +#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */ +#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */ +#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */ +#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */ +#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */ + +#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */ + +#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */ +#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */ +#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */ +#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */ +#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */ +#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */ +#define IIO_IPRTE6_A 0x00400338 /* 
IO PIO Read Address Table Entry 6, Part A */ +#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */ + +#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */ +#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */ +#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */ +#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */ +#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */ +#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */ +#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */ +#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */ + +#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */ +#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */ +#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */ +#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */ +#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */ +#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */ +#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */ +#define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */ + +#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */ +#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */ +#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */ +#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */ +#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */ + +#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */ +#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */ +#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */ +#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */ +#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */ + +#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */ +#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */ +#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */ +#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ +#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ + +#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ +#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ +#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ +#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ +#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ + +#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ +#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ +#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ +#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ +#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ + +#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ +#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ +#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ +#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ +#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ + +#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ +#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ +#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ +#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ +#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ + +#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ +#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ +#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ +#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ +#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ + +#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ +#define 
IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ +#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ +#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ +#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ + +#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ +#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ +#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ +#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ +#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ + +#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ +#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ +#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ +#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ +#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */ + +#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ +#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ +#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ +#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ +#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ + +#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ +#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ +#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ +#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ +#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ + +#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ +#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ +#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */ +#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ +#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ + +#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ +#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ +#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ +#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ +#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ + +#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ +#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */ +#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ + +#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ + +#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ +#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ +#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ +#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ +#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ +#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ +#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ +#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ +#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ +#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ +#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ +#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ + +#define IIO_IPCR 0x00430000 /* IO Performance Control */ +#define IIO_IPPR 0x00430008 /* IO Performance Profiling */ + +/************************************************************************ + * * * Description: This register echoes some information from the * * LB_REV_ID register. It is available through Crosstalk as described * * above. The REV_NUM and MFG_NUM fields receive their values from * * the REVISION and MANUFACTURER fields in the LB_REV_ID register. * * The PART_NUM field's value is the Crosstalk device ID number that * * Steve Miller assigned to the SHub chip. 
* - * * + * * ************************************************************************/ typedef union ii_wid_u { - uint64_t ii_wid_regval; - struct { - uint64_t w_rsvd_1 : 1; - uint64_t w_mfg_num : 11; - uint64_t w_part_num : 16; - uint64_t w_rev_num : 4; - uint64_t w_rsvd : 32; + uint64_t ii_wid_regval; + struct { + uint64_t w_rsvd_1:1; + uint64_t w_mfg_num:11; + uint64_t w_part_num:16; + uint64_t w_rev_num:4; + uint64_t w_rsvd:32; } ii_wid_fld_s; } ii_wid_u_t; - /************************************************************************ - * * + * * * The fields in this register are set upon detection of an error * * and cleared by various mechanisms, as explained in the * * description. * - * * + * * ************************************************************************/ typedef union ii_wstat_u { - uint64_t ii_wstat_regval; - struct { - uint64_t w_pending : 4; - uint64_t w_xt_crd_to : 1; - uint64_t w_xt_tail_to : 1; - uint64_t w_rsvd_3 : 3; - uint64_t w_tx_mx_rty : 1; - uint64_t w_rsvd_2 : 6; - uint64_t w_llp_tx_cnt : 8; - uint64_t w_rsvd_1 : 8; - uint64_t w_crazy : 1; - uint64_t w_rsvd : 31; + uint64_t ii_wstat_regval; + struct { + uint64_t w_pending:4; + uint64_t w_xt_crd_to:1; + uint64_t w_xt_tail_to:1; + uint64_t w_rsvd_3:3; + uint64_t w_tx_mx_rty:1; + uint64_t w_rsvd_2:6; + uint64_t w_llp_tx_cnt:8; + uint64_t w_rsvd_1:8; + uint64_t w_crazy:1; + uint64_t w_rsvd:31; } ii_wstat_fld_s; } ii_wstat_u_t; - /************************************************************************ - * * + * * * Description: This is a read-write enabled register. It controls * * various aspects of the Crosstalk flow control. * - * * + * * ************************************************************************/ typedef union ii_wcr_u { - uint64_t ii_wcr_regval; - struct { - uint64_t w_wid : 4; - uint64_t w_tag : 1; - uint64_t w_rsvd_1 : 8; - uint64_t w_dst_crd : 3; - uint64_t w_f_bad_pkt : 1; - uint64_t w_dir_con : 1; - uint64_t w_e_thresh : 5; - uint64_t w_rsvd : 41; + uint64_t ii_wcr_regval; + struct { + uint64_t w_wid:4; + uint64_t w_tag:1; + uint64_t w_rsvd_1:8; + uint64_t w_dst_crd:3; + uint64_t w_f_bad_pkt:1; + uint64_t w_dir_con:1; + uint64_t w_e_thresh:5; + uint64_t w_rsvd:41; } ii_wcr_fld_s; } ii_wcr_u_t; - /************************************************************************ - * * + * * * Description: This register's value is a bit vector that guards * * access to local registers within the II as well as to external * * Crosstalk widgets. Each bit in the register corresponds to a * @@ -311,21 +306,18 @@ typedef union ii_wcr_u { * region ID bits are enabled in this same register. It can also be * * accessed through the IAlias space by the local processors. * * The reset value of this register allows access by all nodes. * - * * + * * ************************************************************************/ typedef union ii_ilapr_u { - uint64_t ii_ilapr_regval; - struct { - uint64_t i_region : 64; + uint64_t ii_ilapr_regval; + struct { + uint64_t i_region:64; } ii_ilapr_fld_s; } ii_ilapr_u_t; - - - /************************************************************************ - * * + * * * Description: A write to this register of the 64-bit value * * "SGIrules" in ASCII, will cause the bit in the ILAPR register * * corresponding to the region of the requestor to be set (allow * @@ -334,59 +326,54 @@ typedef union ii_ilapr_u { * This register can also be accessed through the IAlias space. * * However, this access will not change the access permissions in the * * ILAPR. 
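(Editor's aside, not part of the patch.) The ii_wid_u_t union above is the first of many raw-value/bitfield overlays in this header, so a minimal sketch of decoding such a value may help. It assumes the typedef above is in scope, that the compiler packs the bitfields in the target's order, and that the helper name and printf reporting are purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Decode an already-read II_WID value via the ii_wid_u_t overlay above. */
static void decode_wid(uint64_t raw_wid)
{
	ii_wid_u_t wid;

	wid.ii_wid_regval = raw_wid;		/* overlay raw value on the fields */
	printf("WID: part=0x%x rev=%u mfg=0x%x\n",
	       (unsigned)wid.ii_wid_fld_s.w_part_num,
	       (unsigned)wid.ii_wid_fld_s.w_rev_num,
	       (unsigned)wid.ii_wid_fld_s.w_mfg_num);
}

The same regval/field-struct pattern applies to every other union in this file; the later sketches only vary the fields they touch.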
* - * * + * * ************************************************************************/ typedef union ii_ilapo_u { - uint64_t ii_ilapo_regval; - struct { - uint64_t i_io_ovrride : 64; + uint64_t ii_ilapo_regval; + struct { + uint64_t i_io_ovrride:64; } ii_ilapo_fld_s; } ii_ilapo_u_t; - - /************************************************************************ - * * + * * * This register qualifies all the PIO and Graphics writes launched * * from the SHUB towards a widget. * - * * + * * ************************************************************************/ typedef union ii_iowa_u { - uint64_t ii_iowa_regval; - struct { - uint64_t i_w0_oac : 1; - uint64_t i_rsvd_1 : 7; - uint64_t i_wx_oac : 8; - uint64_t i_rsvd : 48; + uint64_t ii_iowa_regval; + struct { + uint64_t i_w0_oac:1; + uint64_t i_rsvd_1:7; + uint64_t i_wx_oac:8; + uint64_t i_rsvd:48; } ii_iowa_fld_s; } ii_iowa_u_t; - /************************************************************************ - * * + * * * Description: This register qualifies all the requests launched * * from a widget towards the Shub. This register is intended to be * * used by software in case of misbehaving widgets. * - * * - * * + * * + * * ************************************************************************/ typedef union ii_iiwa_u { - uint64_t ii_iiwa_regval; - struct { - uint64_t i_w0_iac : 1; - uint64_t i_rsvd_1 : 7; - uint64_t i_wx_iac : 8; - uint64_t i_rsvd : 48; + uint64_t ii_iiwa_regval; + struct { + uint64_t i_w0_iac:1; + uint64_t i_rsvd_1:7; + uint64_t i_wx_iac:8; + uint64_t i_rsvd:48; } ii_iiwa_fld_s; } ii_iiwa_u_t; - - /************************************************************************ - * * + * * * Description: This register qualifies all the operations launched * * from a widget towards the SHub. It allows individual access * * control for up to 8 devices per widget. A device refers to * @@ -401,72 +388,69 @@ typedef union ii_iiwa_u { * The bits in this field are set by writing a 1 to them. Incoming * * replies from Crosstalk are not subject to this access control * * mechanism. * - * * + * * ************************************************************************/ typedef union ii_iidem_u { - uint64_t ii_iidem_regval; - struct { - uint64_t i_w8_dxs : 8; - uint64_t i_w9_dxs : 8; - uint64_t i_wa_dxs : 8; - uint64_t i_wb_dxs : 8; - uint64_t i_wc_dxs : 8; - uint64_t i_wd_dxs : 8; - uint64_t i_we_dxs : 8; - uint64_t i_wf_dxs : 8; + uint64_t ii_iidem_regval; + struct { + uint64_t i_w8_dxs:8; + uint64_t i_w9_dxs:8; + uint64_t i_wa_dxs:8; + uint64_t i_wb_dxs:8; + uint64_t i_wc_dxs:8; + uint64_t i_wd_dxs:8; + uint64_t i_we_dxs:8; + uint64_t i_wf_dxs:8; } ii_iidem_fld_s; } ii_iidem_u_t; - /************************************************************************ - * * + * * * This register contains the various programmable fields necessary * * for controlling and observing the LLP signals. 
* - * * + * * ************************************************************************/ typedef union ii_ilcsr_u { - uint64_t ii_ilcsr_regval; - struct { - uint64_t i_nullto : 6; - uint64_t i_rsvd_4 : 2; - uint64_t i_wrmrst : 1; - uint64_t i_rsvd_3 : 1; - uint64_t i_llp_en : 1; - uint64_t i_bm8 : 1; - uint64_t i_llp_stat : 2; - uint64_t i_remote_power : 1; - uint64_t i_rsvd_2 : 1; - uint64_t i_maxrtry : 10; - uint64_t i_d_avail_sel : 2; - uint64_t i_rsvd_1 : 4; - uint64_t i_maxbrst : 10; - uint64_t i_rsvd : 22; + uint64_t ii_ilcsr_regval; + struct { + uint64_t i_nullto:6; + uint64_t i_rsvd_4:2; + uint64_t i_wrmrst:1; + uint64_t i_rsvd_3:1; + uint64_t i_llp_en:1; + uint64_t i_bm8:1; + uint64_t i_llp_stat:2; + uint64_t i_remote_power:1; + uint64_t i_rsvd_2:1; + uint64_t i_maxrtry:10; + uint64_t i_d_avail_sel:2; + uint64_t i_rsvd_1:4; + uint64_t i_maxbrst:10; + uint64_t i_rsvd:22; } ii_ilcsr_fld_s; } ii_ilcsr_u_t; - /************************************************************************ - * * + * * * This is simply a status registers that monitors the LLP error * - * rate. * - * * + * rate. * + * * ************************************************************************/ typedef union ii_illr_u { - uint64_t ii_illr_regval; - struct { - uint64_t i_sn_cnt : 16; - uint64_t i_cb_cnt : 16; - uint64_t i_rsvd : 32; + uint64_t ii_illr_regval; + struct { + uint64_t i_sn_cnt:16; + uint64_t i_cb_cnt:16; + uint64_t i_rsvd:32; } ii_illr_fld_s; } ii_illr_u_t; - /************************************************************************ - * * + * * * Description: All II-detected non-BTE error interrupts are * * specified via this register. * * NOTE: The PI interrupt register address is hardcoded in the II. If * @@ -476,107 +460,100 @@ typedef union ii_illr_u { * PI_ID==1, then the II sends the interrupt request to address * * offset 0x01A0_0090 within the local register address space of PI1 * * on the node specified by the NODE field. * - * * + * * ************************************************************************/ typedef union ii_iidsr_u { - uint64_t ii_iidsr_regval; - struct { - uint64_t i_level : 8; - uint64_t i_pi_id : 1; - uint64_t i_node : 11; - uint64_t i_rsvd_3 : 4; - uint64_t i_enable : 1; - uint64_t i_rsvd_2 : 3; - uint64_t i_int_sent : 2; - uint64_t i_rsvd_1 : 2; - uint64_t i_pi0_forward_int : 1; - uint64_t i_pi1_forward_int : 1; - uint64_t i_rsvd : 30; + uint64_t ii_iidsr_regval; + struct { + uint64_t i_level:8; + uint64_t i_pi_id:1; + uint64_t i_node:11; + uint64_t i_rsvd_3:4; + uint64_t i_enable:1; + uint64_t i_rsvd_2:3; + uint64_t i_int_sent:2; + uint64_t i_rsvd_1:2; + uint64_t i_pi0_forward_int:1; + uint64_t i_pi1_forward_int:1; + uint64_t i_rsvd:30; } ii_iidsr_fld_s; } ii_iidsr_u_t; - - /************************************************************************ - * * + * * * There are two instances of this register. This register is used * * for matching up the incoming responses from the graphics widget to * * the processor that initiated the graphics operation. The * * write-responses are converted to graphics credits and returned to * * the processor so that the processor interface can manage the flow * * control. 
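(Editor's aside, not part of the patch.) The ILLR description above says the register simply counts LLP errors. A hedged sketch of reporting those counters, assuming ii_illr_u_t is in scope and leaving the actual register read to platform MMIO code (report_llp_errors is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Report the LLP sequence-number and checkbit error counts from a raw
 * IIO_ILLR value, using the ii_illr_u_t layout above.  How the raw
 * value is obtained (MMIO accessor, firmware call, ...) is platform
 * specific and outside this sketch. */
static void report_llp_errors(uint64_t raw_illr)
{
	ii_illr_u_t illr;

	illr.ii_illr_regval = raw_illr;
	printf("LLP errors: sn_cnt=%u cb_cnt=%u\n",
	       (unsigned)illr.ii_illr_fld_s.i_sn_cnt,
	       (unsigned)illr.ii_illr_fld_s.i_cb_cnt);
}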
* - * * + * * ************************************************************************/ typedef union ii_igfx0_u { - uint64_t ii_igfx0_regval; - struct { - uint64_t i_w_num : 4; - uint64_t i_pi_id : 1; - uint64_t i_n_num : 12; - uint64_t i_p_num : 1; - uint64_t i_rsvd : 46; + uint64_t ii_igfx0_regval; + struct { + uint64_t i_w_num:4; + uint64_t i_pi_id:1; + uint64_t i_n_num:12; + uint64_t i_p_num:1; + uint64_t i_rsvd:46; } ii_igfx0_fld_s; } ii_igfx0_u_t; - /************************************************************************ - * * + * * * There are two instances of this register. This register is used * * for matching up the incoming responses from the graphics widget to * * the processor that initiated the graphics operation. The * * write-responses are converted to graphics credits and returned to * * the processor so that the processor interface can manage the flow * * control. * - * * + * * ************************************************************************/ typedef union ii_igfx1_u { - uint64_t ii_igfx1_regval; - struct { - uint64_t i_w_num : 4; - uint64_t i_pi_id : 1; - uint64_t i_n_num : 12; - uint64_t i_p_num : 1; - uint64_t i_rsvd : 46; + uint64_t ii_igfx1_regval; + struct { + uint64_t i_w_num:4; + uint64_t i_pi_id:1; + uint64_t i_n_num:12; + uint64_t i_p_num:1; + uint64_t i_rsvd:46; } ii_igfx1_fld_s; } ii_igfx1_u_t; - /************************************************************************ - * * + * * * There are two instances of this registers. These registers are * * used as scratch registers for software use. * - * * + * * ************************************************************************/ typedef union ii_iscr0_u { - uint64_t ii_iscr0_regval; - struct { - uint64_t i_scratch : 64; + uint64_t ii_iscr0_regval; + struct { + uint64_t i_scratch:64; } ii_iscr0_fld_s; } ii_iscr0_u_t; - - /************************************************************************ - * * + * * * There are two instances of this registers. These registers are * * used as scratch registers for software use. * - * * + * * ************************************************************************/ typedef union ii_iscr1_u { - uint64_t ii_iscr1_regval; - struct { - uint64_t i_scratch : 64; + uint64_t ii_iscr1_regval; + struct { + uint64_t i_scratch:64; } ii_iscr1_fld_s; } ii_iscr1_u_t; - /************************************************************************ - * * + * * * Description: There are seven instances of translation table entry * * registers. Each register maps a Shub Big Window to a 48-bit * * address on Crosstalk. * @@ -599,23 +576,22 @@ typedef union ii_iscr1_u { * Crosstalk space addressable by the Shub is thus the lower * * 8-GBytes per widget (N-mode), only 7/32nds * * of this space can be accessed. * - * * + * * ************************************************************************/ typedef union ii_itte1_u { - uint64_t ii_itte1_regval; - struct { - uint64_t i_offset : 5; - uint64_t i_rsvd_1 : 3; - uint64_t i_w_num : 4; - uint64_t i_iosp : 1; - uint64_t i_rsvd : 51; + uint64_t ii_itte1_regval; + struct { + uint64_t i_offset:5; + uint64_t i_rsvd_1:3; + uint64_t i_w_num:4; + uint64_t i_iosp:1; + uint64_t i_rsvd:51; } ii_itte1_fld_s; } ii_itte1_u_t; - /************************************************************************ - * * + * * * Description: There are seven instances of translation table entry * * registers. Each register maps a Shub Big Window to a 48-bit * * address on Crosstalk. 
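(Editor's aside, not part of the patch.) The ITTE description explains that each entry maps a Big Window through an offset, a target widget number and an IO-space select. A sketch, under the assumption that ii_itte1_u_t is in scope, of packing those fields into a raw register value; the masks simply mirror the field widths above, the argument values are placeholders, and make_itte is a made-up helper name rather than a kernel API:

#include <stdint.h>

/* Build a raw IIO_ITTE1 value from its fields using the ii_itte1_u_t
 * layout above.  Field meanings follow the register description. */
static uint64_t make_itte(unsigned int offset, unsigned int widget,
			  unsigned int iosp)
{
	ii_itte1_u_t itte = { .ii_itte1_regval = 0 };

	itte.ii_itte1_fld_s.i_offset = offset & 0x1f;	/* 5-bit window offset */
	itte.ii_itte1_fld_s.i_w_num  = widget & 0xf;	/* target widget number */
	itte.ii_itte1_fld_s.i_iosp   = iosp & 0x1;	/* IO space select */
	return itte.ii_itte1_regval;
}

Writing the composed value to IIO_ITTE1 would then go through whatever MMIO accessor the platform provides.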
* @@ -638,23 +614,22 @@ typedef union ii_itte1_u { * Crosstalk space addressable by the Shub is thus the lower * * 8-GBytes per widget (N-mode), only 7/32nds * * of this space can be accessed. * - * * + * * ************************************************************************/ typedef union ii_itte2_u { - uint64_t ii_itte2_regval; - struct { - uint64_t i_offset : 5; - uint64_t i_rsvd_1 : 3; - uint64_t i_w_num : 4; - uint64_t i_iosp : 1; - uint64_t i_rsvd : 51; + uint64_t ii_itte2_regval; + struct { + uint64_t i_offset:5; + uint64_t i_rsvd_1:3; + uint64_t i_w_num:4; + uint64_t i_iosp:1; + uint64_t i_rsvd:51; } ii_itte2_fld_s; } ii_itte2_u_t; - /************************************************************************ - * * + * * * Description: There are seven instances of translation table entry * * registers. Each register maps a Shub Big Window to a 48-bit * * address on Crosstalk. * @@ -677,23 +652,22 @@ typedef union ii_itte2_u { * Crosstalk space addressable by the SHub is thus the lower * * 8-GBytes per widget (N-mode), only 7/32nds * * of this space can be accessed. * - * * + * * ************************************************************************/ typedef union ii_itte3_u { - uint64_t ii_itte3_regval; - struct { - uint64_t i_offset : 5; - uint64_t i_rsvd_1 : 3; - uint64_t i_w_num : 4; - uint64_t i_iosp : 1; - uint64_t i_rsvd : 51; + uint64_t ii_itte3_regval; + struct { + uint64_t i_offset:5; + uint64_t i_rsvd_1:3; + uint64_t i_w_num:4; + uint64_t i_iosp:1; + uint64_t i_rsvd:51; } ii_itte3_fld_s; } ii_itte3_u_t; - /************************************************************************ - * * + * * * Description: There are seven instances of translation table entry * * registers. Each register maps a SHub Big Window to a 48-bit * * address on Crosstalk. * @@ -716,23 +690,22 @@ typedef union ii_itte3_u { * Crosstalk space addressable by the SHub is thus the lower * * 8-GBytes per widget (N-mode), only 7/32nds * * of this space can be accessed. * - * * + * * ************************************************************************/ typedef union ii_itte4_u { - uint64_t ii_itte4_regval; - struct { - uint64_t i_offset : 5; - uint64_t i_rsvd_1 : 3; - uint64_t i_w_num : 4; - uint64_t i_iosp : 1; - uint64_t i_rsvd : 51; + uint64_t ii_itte4_regval; + struct { + uint64_t i_offset:5; + uint64_t i_rsvd_1:3; + uint64_t i_w_num:4; + uint64_t i_iosp:1; + uint64_t i_rsvd:51; } ii_itte4_fld_s; } ii_itte4_u_t; - /************************************************************************ - * * + * * * Description: There are seven instances of translation table entry * * registers. Each register maps a SHub Big Window to a 48-bit * * address on Crosstalk. * @@ -755,23 +728,22 @@ typedef union ii_itte4_u { * Crosstalk space addressable by the Shub is thus the lower * * 8-GBytes per widget (N-mode), only 7/32nds * * of this space can be accessed. * - * * + * * ************************************************************************/ typedef union ii_itte5_u { - uint64_t ii_itte5_regval; - struct { - uint64_t i_offset : 5; - uint64_t i_rsvd_1 : 3; - uint64_t i_w_num : 4; - uint64_t i_iosp : 1; - uint64_t i_rsvd : 51; + uint64_t ii_itte5_regval; + struct { + uint64_t i_offset:5; + uint64_t i_rsvd_1:3; + uint64_t i_w_num:4; + uint64_t i_iosp:1; + uint64_t i_rsvd:51; } ii_itte5_fld_s; } ii_itte5_u_t; - /************************************************************************ - * * + * * * Description: There are seven instances of translation table entry * * registers. 
Each register maps a Shub Big Window to a 48-bit * * address on Crosstalk. * @@ -794,23 +766,22 @@ typedef union ii_itte5_u { * Crosstalk space addressable by the Shub is thus the lower * * 8-GBytes per widget (N-mode), only 7/32nds * * of this space can be accessed. * - * * + * * ************************************************************************/ typedef union ii_itte6_u { - uint64_t ii_itte6_regval; - struct { - uint64_t i_offset : 5; - uint64_t i_rsvd_1 : 3; - uint64_t i_w_num : 4; - uint64_t i_iosp : 1; - uint64_t i_rsvd : 51; + uint64_t ii_itte6_regval; + struct { + uint64_t i_offset:5; + uint64_t i_rsvd_1:3; + uint64_t i_w_num:4; + uint64_t i_iosp:1; + uint64_t i_rsvd:51; } ii_itte6_fld_s; } ii_itte6_u_t; - /************************************************************************ - * * + * * * Description: There are seven instances of translation table entry * * registers. Each register maps a Shub Big Window to a 48-bit * * address on Crosstalk. * @@ -833,23 +804,22 @@ typedef union ii_itte6_u { * Crosstalk space addressable by the SHub is thus the lower * * 8-GBytes per widget (N-mode), only 7/32nds * * of this space can be accessed. * - * * + * * ************************************************************************/ typedef union ii_itte7_u { - uint64_t ii_itte7_regval; - struct { - uint64_t i_offset : 5; - uint64_t i_rsvd_1 : 3; - uint64_t i_w_num : 4; - uint64_t i_iosp : 1; - uint64_t i_rsvd : 51; + uint64_t ii_itte7_regval; + struct { + uint64_t i_offset:5; + uint64_t i_rsvd_1:3; + uint64_t i_w_num:4; + uint64_t i_iosp:1; + uint64_t i_rsvd:51; } ii_itte7_fld_s; } ii_itte7_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -868,33 +838,32 @@ typedef union ii_itte7_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . * + * * ************************************************************************/ typedef union ii_iprb0_u { - uint64_t ii_iprb0_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprb0_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprb0_fld_s; } ii_iprb0_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -913,33 +882,32 @@ typedef union ii_iprb0_u { * register; the write will correct the C field and capture its new * * value in the internal register. 
Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . * + * * ************************************************************************/ typedef union ii_iprb8_u { - uint64_t ii_iprb8_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprb8_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprb8_fld_s; } ii_iprb8_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -958,33 +926,32 @@ typedef union ii_iprb8_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . * + * * ************************************************************************/ typedef union ii_iprb9_u { - uint64_t ii_iprb9_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprb9_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprb9_fld_s; } ii_iprb9_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -1003,33 +970,32 @@ typedef union ii_iprb9_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. 
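(Editor's aside, not part of the patch.) The PRB description above enumerates several sticky error indications (ERROR, RD_TO, SPUR_WR, SPUR_RD). A sketch of testing them on an already-read widget-8 PRB value, assuming ii_iprb8_u_t is in scope (prb8_has_error is an illustrative name; clearing the bits goes through IECLR as the description notes):

#include <stdint.h>
#include <stdbool.h>

/* Check a raw widget-8 PRB value for the error conditions described
 * above, using the ii_iprb8_u_t layout.  Purely a decode helper. */
static bool prb8_has_error(uint64_t raw_prb)
{
	ii_iprb8_u_t prb;

	prb.ii_iprb8_regval = raw_prb;
	return prb.ii_iprb8_fld_s.i_error ||
	       prb.ii_iprb8_fld_s.i_rd_to ||
	       prb.ii_iprb8_fld_s.i_spur_wr ||
	       prb.ii_iprb8_fld_s.i_spur_rd;
}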
* - * * - * * + * * + * * ************************************************************************/ typedef union ii_iprba_u { - uint64_t ii_iprba_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprba_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprba_fld_s; } ii_iprba_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -1048,33 +1014,32 @@ typedef union ii_iprba_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . * + * * ************************************************************************/ typedef union ii_iprbb_u { - uint64_t ii_iprbb_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprbb_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprbb_fld_s; } ii_iprbb_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -1093,33 +1058,32 @@ typedef union ii_iprbb_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . 
* + * * ************************************************************************/ typedef union ii_iprbc_u { - uint64_t ii_iprbc_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprbc_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprbc_fld_s; } ii_iprbc_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -1138,33 +1102,32 @@ typedef union ii_iprbc_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . * + * * ************************************************************************/ typedef union ii_iprbd_u { - uint64_t ii_iprbd_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprbd_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprbd_fld_s; } ii_iprbd_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of SHub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -1183,33 +1146,32 @@ typedef union ii_iprbd_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . 
* + * * ************************************************************************/ typedef union ii_iprbe_u { - uint64_t ii_iprbe_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; + uint64_t ii_iprbe_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; } ii_iprbe_fld_s; } ii_iprbe_u_t; - /************************************************************************ - * * + * * * Description: There are 9 instances of this register, one per * * actual widget in this implementation of Shub and Crossbow. * * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * @@ -1228,33 +1190,32 @@ typedef union ii_iprbe_u { * register; the write will correct the C field and capture its new * * value in the internal register. Even if IECLR[E_PRB_x] is set, the * * SPUR_WR bit will persist if IPRBx hasn't yet been written. * - * . * - * * + * . * + * * ************************************************************************/ typedef union ii_iprbf_u { - uint64_t ii_iprbf_regval; - struct { - uint64_t i_c : 8; - uint64_t i_na : 14; - uint64_t i_rsvd_2 : 2; - uint64_t i_nb : 14; - uint64_t i_rsvd_1 : 2; - uint64_t i_m : 2; - uint64_t i_f : 1; - uint64_t i_of_cnt : 5; - uint64_t i_error : 1; - uint64_t i_rd_to : 1; - uint64_t i_spur_wr : 1; - uint64_t i_spur_rd : 1; - uint64_t i_rsvd : 11; - uint64_t i_mult_err : 1; - } ii_iprbe_fld_s; + uint64_t ii_iprbf_regval; + struct { + uint64_t i_c:8; + uint64_t i_na:14; + uint64_t i_rsvd_2:2; + uint64_t i_nb:14; + uint64_t i_rsvd_1:2; + uint64_t i_m:2; + uint64_t i_f:1; + uint64_t i_of_cnt:5; + uint64_t i_error:1; + uint64_t i_rd_to:1; + uint64_t i_spur_wr:1; + uint64_t i_spur_rd:1; + uint64_t i_rsvd:11; + uint64_t i_mult_err:1; + } ii_iprbe_fld_s; } ii_iprbf_u_t; - /************************************************************************ - * * + * * * This register specifies the timeout value to use for monitoring * * Crosstalk credits which are used outbound to Crosstalk. An * * internal counter called the Crosstalk Credit Timeout Counter * @@ -1267,20 +1228,19 @@ typedef union ii_iprbf_u { * Crosstalk Credit Timeout has occurred. The internal counter is not * * readable from software, and stops counting at its maximum value, * * so it cannot cause more than one interrupt. * - * * + * * ************************************************************************/ typedef union ii_ixcc_u { - uint64_t ii_ixcc_regval; - struct { - uint64_t i_time_out : 26; - uint64_t i_rsvd : 38; + uint64_t ii_ixcc_regval; + struct { + uint64_t i_time_out:26; + uint64_t i_rsvd:38; } ii_ixcc_fld_s; } ii_ixcc_u_t; - /************************************************************************ - * * + * * * Description: This register qualifies all the PIO and DMA * * operations launched from widget 0 towards the SHub. In * * addition, it also qualifies accesses by the BTE streams. * @@ -1292,27 +1252,25 @@ typedef union ii_ixcc_u { * the Wx_IAC field. The bits in this field are set by writing a 1 to * * them. 
Incoming replies from Crosstalk are not subject to this * * access control mechanism. * - * * + * * ************************************************************************/ typedef union ii_imem_u { - uint64_t ii_imem_regval; - struct { - uint64_t i_w0_esd : 1; - uint64_t i_rsvd_3 : 3; - uint64_t i_b0_esd : 1; - uint64_t i_rsvd_2 : 3; - uint64_t i_b1_esd : 1; - uint64_t i_rsvd_1 : 3; - uint64_t i_clr_precise : 1; - uint64_t i_rsvd : 51; + uint64_t ii_imem_regval; + struct { + uint64_t i_w0_esd:1; + uint64_t i_rsvd_3:3; + uint64_t i_b0_esd:1; + uint64_t i_rsvd_2:3; + uint64_t i_b1_esd:1; + uint64_t i_rsvd_1:3; + uint64_t i_clr_precise:1; + uint64_t i_rsvd:51; } ii_imem_fld_s; } ii_imem_u_t; - - /************************************************************************ - * * + * * * Description: This register specifies the timeout value to use for * * monitoring Crosstalk tail flits coming into the Shub in the * * TAIL_TO field. An internal counter associated with this register * @@ -1332,90 +1290,87 @@ typedef union ii_imem_u { * the value in the RRSP_TO field, a Read Response Timeout has * * occurred, and error handling occurs as described in the Error * * Handling section of this document. * - * * + * * ************************************************************************/ typedef union ii_ixtt_u { - uint64_t ii_ixtt_regval; - struct { - uint64_t i_tail_to : 26; - uint64_t i_rsvd_1 : 6; - uint64_t i_rrsp_ps : 23; - uint64_t i_rrsp_to : 5; - uint64_t i_rsvd : 4; + uint64_t ii_ixtt_regval; + struct { + uint64_t i_tail_to:26; + uint64_t i_rsvd_1:6; + uint64_t i_rrsp_ps:23; + uint64_t i_rrsp_to:5; + uint64_t i_rsvd:4; } ii_ixtt_fld_s; } ii_ixtt_u_t; - /************************************************************************ - * * + * * * Writing a 1 to the fields of this register clears the appropriate * * error bits in other areas of SHub. Note that when the * * E_PRB_x bits are used to clear error bits in PRB registers, * * SPUR_RD and SPUR_WR may persist, because they require additional * * action to clear them. See the IPRBx and IXSS Register * * specifications. 
* - * * + * * ************************************************************************/ typedef union ii_ieclr_u { - uint64_t ii_ieclr_regval; - struct { - uint64_t i_e_prb_0 : 1; - uint64_t i_rsvd : 7; - uint64_t i_e_prb_8 : 1; - uint64_t i_e_prb_9 : 1; - uint64_t i_e_prb_a : 1; - uint64_t i_e_prb_b : 1; - uint64_t i_e_prb_c : 1; - uint64_t i_e_prb_d : 1; - uint64_t i_e_prb_e : 1; - uint64_t i_e_prb_f : 1; - uint64_t i_e_crazy : 1; - uint64_t i_e_bte_0 : 1; - uint64_t i_e_bte_1 : 1; - uint64_t i_reserved_1 : 10; - uint64_t i_spur_rd_hdr : 1; - uint64_t i_cam_intr_to : 1; - uint64_t i_cam_overflow : 1; - uint64_t i_cam_read_miss : 1; - uint64_t i_ioq_rep_underflow : 1; - uint64_t i_ioq_req_underflow : 1; - uint64_t i_ioq_rep_overflow : 1; - uint64_t i_ioq_req_overflow : 1; - uint64_t i_iiq_rep_overflow : 1; - uint64_t i_iiq_req_overflow : 1; - uint64_t i_ii_xn_rep_cred_overflow : 1; - uint64_t i_ii_xn_req_cred_overflow : 1; - uint64_t i_ii_xn_invalid_cmd : 1; - uint64_t i_xn_ii_invalid_cmd : 1; - uint64_t i_reserved_2 : 21; + uint64_t ii_ieclr_regval; + struct { + uint64_t i_e_prb_0:1; + uint64_t i_rsvd:7; + uint64_t i_e_prb_8:1; + uint64_t i_e_prb_9:1; + uint64_t i_e_prb_a:1; + uint64_t i_e_prb_b:1; + uint64_t i_e_prb_c:1; + uint64_t i_e_prb_d:1; + uint64_t i_e_prb_e:1; + uint64_t i_e_prb_f:1; + uint64_t i_e_crazy:1; + uint64_t i_e_bte_0:1; + uint64_t i_e_bte_1:1; + uint64_t i_reserved_1:10; + uint64_t i_spur_rd_hdr:1; + uint64_t i_cam_intr_to:1; + uint64_t i_cam_overflow:1; + uint64_t i_cam_read_miss:1; + uint64_t i_ioq_rep_underflow:1; + uint64_t i_ioq_req_underflow:1; + uint64_t i_ioq_rep_overflow:1; + uint64_t i_ioq_req_overflow:1; + uint64_t i_iiq_rep_overflow:1; + uint64_t i_iiq_req_overflow:1; + uint64_t i_ii_xn_rep_cred_overflow:1; + uint64_t i_ii_xn_req_cred_overflow:1; + uint64_t i_ii_xn_invalid_cmd:1; + uint64_t i_xn_ii_invalid_cmd:1; + uint64_t i_reserved_2:21; } ii_ieclr_fld_s; } ii_ieclr_u_t; - /************************************************************************ - * * + * * * This register controls both BTEs. SOFT_RESET is intended for * * recovery after an error. COUNT controls the total number of CRBs * * that both BTEs (combined) can use, which affects total BTE * * bandwidth. * - * * + * * ************************************************************************/ typedef union ii_ibcr_u { - uint64_t ii_ibcr_regval; - struct { - uint64_t i_count : 4; - uint64_t i_rsvd_1 : 4; - uint64_t i_soft_reset : 1; - uint64_t i_rsvd : 55; + uint64_t ii_ibcr_regval; + struct { + uint64_t i_count:4; + uint64_t i_rsvd_1:4; + uint64_t i_soft_reset:1; + uint64_t i_rsvd:55; } ii_ibcr_fld_s; } ii_ibcr_u_t; - /************************************************************************ - * * + * * * This register contains the header of a spurious read response * * received from Crosstalk. A spurious read response is defined as a * * read response received by II from a widget for which (1) the SIDN * @@ -1440,49 +1395,47 @@ typedef union ii_ibcr_u { * will be set. Any SPUR_RD bits in any other PRB registers indicate * * spurious messages from other widets which were detected after the * * header was captured.. 
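(Editor's aside, not part of the patch.) IECLR is documented above as write-1-to-clear. A sketch of composing a clear mask with ii_ieclr_u_t; which bits a real recovery path sets depends on the error being handled, and the helper name is illustrative:

#include <stdint.h>

/* Compose an IIO_IECLR value that clears the latched error state for
 * widget 8 and for both BTEs, following the "write 1 to clear" rule
 * described above.  This only shows the union usage, not a complete
 * recovery sequence. */
static uint64_t ieclr_widget8_and_btes(void)
{
	ii_ieclr_u_t ieclr = { .ii_ieclr_regval = 0 };

	ieclr.ii_ieclr_fld_s.i_e_prb_8 = 1;	/* clear widget 8 PRB errors */
	ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;	/* clear BTE 0 error state */
	ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;	/* clear BTE 1 error state */
	return ieclr.ii_ieclr_regval;
}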
* - * * + * * ************************************************************************/ typedef union ii_ixsm_u { - uint64_t ii_ixsm_regval; - struct { - uint64_t i_byte_en : 32; - uint64_t i_reserved : 1; - uint64_t i_tag : 3; - uint64_t i_alt_pactyp : 4; - uint64_t i_bo : 1; - uint64_t i_error : 1; - uint64_t i_vbpm : 1; - uint64_t i_gbr : 1; - uint64_t i_ds : 2; - uint64_t i_ct : 1; - uint64_t i_tnum : 5; - uint64_t i_pactyp : 4; - uint64_t i_sidn : 4; - uint64_t i_didn : 4; + uint64_t ii_ixsm_regval; + struct { + uint64_t i_byte_en:32; + uint64_t i_reserved:1; + uint64_t i_tag:3; + uint64_t i_alt_pactyp:4; + uint64_t i_bo:1; + uint64_t i_error:1; + uint64_t i_vbpm:1; + uint64_t i_gbr:1; + uint64_t i_ds:2; + uint64_t i_ct:1; + uint64_t i_tnum:5; + uint64_t i_pactyp:4; + uint64_t i_sidn:4; + uint64_t i_didn:4; } ii_ixsm_fld_s; } ii_ixsm_u_t; - /************************************************************************ - * * + * * * This register contains the sideband bits of a spurious read * * response received from Crosstalk. * - * * + * * ************************************************************************/ typedef union ii_ixss_u { - uint64_t ii_ixss_regval; - struct { - uint64_t i_sideband : 8; - uint64_t i_rsvd : 55; - uint64_t i_valid : 1; + uint64_t ii_ixss_regval; + struct { + uint64_t i_sideband:8; + uint64_t i_rsvd:55; + uint64_t i_valid:1; } ii_ixss_fld_s; } ii_ixss_u_t; - /************************************************************************ - * * + * * * This register enables software to access the II LLP's test port. * * Refer to the LLP 2.5 documentation for an explanation of the test * * port. Software can write to this register to program the values * @@ -1490,27 +1443,26 @@ typedef union ii_ixss_u { * TestMask and TestSeed). Similarly, software can read from this * * register to obtain the values of the test port's status outputs * * (TestCBerr, TestValid and TestData). * - * * + * * ************************************************************************/ typedef union ii_ilct_u { - uint64_t ii_ilct_regval; - struct { - uint64_t i_test_seed : 20; - uint64_t i_test_mask : 8; - uint64_t i_test_data : 20; - uint64_t i_test_valid : 1; - uint64_t i_test_cberr : 1; - uint64_t i_test_flit : 3; - uint64_t i_test_clear : 1; - uint64_t i_test_err_capture : 1; - uint64_t i_rsvd : 9; + uint64_t ii_ilct_regval; + struct { + uint64_t i_test_seed:20; + uint64_t i_test_mask:8; + uint64_t i_test_data:20; + uint64_t i_test_valid:1; + uint64_t i_test_cberr:1; + uint64_t i_test_flit:3; + uint64_t i_test_clear:1; + uint64_t i_test_err_capture:1; + uint64_t i_rsvd:9; } ii_ilct_fld_s; } ii_ilct_u_t; - /************************************************************************ - * * + * * * If the II detects an illegal incoming Duplonet packet (request or * * reply) when VALID==0 in the IIEPH1 register, then it saves the * * contents of the packet's header flit in the IIEPH1 and IIEPH2 * @@ -1526,575 +1478,549 @@ typedef union ii_ilct_u { * packet when VALID==1 in the IIEPH1 register, then it merely sets * * the OVERRUN bit to indicate that a subsequent error has happened, * * and does nothing further. 
* - * * + * * ************************************************************************/ typedef union ii_iieph1_u { - uint64_t ii_iieph1_regval; - struct { - uint64_t i_command : 7; - uint64_t i_rsvd_5 : 1; - uint64_t i_suppl : 14; - uint64_t i_rsvd_4 : 1; - uint64_t i_source : 14; - uint64_t i_rsvd_3 : 1; - uint64_t i_err_type : 4; - uint64_t i_rsvd_2 : 4; - uint64_t i_overrun : 1; - uint64_t i_rsvd_1 : 3; - uint64_t i_valid : 1; - uint64_t i_rsvd : 13; + uint64_t ii_iieph1_regval; + struct { + uint64_t i_command:7; + uint64_t i_rsvd_5:1; + uint64_t i_suppl:14; + uint64_t i_rsvd_4:1; + uint64_t i_source:14; + uint64_t i_rsvd_3:1; + uint64_t i_err_type:4; + uint64_t i_rsvd_2:4; + uint64_t i_overrun:1; + uint64_t i_rsvd_1:3; + uint64_t i_valid:1; + uint64_t i_rsvd:13; } ii_iieph1_fld_s; } ii_iieph1_u_t; - /************************************************************************ - * * + * * * This register holds the Address field from the header flit of an * * incoming erroneous Duplonet packet, along with the tail bit which * * accompanied this header flit. This register is essentially an * * extension of IIEPH1. Two registers were necessary because the 64 * * bits available in only a single register were insufficient to * * capture the entire header flit of an erroneous packet. * - * * + * * ************************************************************************/ typedef union ii_iieph2_u { - uint64_t ii_iieph2_regval; - struct { - uint64_t i_rsvd_0 : 3; - uint64_t i_address : 47; - uint64_t i_rsvd_1 : 10; - uint64_t i_tail : 1; - uint64_t i_rsvd : 3; + uint64_t ii_iieph2_regval; + struct { + uint64_t i_rsvd_0:3; + uint64_t i_address:47; + uint64_t i_rsvd_1:10; + uint64_t i_tail:1; + uint64_t i_rsvd:3; } ii_iieph2_fld_s; } ii_iieph2_u_t; - /******************************/ - - /************************************************************************ - * * + * * * This register's value is a bit vector that guards access from SXBs * * to local registers within the II as well as to external Crosstalk * * widgets * - * * + * * ************************************************************************/ typedef union ii_islapr_u { - uint64_t ii_islapr_regval; - struct { - uint64_t i_region : 64; + uint64_t ii_islapr_regval; + struct { + uint64_t i_region:64; } ii_islapr_fld_s; } ii_islapr_u_t; - /************************************************************************ - * * + * * * A write to this register of the 56-bit value "Pup+Bun" will cause * * the bit in the ISLAPR register corresponding to the region of the * * requestor to be set (access allowed). ( - * * + * * ************************************************************************/ typedef union ii_islapo_u { - uint64_t ii_islapo_regval; - struct { - uint64_t i_io_sbx_ovrride : 56; - uint64_t i_rsvd : 8; + uint64_t ii_islapo_regval; + struct { + uint64_t i_io_sbx_ovrride:56; + uint64_t i_rsvd:8; } ii_islapo_fld_s; } ii_islapo_u_t; /************************************************************************ - * * + * * * Determines how long the wrapper will wait aftr an interrupt is * * initially issued from the II before it times out the outstanding * * interrupt and drops it from the interrupt queue. 
* - * * + * * ************************************************************************/ typedef union ii_iwi_u { - uint64_t ii_iwi_regval; - struct { - uint64_t i_prescale : 24; - uint64_t i_rsvd : 8; - uint64_t i_timeout : 8; - uint64_t i_rsvd1 : 8; - uint64_t i_intrpt_retry_period : 8; - uint64_t i_rsvd2 : 8; + uint64_t ii_iwi_regval; + struct { + uint64_t i_prescale:24; + uint64_t i_rsvd:8; + uint64_t i_timeout:8; + uint64_t i_rsvd1:8; + uint64_t i_intrpt_retry_period:8; + uint64_t i_rsvd2:8; } ii_iwi_fld_s; } ii_iwi_u_t; /************************************************************************ - * * + * * * Log errors which have occurred in the II wrapper. The errors are * * cleared by writing to the IECLR register. * - * * + * * ************************************************************************/ typedef union ii_iwel_u { - uint64_t ii_iwel_regval; - struct { - uint64_t i_intr_timed_out : 1; - uint64_t i_rsvd : 7; - uint64_t i_cam_overflow : 1; - uint64_t i_cam_read_miss : 1; - uint64_t i_rsvd1 : 2; - uint64_t i_ioq_rep_underflow : 1; - uint64_t i_ioq_req_underflow : 1; - uint64_t i_ioq_rep_overflow : 1; - uint64_t i_ioq_req_overflow : 1; - uint64_t i_iiq_rep_overflow : 1; - uint64_t i_iiq_req_overflow : 1; - uint64_t i_rsvd2 : 6; - uint64_t i_ii_xn_rep_cred_over_under: 1; - uint64_t i_ii_xn_req_cred_over_under: 1; - uint64_t i_rsvd3 : 6; - uint64_t i_ii_xn_invalid_cmd : 1; - uint64_t i_xn_ii_invalid_cmd : 1; - uint64_t i_rsvd4 : 30; + uint64_t ii_iwel_regval; + struct { + uint64_t i_intr_timed_out:1; + uint64_t i_rsvd:7; + uint64_t i_cam_overflow:1; + uint64_t i_cam_read_miss:1; + uint64_t i_rsvd1:2; + uint64_t i_ioq_rep_underflow:1; + uint64_t i_ioq_req_underflow:1; + uint64_t i_ioq_rep_overflow:1; + uint64_t i_ioq_req_overflow:1; + uint64_t i_iiq_rep_overflow:1; + uint64_t i_iiq_req_overflow:1; + uint64_t i_rsvd2:6; + uint64_t i_ii_xn_rep_cred_over_under:1; + uint64_t i_ii_xn_req_cred_over_under:1; + uint64_t i_rsvd3:6; + uint64_t i_ii_xn_invalid_cmd:1; + uint64_t i_xn_ii_invalid_cmd:1; + uint64_t i_rsvd4:30; } ii_iwel_fld_s; } ii_iwel_u_t; /************************************************************************ - * * + * * * Controls the II wrapper. * - * * + * * ************************************************************************/ typedef union ii_iwc_u { - uint64_t ii_iwc_regval; - struct { - uint64_t i_dma_byte_swap : 1; - uint64_t i_rsvd : 3; - uint64_t i_cam_read_lines_reset : 1; - uint64_t i_rsvd1 : 3; - uint64_t i_ii_xn_cred_over_under_log: 1; - uint64_t i_rsvd2 : 19; - uint64_t i_xn_rep_iq_depth : 5; - uint64_t i_rsvd3 : 3; - uint64_t i_xn_req_iq_depth : 5; - uint64_t i_rsvd4 : 3; - uint64_t i_iiq_depth : 6; - uint64_t i_rsvd5 : 12; - uint64_t i_force_rep_cred : 1; - uint64_t i_force_req_cred : 1; + uint64_t ii_iwc_regval; + struct { + uint64_t i_dma_byte_swap:1; + uint64_t i_rsvd:3; + uint64_t i_cam_read_lines_reset:1; + uint64_t i_rsvd1:3; + uint64_t i_ii_xn_cred_over_under_log:1; + uint64_t i_rsvd2:19; + uint64_t i_xn_rep_iq_depth:5; + uint64_t i_rsvd3:3; + uint64_t i_xn_req_iq_depth:5; + uint64_t i_rsvd4:3; + uint64_t i_iiq_depth:6; + uint64_t i_rsvd5:12; + uint64_t i_force_rep_cred:1; + uint64_t i_force_req_cred:1; } ii_iwc_fld_s; } ii_iwc_u_t; /************************************************************************ - * * + * * * Status in the II wrapper. 
* - * * + * * ************************************************************************/ typedef union ii_iws_u { - uint64_t ii_iws_regval; - struct { - uint64_t i_xn_rep_iq_credits : 5; - uint64_t i_rsvd : 3; - uint64_t i_xn_req_iq_credits : 5; - uint64_t i_rsvd1 : 51; + uint64_t ii_iws_regval; + struct { + uint64_t i_xn_rep_iq_credits:5; + uint64_t i_rsvd:3; + uint64_t i_xn_req_iq_credits:5; + uint64_t i_rsvd1:51; } ii_iws_fld_s; } ii_iws_u_t; /************************************************************************ - * * + * * * Masks errors in the IWEL register. * - * * + * * ************************************************************************/ typedef union ii_iweim_u { - uint64_t ii_iweim_regval; - struct { - uint64_t i_intr_timed_out : 1; - uint64_t i_rsvd : 7; - uint64_t i_cam_overflow : 1; - uint64_t i_cam_read_miss : 1; - uint64_t i_rsvd1 : 2; - uint64_t i_ioq_rep_underflow : 1; - uint64_t i_ioq_req_underflow : 1; - uint64_t i_ioq_rep_overflow : 1; - uint64_t i_ioq_req_overflow : 1; - uint64_t i_iiq_rep_overflow : 1; - uint64_t i_iiq_req_overflow : 1; - uint64_t i_rsvd2 : 6; - uint64_t i_ii_xn_rep_cred_overflow : 1; - uint64_t i_ii_xn_req_cred_overflow : 1; - uint64_t i_rsvd3 : 6; - uint64_t i_ii_xn_invalid_cmd : 1; - uint64_t i_xn_ii_invalid_cmd : 1; - uint64_t i_rsvd4 : 30; + uint64_t ii_iweim_regval; + struct { + uint64_t i_intr_timed_out:1; + uint64_t i_rsvd:7; + uint64_t i_cam_overflow:1; + uint64_t i_cam_read_miss:1; + uint64_t i_rsvd1:2; + uint64_t i_ioq_rep_underflow:1; + uint64_t i_ioq_req_underflow:1; + uint64_t i_ioq_rep_overflow:1; + uint64_t i_ioq_req_overflow:1; + uint64_t i_iiq_rep_overflow:1; + uint64_t i_iiq_req_overflow:1; + uint64_t i_rsvd2:6; + uint64_t i_ii_xn_rep_cred_overflow:1; + uint64_t i_ii_xn_req_cred_overflow:1; + uint64_t i_rsvd3:6; + uint64_t i_ii_xn_invalid_cmd:1; + uint64_t i_xn_ii_invalid_cmd:1; + uint64_t i_rsvd4:30; } ii_iweim_fld_s; } ii_iweim_u_t; - /************************************************************************ - * * + * * * A write to this register causes a particular field in the * * corresponding widget's PRB entry to be adjusted up or down by 1. * * This counter should be used when recovering from error and reset * * conditions. Note that software would be capable of causing * * inadvertent overflow or underflow of these counters. * - * * + * * ************************************************************************/ typedef union ii_ipca_u { - uint64_t ii_ipca_regval; - struct { - uint64_t i_wid : 4; - uint64_t i_adjust : 1; - uint64_t i_rsvd_1 : 3; - uint64_t i_field : 2; - uint64_t i_rsvd : 54; + uint64_t ii_ipca_regval; + struct { + uint64_t i_wid:4; + uint64_t i_adjust:1; + uint64_t i_rsvd_1:3; + uint64_t i_field:2; + uint64_t i_rsvd:54; } ii_ipca_fld_s; } ii_ipca_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. 
* - * * + * * ************************************************************************/ - typedef union ii_iprte0a_u { - uint64_t ii_iprte0a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t ii_iprte0a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } ii_iprte0a_fld_s; } ii_iprte0a_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte1a_u { - uint64_t ii_iprte1a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t ii_iprte1a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } ii_iprte1a_fld_s; } ii_iprte1a_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte2a_u { - uint64_t ii_iprte2a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t ii_iprte2a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } ii_iprte2a_fld_s; } ii_iprte2a_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte3a_u { - uint64_t ii_iprte3a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t ii_iprte3a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } ii_iprte3a_fld_s; } ii_iprte3a_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. 
* - * * + * * ************************************************************************/ typedef union ii_iprte4a_u { - uint64_t ii_iprte4a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t ii_iprte4a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } ii_iprte4a_fld_s; } ii_iprte4a_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte5a_u { - uint64_t ii_iprte5a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t ii_iprte5a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } ii_iprte5a_fld_s; } ii_iprte5a_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte6a_u { - uint64_t ii_iprte6a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t ii_iprte6a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } ii_iprte6a_fld_s; } ii_iprte6a_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte7a_u { - uint64_t ii_iprte7a_regval; - struct { - uint64_t i_rsvd_1 : 54; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; - } ii_iprtea7_fld_s; + uint64_t ii_iprte7a_regval; + struct { + uint64_t i_rsvd_1:54; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; + } ii_iprtea7_fld_s; } ii_iprte7a_u_t; - - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. 
* - * * + * * ************************************************************************/ - typedef union ii_iprte0b_u { - uint64_t ii_iprte0b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; + uint64_t ii_iprte0b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; } ii_iprte0b_fld_s; } ii_iprte0b_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte1b_u { - uint64_t ii_iprte1b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; + uint64_t ii_iprte1b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; } ii_iprte1b_fld_s; } ii_iprte1b_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte2b_u { - uint64_t ii_iprte2b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; + uint64_t ii_iprte2b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; } ii_iprte2b_fld_s; } ii_iprte2b_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte3b_u { - uint64_t ii_iprte3b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; + uint64_t ii_iprte3b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; } ii_iprte3b_fld_s; } ii_iprte3b_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. 
* - * * + * * ************************************************************************/ typedef union ii_iprte4b_u { - uint64_t ii_iprte4b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; + uint64_t ii_iprte4b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; } ii_iprte4b_fld_s; } ii_iprte4b_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte5b_u { - uint64_t ii_iprte5b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; + uint64_t ii_iprte5b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; } ii_iprte5b_fld_s; } ii_iprte5b_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte6b_u { - uint64_t ii_iprte6b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; + uint64_t ii_iprte6b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; } ii_iprte6b_fld_s; } ii_iprte6b_u_t; - /************************************************************************ - * * + * * * There are 8 instances of this register. This register contains * * the information that the II has to remember once it has launched a * * PIO Read operation. The contents are used to form the correct * * Router Network packet and direct the Crosstalk reply to the * * appropriate processor. * - * * + * * ************************************************************************/ typedef union ii_iprte7b_u { - uint64_t ii_iprte7b_regval; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_address : 47; - uint64_t i_init : 3; - uint64_t i_source : 11; - } ii_iprte7b_fld_s; + uint64_t ii_iprte7b_regval; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_address:47; + uint64_t i_init:3; + uint64_t i_source:11; + } ii_iprte7b_fld_s; } ii_iprte7b_u_t; - /************************************************************************ - * * + * * * Description: SHub II contains a feature which did not exist in * * the Hub which automatically cleans up after a Read Response * * timeout, including deallocation of the IPRTE and recovery of IBuf * @@ -2108,23 +2034,22 @@ typedef union ii_iprte7b_u { * Note that this register does not affect the contents of the IPRTE * * registers. The Valid bits in those registers have to be * * specifically turned off by software. 
* - * * + * * ************************************************************************/ typedef union ii_ipdr_u { - uint64_t ii_ipdr_regval; - struct { - uint64_t i_te : 3; - uint64_t i_rsvd_1 : 1; - uint64_t i_pnd : 1; - uint64_t i_init_rpcnt : 1; - uint64_t i_rsvd : 58; + uint64_t ii_ipdr_regval; + struct { + uint64_t i_te:3; + uint64_t i_rsvd_1:1; + uint64_t i_pnd:1; + uint64_t i_init_rpcnt:1; + uint64_t i_rsvd:58; } ii_ipdr_fld_s; } ii_ipdr_u_t; - /************************************************************************ - * * + * * * A write to this register causes a CRB entry to be returned to the * * queue of free CRBs. The entry should have previously been cleared * * (mark bit) via backdoor access to the pertinent CRB entry. This * @@ -2137,21 +2062,20 @@ typedef union ii_ipdr_u { * software clears the mark bit, and finally 4) software writes to * * the ICDR register to return the CRB entry to the list of free CRB * * entries. * - * * + * * ************************************************************************/ typedef union ii_icdr_u { - uint64_t ii_icdr_regval; - struct { - uint64_t i_crb_num : 4; - uint64_t i_pnd : 1; - uint64_t i_rsvd : 59; + uint64_t ii_icdr_regval; + struct { + uint64_t i_crb_num:4; + uint64_t i_pnd:1; + uint64_t i_rsvd:59; } ii_icdr_fld_s; } ii_icdr_u_t; - /************************************************************************ - * * + * * * This register provides debug access to two FIFOs inside of II. * * Both IOQ_MAX* fields of this register contain the instantaneous * * depth (in units of the number of available entries) of the * @@ -2164,130 +2088,124 @@ typedef union ii_icdr_u { * this register is written. If there are any active entries in any * * of these FIFOs when this register is written, the results are * * undefined. * - * * + * * ************************************************************************/ typedef union ii_ifdr_u { - uint64_t ii_ifdr_regval; - struct { - uint64_t i_ioq_max_rq : 7; - uint64_t i_set_ioq_rq : 1; - uint64_t i_ioq_max_rp : 7; - uint64_t i_set_ioq_rp : 1; - uint64_t i_rsvd : 48; + uint64_t ii_ifdr_regval; + struct { + uint64_t i_ioq_max_rq:7; + uint64_t i_set_ioq_rq:1; + uint64_t i_ioq_max_rp:7; + uint64_t i_set_ioq_rp:1; + uint64_t i_rsvd:48; } ii_ifdr_fld_s; } ii_ifdr_u_t; - /************************************************************************ - * * + * * * This register allows the II to become sluggish in removing * * messages from its inbound queue (IIQ). This will cause messages to * * back up in either virtual channel. Disabling the "molasses" mode * * subsequently allows the II to be tested under stress. In the * * sluggish ("Molasses") mode, the localized effects of congestion * * can be observed. * - * * + * * ************************************************************************/ typedef union ii_iiap_u { - uint64_t ii_iiap_regval; - struct { - uint64_t i_rq_mls : 6; - uint64_t i_rsvd_1 : 2; - uint64_t i_rp_mls : 6; - uint64_t i_rsvd : 50; - } ii_iiap_fld_s; + uint64_t ii_iiap_regval; + struct { + uint64_t i_rq_mls:6; + uint64_t i_rsvd_1:2; + uint64_t i_rp_mls:6; + uint64_t i_rsvd:50; + } ii_iiap_fld_s; } ii_iiap_u_t; - /************************************************************************ - * * + * * * This register allows several parameters of CRB operation to be * * set. Note that writing to this register can have catastrophic side * * effects, if the CRB is not quiescent, i.e. if the CRB is * * processing protocol messages when the write occurs. 
* - * * + * * ************************************************************************/ typedef union ii_icmr_u { - uint64_t ii_icmr_regval; - struct { - uint64_t i_sp_msg : 1; - uint64_t i_rd_hdr : 1; - uint64_t i_rsvd_4 : 2; - uint64_t i_c_cnt : 4; - uint64_t i_rsvd_3 : 4; - uint64_t i_clr_rqpd : 1; - uint64_t i_clr_rppd : 1; - uint64_t i_rsvd_2 : 2; - uint64_t i_fc_cnt : 4; - uint64_t i_crb_vld : 15; - uint64_t i_crb_mark : 15; - uint64_t i_rsvd_1 : 2; - uint64_t i_precise : 1; - uint64_t i_rsvd : 11; + uint64_t ii_icmr_regval; + struct { + uint64_t i_sp_msg:1; + uint64_t i_rd_hdr:1; + uint64_t i_rsvd_4:2; + uint64_t i_c_cnt:4; + uint64_t i_rsvd_3:4; + uint64_t i_clr_rqpd:1; + uint64_t i_clr_rppd:1; + uint64_t i_rsvd_2:2; + uint64_t i_fc_cnt:4; + uint64_t i_crb_vld:15; + uint64_t i_crb_mark:15; + uint64_t i_rsvd_1:2; + uint64_t i_precise:1; + uint64_t i_rsvd:11; } ii_icmr_fld_s; } ii_icmr_u_t; - /************************************************************************ - * * + * * * This register allows control of the table portion of the CRB * * logic via software. Control operations from this register have * * priority over all incoming Crosstalk or BTE requests. * - * * + * * ************************************************************************/ typedef union ii_iccr_u { - uint64_t ii_iccr_regval; - struct { - uint64_t i_crb_num : 4; - uint64_t i_rsvd_1 : 4; - uint64_t i_cmd : 8; - uint64_t i_pending : 1; - uint64_t i_rsvd : 47; + uint64_t ii_iccr_regval; + struct { + uint64_t i_crb_num:4; + uint64_t i_rsvd_1:4; + uint64_t i_cmd:8; + uint64_t i_pending:1; + uint64_t i_rsvd:47; } ii_iccr_fld_s; } ii_iccr_u_t; - /************************************************************************ - * * + * * * This register allows the maximum timeout value to be programmed. * - * * + * * ************************************************************************/ typedef union ii_icto_u { - uint64_t ii_icto_regval; - struct { - uint64_t i_timeout : 8; - uint64_t i_rsvd : 56; + uint64_t ii_icto_regval; + struct { + uint64_t i_timeout:8; + uint64_t i_rsvd:56; } ii_icto_fld_s; } ii_icto_u_t; - /************************************************************************ - * * + * * * This register allows the timeout prescalar to be programmed. An * * internal counter is associated with this register. When the * * internal counter reaches the value of the PRESCALE field, the * * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] * * field). The internal counter resets to zero, and then continues * * counting. * - * * + * * ************************************************************************/ typedef union ii_ictp_u { - uint64_t ii_ictp_regval; - struct { - uint64_t i_prescale : 24; - uint64_t i_rsvd : 40; + uint64_t ii_ictp_regval; + struct { + uint64_t i_prescale:24; + uint64_t i_rsvd:40; } ii_ictp_fld_s; } ii_ictp_u_t; - /************************************************************************ - * * + * * * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * * used for Crosstalk operations (both cacheline and partial * * operations) or BTE/IO. Because the CRB entries are very wide, five * @@ -2306,243 +2224,234 @@ typedef union ii_ictp_u { * recovering any potential error state from before the reset). * * The following four tables summarize the format for the four * * registers that are used for each ICRB# Entry. 
* - * * + * * ************************************************************************/ typedef union ii_icrb0_a_u { - uint64_t ii_icrb0_a_regval; - struct { - uint64_t ia_iow : 1; - uint64_t ia_vld : 1; - uint64_t ia_addr : 47; - uint64_t ia_tnum : 5; - uint64_t ia_sidn : 4; - uint64_t ia_rsvd : 6; + uint64_t ii_icrb0_a_regval; + struct { + uint64_t ia_iow:1; + uint64_t ia_vld:1; + uint64_t ia_addr:47; + uint64_t ia_tnum:5; + uint64_t ia_sidn:4; + uint64_t ia_rsvd:6; } ii_icrb0_a_fld_s; } ii_icrb0_a_u_t; - /************************************************************************ - * * + * * * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * * used for Crosstalk operations (both cacheline and partial * * operations) or BTE/IO. Because the CRB entries are very wide, five * * registers (_A to _E) are required to read and write each entry. * - * * + * * ************************************************************************/ typedef union ii_icrb0_b_u { - uint64_t ii_icrb0_b_regval; - struct { - uint64_t ib_xt_err : 1; - uint64_t ib_mark : 1; - uint64_t ib_ln_uce : 1; - uint64_t ib_errcode : 3; - uint64_t ib_error : 1; - uint64_t ib_stall__bte_1 : 1; - uint64_t ib_stall__bte_0 : 1; - uint64_t ib_stall__intr : 1; - uint64_t ib_stall_ib : 1; - uint64_t ib_intvn : 1; - uint64_t ib_wb : 1; - uint64_t ib_hold : 1; - uint64_t ib_ack : 1; - uint64_t ib_resp : 1; - uint64_t ib_ack_cnt : 11; - uint64_t ib_rsvd : 7; - uint64_t ib_exc : 5; - uint64_t ib_init : 3; - uint64_t ib_imsg : 8; - uint64_t ib_imsgtype : 2; - uint64_t ib_use_old : 1; - uint64_t ib_rsvd_1 : 11; + uint64_t ii_icrb0_b_regval; + struct { + uint64_t ib_xt_err:1; + uint64_t ib_mark:1; + uint64_t ib_ln_uce:1; + uint64_t ib_errcode:3; + uint64_t ib_error:1; + uint64_t ib_stall__bte_1:1; + uint64_t ib_stall__bte_0:1; + uint64_t ib_stall__intr:1; + uint64_t ib_stall_ib:1; + uint64_t ib_intvn:1; + uint64_t ib_wb:1; + uint64_t ib_hold:1; + uint64_t ib_ack:1; + uint64_t ib_resp:1; + uint64_t ib_ack_cnt:11; + uint64_t ib_rsvd:7; + uint64_t ib_exc:5; + uint64_t ib_init:3; + uint64_t ib_imsg:8; + uint64_t ib_imsgtype:2; + uint64_t ib_use_old:1; + uint64_t ib_rsvd_1:11; } ii_icrb0_b_fld_s; } ii_icrb0_b_u_t; - /************************************************************************ - * * + * * * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * * used for Crosstalk operations (both cacheline and partial * * operations) or BTE/IO. Because the CRB entries are very wide, five * * registers (_A to _E) are required to read and write each entry. * - * * + * * ************************************************************************/ typedef union ii_icrb0_c_u { - uint64_t ii_icrb0_c_regval; - struct { - uint64_t ic_source : 15; - uint64_t ic_size : 2; - uint64_t ic_ct : 1; - uint64_t ic_bte_num : 1; - uint64_t ic_gbr : 1; - uint64_t ic_resprqd : 1; - uint64_t ic_bo : 1; - uint64_t ic_suppl : 15; - uint64_t ic_rsvd : 27; + uint64_t ii_icrb0_c_regval; + struct { + uint64_t ic_source:15; + uint64_t ic_size:2; + uint64_t ic_ct:1; + uint64_t ic_bte_num:1; + uint64_t ic_gbr:1; + uint64_t ic_resprqd:1; + uint64_t ic_bo:1; + uint64_t ic_suppl:15; + uint64_t ic_rsvd:27; } ii_icrb0_c_fld_s; } ii_icrb0_c_u_t; - /************************************************************************ - * * + * * * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * * used for Crosstalk operations (both cacheline and partial * * operations) or BTE/IO. 
Because the CRB entries are very wide, five * * registers (_A to _E) are required to read and write each entry. * - * * + * * ************************************************************************/ typedef union ii_icrb0_d_u { - uint64_t ii_icrb0_d_regval; - struct { - uint64_t id_pa_be : 43; - uint64_t id_bte_op : 1; - uint64_t id_pr_psc : 4; - uint64_t id_pr_cnt : 4; - uint64_t id_sleep : 1; - uint64_t id_rsvd : 11; + uint64_t ii_icrb0_d_regval; + struct { + uint64_t id_pa_be:43; + uint64_t id_bte_op:1; + uint64_t id_pr_psc:4; + uint64_t id_pr_cnt:4; + uint64_t id_sleep:1; + uint64_t id_rsvd:11; } ii_icrb0_d_fld_s; } ii_icrb0_d_u_t; - /************************************************************************ - * * + * * * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * * used for Crosstalk operations (both cacheline and partial * * operations) or BTE/IO. Because the CRB entries are very wide, five * * registers (_A to _E) are required to read and write each entry. * - * * + * * ************************************************************************/ typedef union ii_icrb0_e_u { - uint64_t ii_icrb0_e_regval; - struct { - uint64_t ie_timeout : 8; - uint64_t ie_context : 15; - uint64_t ie_rsvd : 1; - uint64_t ie_tvld : 1; - uint64_t ie_cvld : 1; - uint64_t ie_rsvd_0 : 38; + uint64_t ii_icrb0_e_regval; + struct { + uint64_t ie_timeout:8; + uint64_t ie_context:15; + uint64_t ie_rsvd:1; + uint64_t ie_tvld:1; + uint64_t ie_cvld:1; + uint64_t ie_rsvd_0:38; } ii_icrb0_e_fld_s; } ii_icrb0_e_u_t; - /************************************************************************ - * * + * * * This register contains the lower 64 bits of the header of the * * spurious message captured by II. Valid when the SP_MSG bit in ICMR * * register is set. * - * * + * * ************************************************************************/ typedef union ii_icsml_u { - uint64_t ii_icsml_regval; - struct { - uint64_t i_tt_addr : 47; - uint64_t i_newsuppl_ex : 14; - uint64_t i_reserved : 2; - uint64_t i_overflow : 1; + uint64_t ii_icsml_regval; + struct { + uint64_t i_tt_addr:47; + uint64_t i_newsuppl_ex:14; + uint64_t i_reserved:2; + uint64_t i_overflow:1; } ii_icsml_fld_s; } ii_icsml_u_t; - /************************************************************************ - * * + * * * This register contains the middle 64 bits of the header of the * * spurious message captured by II. Valid when the SP_MSG bit in ICMR * * register is set. * - * * + * * ************************************************************************/ typedef union ii_icsmm_u { - uint64_t ii_icsmm_regval; - struct { - uint64_t i_tt_ack_cnt : 11; - uint64_t i_reserved : 53; + uint64_t ii_icsmm_regval; + struct { + uint64_t i_tt_ack_cnt:11; + uint64_t i_reserved:53; } ii_icsmm_fld_s; } ii_icsmm_u_t; - /************************************************************************ - * * + * * * This register contains the microscopic state, all the inputs to * * the protocol table, captured with the spurious message. Valid when * * the SP_MSG bit in the ICMR register is set. 
* - * * + * * ************************************************************************/ typedef union ii_icsmh_u { - uint64_t ii_icsmh_regval; - struct { - uint64_t i_tt_vld : 1; - uint64_t i_xerr : 1; - uint64_t i_ft_cwact_o : 1; - uint64_t i_ft_wact_o : 1; - uint64_t i_ft_active_o : 1; - uint64_t i_sync : 1; - uint64_t i_mnusg : 1; - uint64_t i_mnusz : 1; - uint64_t i_plusz : 1; - uint64_t i_plusg : 1; - uint64_t i_tt_exc : 5; - uint64_t i_tt_wb : 1; - uint64_t i_tt_hold : 1; - uint64_t i_tt_ack : 1; - uint64_t i_tt_resp : 1; - uint64_t i_tt_intvn : 1; - uint64_t i_g_stall_bte1 : 1; - uint64_t i_g_stall_bte0 : 1; - uint64_t i_g_stall_il : 1; - uint64_t i_g_stall_ib : 1; - uint64_t i_tt_imsg : 8; - uint64_t i_tt_imsgtype : 2; - uint64_t i_tt_use_old : 1; - uint64_t i_tt_respreqd : 1; - uint64_t i_tt_bte_num : 1; - uint64_t i_cbn : 1; - uint64_t i_match : 1; - uint64_t i_rpcnt_lt_34 : 1; - uint64_t i_rpcnt_ge_34 : 1; - uint64_t i_rpcnt_lt_18 : 1; - uint64_t i_rpcnt_ge_18 : 1; - uint64_t i_rpcnt_lt_2 : 1; - uint64_t i_rpcnt_ge_2 : 1; - uint64_t i_rqcnt_lt_18 : 1; - uint64_t i_rqcnt_ge_18 : 1; - uint64_t i_rqcnt_lt_2 : 1; - uint64_t i_rqcnt_ge_2 : 1; - uint64_t i_tt_device : 7; - uint64_t i_tt_init : 3; - uint64_t i_reserved : 5; + uint64_t ii_icsmh_regval; + struct { + uint64_t i_tt_vld:1; + uint64_t i_xerr:1; + uint64_t i_ft_cwact_o:1; + uint64_t i_ft_wact_o:1; + uint64_t i_ft_active_o:1; + uint64_t i_sync:1; + uint64_t i_mnusg:1; + uint64_t i_mnusz:1; + uint64_t i_plusz:1; + uint64_t i_plusg:1; + uint64_t i_tt_exc:5; + uint64_t i_tt_wb:1; + uint64_t i_tt_hold:1; + uint64_t i_tt_ack:1; + uint64_t i_tt_resp:1; + uint64_t i_tt_intvn:1; + uint64_t i_g_stall_bte1:1; + uint64_t i_g_stall_bte0:1; + uint64_t i_g_stall_il:1; + uint64_t i_g_stall_ib:1; + uint64_t i_tt_imsg:8; + uint64_t i_tt_imsgtype:2; + uint64_t i_tt_use_old:1; + uint64_t i_tt_respreqd:1; + uint64_t i_tt_bte_num:1; + uint64_t i_cbn:1; + uint64_t i_match:1; + uint64_t i_rpcnt_lt_34:1; + uint64_t i_rpcnt_ge_34:1; + uint64_t i_rpcnt_lt_18:1; + uint64_t i_rpcnt_ge_18:1; + uint64_t i_rpcnt_lt_2:1; + uint64_t i_rpcnt_ge_2:1; + uint64_t i_rqcnt_lt_18:1; + uint64_t i_rqcnt_ge_18:1; + uint64_t i_rqcnt_lt_2:1; + uint64_t i_rqcnt_ge_2:1; + uint64_t i_tt_device:7; + uint64_t i_tt_init:3; + uint64_t i_reserved:5; } ii_icsmh_fld_s; } ii_icsmh_u_t; - /************************************************************************ - * * + * * * The Shub DEBUG unit provides a 3-bit selection signal to the * * II core and a 3-bit selection signal to the fsbclk domain in the II * * wrapper. * - * * + * * ************************************************************************/ typedef union ii_idbss_u { - uint64_t ii_idbss_regval; - struct { - uint64_t i_iioclk_core_submenu : 3; - uint64_t i_rsvd : 5; - uint64_t i_fsbclk_wrapper_submenu : 3; - uint64_t i_rsvd_1 : 5; - uint64_t i_iioclk_menu : 5; - uint64_t i_rsvd_2 : 43; + uint64_t ii_idbss_regval; + struct { + uint64_t i_iioclk_core_submenu:3; + uint64_t i_rsvd:5; + uint64_t i_fsbclk_wrapper_submenu:3; + uint64_t i_rsvd_1:5; + uint64_t i_iioclk_menu:5; + uint64_t i_rsvd_2:43; } ii_idbss_fld_s; } ii_idbss_u_t; - /************************************************************************ - * * + * * * Description: This register is used to set up the length for a * * transfer and then to monitor the progress of that transfer. This * * register needs to be initialized before a transfer is started. A * @@ -2553,63 +2462,60 @@ typedef union ii_idbss_u { * transfer completes, hardware will clear the Busy bit. 
The length * * field will also contain the number of cache lines left to be * * transferred. * - * * + * * ************************************************************************/ typedef union ii_ibls0_u { - uint64_t ii_ibls0_regval; - struct { - uint64_t i_length : 16; - uint64_t i_error : 1; - uint64_t i_rsvd_1 : 3; - uint64_t i_busy : 1; - uint64_t i_rsvd : 43; + uint64_t ii_ibls0_regval; + struct { + uint64_t i_length:16; + uint64_t i_error:1; + uint64_t i_rsvd_1:3; + uint64_t i_busy:1; + uint64_t i_rsvd:43; } ii_ibls0_fld_s; } ii_ibls0_u_t; - /************************************************************************ - * * + * * * This register should be loaded before a transfer is started. The * * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * * address as described in Section 1.3, Figure2 and Figure3. Since * * the bottom 7 bits of the address are always taken to be zero, BTE * * transfers are always cacheline-aligned. * - * * + * * ************************************************************************/ typedef union ii_ibsa0_u { - uint64_t ii_ibsa0_regval; - struct { - uint64_t i_rsvd_1 : 7; - uint64_t i_addr : 42; - uint64_t i_rsvd : 15; + uint64_t ii_ibsa0_regval; + struct { + uint64_t i_rsvd_1:7; + uint64_t i_addr:42; + uint64_t i_rsvd:15; } ii_ibsa0_fld_s; } ii_ibsa0_u_t; - /************************************************************************ - * * + * * * This register should be loaded before a transfer is started. The * * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * * address as described in Section 1.3, Figure2 and Figure3. Since * * the bottom 7 bits of the address are always taken to be zero, BTE * * transfers are always cacheline-aligned. * - * * + * * ************************************************************************/ typedef union ii_ibda0_u { - uint64_t ii_ibda0_regval; - struct { - uint64_t i_rsvd_1 : 7; - uint64_t i_addr : 42; - uint64_t i_rsvd : 15; + uint64_t ii_ibda0_regval; + struct { + uint64_t i_rsvd_1:7; + uint64_t i_addr:42; + uint64_t i_rsvd:15; } ii_ibda0_fld_s; } ii_ibda0_u_t; - /************************************************************************ - * * + * * * Writing to this register sets up the attributes of the transfer * * and initiates the transfer operation. Reading this register has * * the side effect of terminating any transfer in progress. Note: * @@ -2617,61 +2523,58 @@ typedef union ii_ibda0_u { * other BTE. If a BTE stream has to be stopped (due to error * * handling for example), both BTE streams should be stopped and * * their transfers discarded. * - * * + * * ************************************************************************/ typedef union ii_ibct0_u { - uint64_t ii_ibct0_regval; - struct { - uint64_t i_zerofill : 1; - uint64_t i_rsvd_2 : 3; - uint64_t i_notify : 1; - uint64_t i_rsvd_1 : 3; - uint64_t i_poison : 1; - uint64_t i_rsvd : 55; + uint64_t ii_ibct0_regval; + struct { + uint64_t i_zerofill:1; + uint64_t i_rsvd_2:3; + uint64_t i_notify:1; + uint64_t i_rsvd_1:3; + uint64_t i_poison:1; + uint64_t i_rsvd:55; } ii_ibct0_fld_s; } ii_ibct0_u_t; - /************************************************************************ - * * + * * * This register contains the address to which the WINV is sent. * * This address has to be cache line aligned. 
* - * * + * * ************************************************************************/ typedef union ii_ibna0_u { - uint64_t ii_ibna0_regval; - struct { - uint64_t i_rsvd_1 : 7; - uint64_t i_addr : 42; - uint64_t i_rsvd : 15; + uint64_t ii_ibna0_regval; + struct { + uint64_t i_rsvd_1:7; + uint64_t i_addr:42; + uint64_t i_rsvd:15; } ii_ibna0_fld_s; } ii_ibna0_u_t; - /************************************************************************ - * * + * * * This register contains the programmable level as well as the node * * ID and PI unit of the processor to which the interrupt will be * - * sent. * - * * + * sent. * + * * ************************************************************************/ typedef union ii_ibia0_u { - uint64_t ii_ibia0_regval; - struct { - uint64_t i_rsvd_2 : 1; - uint64_t i_node_id : 11; - uint64_t i_rsvd_1 : 4; - uint64_t i_level : 7; - uint64_t i_rsvd : 41; + uint64_t ii_ibia0_regval; + struct { + uint64_t i_rsvd_2:1; + uint64_t i_node_id:11; + uint64_t i_rsvd_1:4; + uint64_t i_level:7; + uint64_t i_rsvd:41; } ii_ibia0_fld_s; } ii_ibia0_u_t; - /************************************************************************ - * * + * * * Description: This register is used to set up the length for a * * transfer and then to monitor the progress of that transfer. This * * register needs to be initialized before a transfer is started. A * @@ -2682,63 +2585,60 @@ typedef union ii_ibia0_u { * transfer completes, hardware will clear the Busy bit. The length * * field will also contain the number of cache lines left to be * * transferred. * - * * + * * ************************************************************************/ typedef union ii_ibls1_u { - uint64_t ii_ibls1_regval; - struct { - uint64_t i_length : 16; - uint64_t i_error : 1; - uint64_t i_rsvd_1 : 3; - uint64_t i_busy : 1; - uint64_t i_rsvd : 43; + uint64_t ii_ibls1_regval; + struct { + uint64_t i_length:16; + uint64_t i_error:1; + uint64_t i_rsvd_1:3; + uint64_t i_busy:1; + uint64_t i_rsvd:43; } ii_ibls1_fld_s; } ii_ibls1_u_t; - /************************************************************************ - * * + * * * This register should be loaded before a transfer is started. The * * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * * address as described in Section 1.3, Figure2 and Figure3. Since * * the bottom 7 bits of the address are always taken to be zero, BTE * * transfers are always cacheline-aligned. * - * * + * * ************************************************************************/ typedef union ii_ibsa1_u { - uint64_t ii_ibsa1_regval; - struct { - uint64_t i_rsvd_1 : 7; - uint64_t i_addr : 33; - uint64_t i_rsvd : 24; + uint64_t ii_ibsa1_regval; + struct { + uint64_t i_rsvd_1:7; + uint64_t i_addr:33; + uint64_t i_rsvd:24; } ii_ibsa1_fld_s; } ii_ibsa1_u_t; - /************************************************************************ - * * + * * * This register should be loaded before a transfer is started. The * * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * * address as described in Section 1.3, Figure2 and Figure3. Since * * the bottom 7 bits of the address are always taken to be zero, BTE * * transfers are always cacheline-aligned. 
* - * * + * * ************************************************************************/ typedef union ii_ibda1_u { - uint64_t ii_ibda1_regval; - struct { - uint64_t i_rsvd_1 : 7; - uint64_t i_addr : 33; - uint64_t i_rsvd : 24; + uint64_t ii_ibda1_regval; + struct { + uint64_t i_rsvd_1:7; + uint64_t i_addr:33; + uint64_t i_rsvd:24; } ii_ibda1_fld_s; } ii_ibda1_u_t; - /************************************************************************ - * * + * * * Writing to this register sets up the attributes of the transfer * * and initiates the transfer operation. Reading this register has * * the side effect of terminating any transfer in progress. Note: * @@ -2746,61 +2646,58 @@ typedef union ii_ibda1_u { * other BTE. If a BTE stream has to be stopped (due to error * * handling for example), both BTE streams should be stopped and * * their transfers discarded. * - * * + * * ************************************************************************/ typedef union ii_ibct1_u { - uint64_t ii_ibct1_regval; - struct { - uint64_t i_zerofill : 1; - uint64_t i_rsvd_2 : 3; - uint64_t i_notify : 1; - uint64_t i_rsvd_1 : 3; - uint64_t i_poison : 1; - uint64_t i_rsvd : 55; + uint64_t ii_ibct1_regval; + struct { + uint64_t i_zerofill:1; + uint64_t i_rsvd_2:3; + uint64_t i_notify:1; + uint64_t i_rsvd_1:3; + uint64_t i_poison:1; + uint64_t i_rsvd:55; } ii_ibct1_fld_s; } ii_ibct1_u_t; - /************************************************************************ - * * + * * * This register contains the address to which the WINV is sent. * * This address has to be cache line aligned. * - * * + * * ************************************************************************/ typedef union ii_ibna1_u { - uint64_t ii_ibna1_regval; - struct { - uint64_t i_rsvd_1 : 7; - uint64_t i_addr : 33; - uint64_t i_rsvd : 24; + uint64_t ii_ibna1_regval; + struct { + uint64_t i_rsvd_1:7; + uint64_t i_addr:33; + uint64_t i_rsvd:24; } ii_ibna1_fld_s; } ii_ibna1_u_t; - /************************************************************************ - * * + * * * This register contains the programmable level as well as the node * * ID and PI unit of the processor to which the interrupt will be * - * sent. * - * * + * sent. * + * * ************************************************************************/ typedef union ii_ibia1_u { - uint64_t ii_ibia1_regval; - struct { - uint64_t i_pi_id : 1; - uint64_t i_node_id : 8; - uint64_t i_rsvd_1 : 7; - uint64_t i_level : 7; - uint64_t i_rsvd : 41; + uint64_t ii_ibia1_regval; + struct { + uint64_t i_pi_id:1; + uint64_t i_node_id:8; + uint64_t i_rsvd_1:7; + uint64_t i_level:7; + uint64_t i_rsvd:41; } ii_ibia1_fld_s; } ii_ibia1_u_t; - /************************************************************************ - * * + * * * This register defines the resources that feed information into * * the two performance counters located in the IO Performance * * Profiling Register. There are 17 different quantities that can be * @@ -2811,133 +2708,129 @@ typedef union ii_ibia1_u { * other is available from the other performance counter. Hence, the * * II supports all 17*16=272 possible combinations of quantities to * * measure. 
* - * * + * * ************************************************************************/ typedef union ii_ipcr_u { - uint64_t ii_ipcr_regval; - struct { - uint64_t i_ippr0_c : 4; - uint64_t i_ippr1_c : 4; - uint64_t i_icct : 8; - uint64_t i_rsvd : 48; + uint64_t ii_ipcr_regval; + struct { + uint64_t i_ippr0_c:4; + uint64_t i_ippr1_c:4; + uint64_t i_icct:8; + uint64_t i_rsvd:48; } ii_ipcr_fld_s; } ii_ipcr_u_t; - /************************************************************************ - * * - * * - * * + * * + * * + * * ************************************************************************/ typedef union ii_ippr_u { - uint64_t ii_ippr_regval; - struct { - uint64_t i_ippr0 : 32; - uint64_t i_ippr1 : 32; + uint64_t ii_ippr_regval; + struct { + uint64_t i_ippr0:32; + uint64_t i_ippr1:32; } ii_ippr_fld_s; } ii_ippr_u_t; - - -/************************************************************************** - * * - * The following defines which were not formed into structures are * - * probably indentical to another register, and the name of the * - * register is provided against each of these registers. This * - * information needs to be checked carefully * - * * - * IIO_ICRB1_A IIO_ICRB0_A * - * IIO_ICRB1_B IIO_ICRB0_B * - * IIO_ICRB1_C IIO_ICRB0_C * - * IIO_ICRB1_D IIO_ICRB0_D * - * IIO_ICRB1_E IIO_ICRB0_E * - * IIO_ICRB2_A IIO_ICRB0_A * - * IIO_ICRB2_B IIO_ICRB0_B * - * IIO_ICRB2_C IIO_ICRB0_C * - * IIO_ICRB2_D IIO_ICRB0_D * - * IIO_ICRB2_E IIO_ICRB0_E * - * IIO_ICRB3_A IIO_ICRB0_A * - * IIO_ICRB3_B IIO_ICRB0_B * - * IIO_ICRB3_C IIO_ICRB0_C * - * IIO_ICRB3_D IIO_ICRB0_D * - * IIO_ICRB3_E IIO_ICRB0_E * - * IIO_ICRB4_A IIO_ICRB0_A * - * IIO_ICRB4_B IIO_ICRB0_B * - * IIO_ICRB4_C IIO_ICRB0_C * - * IIO_ICRB4_D IIO_ICRB0_D * - * IIO_ICRB4_E IIO_ICRB0_E * - * IIO_ICRB5_A IIO_ICRB0_A * - * IIO_ICRB5_B IIO_ICRB0_B * - * IIO_ICRB5_C IIO_ICRB0_C * - * IIO_ICRB5_D IIO_ICRB0_D * - * IIO_ICRB5_E IIO_ICRB0_E * - * IIO_ICRB6_A IIO_ICRB0_A * - * IIO_ICRB6_B IIO_ICRB0_B * - * IIO_ICRB6_C IIO_ICRB0_C * - * IIO_ICRB6_D IIO_ICRB0_D * - * IIO_ICRB6_E IIO_ICRB0_E * - * IIO_ICRB7_A IIO_ICRB0_A * - * IIO_ICRB7_B IIO_ICRB0_B * - * IIO_ICRB7_C IIO_ICRB0_C * - * IIO_ICRB7_D IIO_ICRB0_D * - * IIO_ICRB7_E IIO_ICRB0_E * - * IIO_ICRB8_A IIO_ICRB0_A * - * IIO_ICRB8_B IIO_ICRB0_B * - * IIO_ICRB8_C IIO_ICRB0_C * - * IIO_ICRB8_D IIO_ICRB0_D * - * IIO_ICRB8_E IIO_ICRB0_E * - * IIO_ICRB9_A IIO_ICRB0_A * - * IIO_ICRB9_B IIO_ICRB0_B * - * IIO_ICRB9_C IIO_ICRB0_C * - * IIO_ICRB9_D IIO_ICRB0_D * - * IIO_ICRB9_E IIO_ICRB0_E * - * IIO_ICRBA_A IIO_ICRB0_A * - * IIO_ICRBA_B IIO_ICRB0_B * - * IIO_ICRBA_C IIO_ICRB0_C * - * IIO_ICRBA_D IIO_ICRB0_D * - * IIO_ICRBA_E IIO_ICRB0_E * - * IIO_ICRBB_A IIO_ICRB0_A * - * IIO_ICRBB_B IIO_ICRB0_B * - * IIO_ICRBB_C IIO_ICRB0_C * - * IIO_ICRBB_D IIO_ICRB0_D * - * IIO_ICRBB_E IIO_ICRB0_E * - * IIO_ICRBC_A IIO_ICRB0_A * - * IIO_ICRBC_B IIO_ICRB0_B * - * IIO_ICRBC_C IIO_ICRB0_C * - * IIO_ICRBC_D IIO_ICRB0_D * - * IIO_ICRBC_E IIO_ICRB0_E * - * IIO_ICRBD_A IIO_ICRB0_A * - * IIO_ICRBD_B IIO_ICRB0_B * - * IIO_ICRBD_C IIO_ICRB0_C * - * IIO_ICRBD_D IIO_ICRB0_D * - * IIO_ICRBD_E IIO_ICRB0_E * - * IIO_ICRBE_A IIO_ICRB0_A * - * IIO_ICRBE_B IIO_ICRB0_B * - * IIO_ICRBE_C IIO_ICRB0_C * - * IIO_ICRBE_D IIO_ICRB0_D * - * IIO_ICRBE_E IIO_ICRB0_E * - * * - **************************************************************************/ - +/************************************************************************ + * * + * The following defines which were not formed into structures are * + * probably indentical to another register, 
and the name of the * + * register is provided against each of these registers. This * + * information needs to be checked carefully * + * * + * IIO_ICRB1_A IIO_ICRB0_A * + * IIO_ICRB1_B IIO_ICRB0_B * + * IIO_ICRB1_C IIO_ICRB0_C * + * IIO_ICRB1_D IIO_ICRB0_D * + * IIO_ICRB1_E IIO_ICRB0_E * + * IIO_ICRB2_A IIO_ICRB0_A * + * IIO_ICRB2_B IIO_ICRB0_B * + * IIO_ICRB2_C IIO_ICRB0_C * + * IIO_ICRB2_D IIO_ICRB0_D * + * IIO_ICRB2_E IIO_ICRB0_E * + * IIO_ICRB3_A IIO_ICRB0_A * + * IIO_ICRB3_B IIO_ICRB0_B * + * IIO_ICRB3_C IIO_ICRB0_C * + * IIO_ICRB3_D IIO_ICRB0_D * + * IIO_ICRB3_E IIO_ICRB0_E * + * IIO_ICRB4_A IIO_ICRB0_A * + * IIO_ICRB4_B IIO_ICRB0_B * + * IIO_ICRB4_C IIO_ICRB0_C * + * IIO_ICRB4_D IIO_ICRB0_D * + * IIO_ICRB4_E IIO_ICRB0_E * + * IIO_ICRB5_A IIO_ICRB0_A * + * IIO_ICRB5_B IIO_ICRB0_B * + * IIO_ICRB5_C IIO_ICRB0_C * + * IIO_ICRB5_D IIO_ICRB0_D * + * IIO_ICRB5_E IIO_ICRB0_E * + * IIO_ICRB6_A IIO_ICRB0_A * + * IIO_ICRB6_B IIO_ICRB0_B * + * IIO_ICRB6_C IIO_ICRB0_C * + * IIO_ICRB6_D IIO_ICRB0_D * + * IIO_ICRB6_E IIO_ICRB0_E * + * IIO_ICRB7_A IIO_ICRB0_A * + * IIO_ICRB7_B IIO_ICRB0_B * + * IIO_ICRB7_C IIO_ICRB0_C * + * IIO_ICRB7_D IIO_ICRB0_D * + * IIO_ICRB7_E IIO_ICRB0_E * + * IIO_ICRB8_A IIO_ICRB0_A * + * IIO_ICRB8_B IIO_ICRB0_B * + * IIO_ICRB8_C IIO_ICRB0_C * + * IIO_ICRB8_D IIO_ICRB0_D * + * IIO_ICRB8_E IIO_ICRB0_E * + * IIO_ICRB9_A IIO_ICRB0_A * + * IIO_ICRB9_B IIO_ICRB0_B * + * IIO_ICRB9_C IIO_ICRB0_C * + * IIO_ICRB9_D IIO_ICRB0_D * + * IIO_ICRB9_E IIO_ICRB0_E * + * IIO_ICRBA_A IIO_ICRB0_A * + * IIO_ICRBA_B IIO_ICRB0_B * + * IIO_ICRBA_C IIO_ICRB0_C * + * IIO_ICRBA_D IIO_ICRB0_D * + * IIO_ICRBA_E IIO_ICRB0_E * + * IIO_ICRBB_A IIO_ICRB0_A * + * IIO_ICRBB_B IIO_ICRB0_B * + * IIO_ICRBB_C IIO_ICRB0_C * + * IIO_ICRBB_D IIO_ICRB0_D * + * IIO_ICRBB_E IIO_ICRB0_E * + * IIO_ICRBC_A IIO_ICRB0_A * + * IIO_ICRBC_B IIO_ICRB0_B * + * IIO_ICRBC_C IIO_ICRB0_C * + * IIO_ICRBC_D IIO_ICRB0_D * + * IIO_ICRBC_E IIO_ICRB0_E * + * IIO_ICRBD_A IIO_ICRB0_A * + * IIO_ICRBD_B IIO_ICRB0_B * + * IIO_ICRBD_C IIO_ICRB0_C * + * IIO_ICRBD_D IIO_ICRB0_D * + * IIO_ICRBD_E IIO_ICRB0_E * + * IIO_ICRBE_A IIO_ICRB0_A * + * IIO_ICRBE_B IIO_ICRB0_B * + * IIO_ICRBE_C IIO_ICRB0_C * + * IIO_ICRBE_D IIO_ICRB0_D * + * IIO_ICRBE_E IIO_ICRB0_E * + * * + ************************************************************************/ /* * Slightly friendlier names for some common registers. 
*/ -#define IIO_WIDGET IIO_WID /* Widget identification */ -#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */ -#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */ -#define IIO_PROTECT IIO_ILAPR /* IO interface protection */ -#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */ -#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */ -#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */ -#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */ -#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */ -#define IIO_LLP_LOG IIO_ILLR /* LLP log */ -#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/ -#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */ -#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */ +#define IIO_WIDGET IIO_WID /* Widget identification */ +#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */ +#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */ +#define IIO_PROTECT IIO_ILAPR /* IO interface protection */ +#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */ +#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */ +#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */ +#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */ +#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */ +#define IIO_LLP_LOG IIO_ILLR /* LLP log */ +#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */ +#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */ +#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */ #define IIO_IGFX_0 IIO_IGFX0 #define IIO_IGFX_1 IIO_IGFX1 #define IIO_IBCT_0 IIO_IBCT0 @@ -2957,12 +2850,12 @@ typedef union ii_ippr_u { #define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x))) #define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x))) #define IIO_NUM_PRTES 8 /* Total number of PRB table entries */ -#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */ -#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */ +#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */ +#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */ -#define IIO_NUM_IPRBS (9) +#define IIO_NUM_IPRBS 9 -#define IIO_LLP_CSR_IS_UP 0x00002000 +#define IIO_LLP_CSR_IS_UP 0x00002000 #define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000 #define IIO_LLP_CSR_LLP_STAT_SHFT 12 @@ -2970,30 +2863,29 @@ typedef union ii_ippr_u { #define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */ /* key to IIO_PROTECT_OVRRD */ -#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */ +#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */ /* BTE register names */ -#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */ -#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */ -#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */ -#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */ -#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */ -#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */ -#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */ -#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */ +#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */ +#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */ +#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. 
address 0 */ +#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */ +#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */ +#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */ +#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */ +#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */ /* BTE register offsets from base */ #define BTEOFF_STAT 0 -#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0) -#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0) -#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0) -#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0) -#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0) - +#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0) +#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0) +#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0) +#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0) +#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0) /* names used in shub diags */ -#define IIO_BASE_BTE0 IIO_IBLS_0 -#define IIO_BASE_BTE1 IIO_IBLS_1 +#define IIO_BASE_BTE0 IIO_IBLS_0 +#define IIO_BASE_BTE1 IIO_IBLS_1 /* * Macro which takes the widget number, and returns the @@ -3001,10 +2893,9 @@ typedef union ii_ippr_u { * value _x is expected to be a widget number in the range * 0, 8 - 0xF */ -#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \ - (_x) : \ - (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) ) - +#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \ + (_x) : \ + (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) ) /* GFX Flow Control Node/Widget Register */ #define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */ @@ -3025,7 +2916,6 @@ typedef union ii_ippr_u { (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \ (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT)) - /* Scratch registers (all bits available) */ #define IIO_SCRATCH_REG0 IIO_ISCR0 #define IIO_SCRATCH_REG1 IIO_ISCR1 @@ -3046,21 +2936,21 @@ typedef union ii_ippr_u { #define IIO_SCRATCH_BIT1_0 0x0000000000000001UL #define IIO_SCRATCH_BIT1_1 0x0000000000000002UL /* IO Translation Table Entries */ -#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */ - /* Hw manuals number them 1..7! */ +#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */ + /* Hw manuals number them 1..7! */ /* * IIO_IMEM Register fields. */ -#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */ -#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */ -#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */ +#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */ +#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */ +#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */ /* * As a permanent workaround for a bug in the PI side of the shub, we've * redefined big window 7 as small window 0. XXX does this still apply for SN1?? */ -#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) +#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) /* * Use the top big window as a surrogate for the first small window @@ -3071,11 +2961,11 @@ typedef union ii_ippr_u { /* * CRB manipulation macros - * The CRB macros are slightly complicated, since there are up to - * four registers associated with each CRB entry. + * The CRB macros are slightly complicated, since there are up to + * four registers associated with each CRB entry. 
*/ -#define IIO_NUM_CRBS 15 /* Number of CRBs */ -#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */ +#define IIO_NUM_CRBS 15 /* Number of CRBs */ +#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */ #define IIO_ICRB_OFFSET 8 #define IIO_ICRB_0 IIO_ICRB0_A #define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */ @@ -3083,43 +2973,43 @@ typedef union ii_ippr_u { #define IIO_FIRST_PC_ENTRY 12 */ -#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x)))) -#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)) -#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)) -#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)) -#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET)) +#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x)))) +#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)) +#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)) +#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)) +#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET)) #define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7) /* * values for "ecode" field */ -#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */ -#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */ -#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access - * e.g. WINV to a Read only line. */ -#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */ -#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */ -#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */ -#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */ -#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */ +#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */ +#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */ +#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access + * e.g. WINV to a Read only line. */ +#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */ +#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */ +#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */ +#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */ +#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */ /* * Values for field imsgtype */ -#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ -#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ -#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */ -#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ +#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ +#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ +#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */ +#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ /* * values for field initiator. */ -#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */ -#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */ -#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */ -#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? 
*/ -#define IIO_ICRB_INIT_BTE1 0x5 /* MEssage originated in BTE 1 */ +#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */ +#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */ +#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */ +#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */ +#define IIO_ICRB_INIT_BTE1 0x5 /* MEssage originated in BTE 1 */ /* * Number of credits Hub widget has while sending req/response to @@ -3127,8 +3017,8 @@ typedef union ii_ippr_u { * Value of 3 is required by Xbow 1.1 * We may be able to increase this to 4 with Xbow 1.2. */ -#define HUBII_XBOW_CREDIT 3 -#define HUBII_XBOW_REV2_CREDIT 4 +#define HUBII_XBOW_CREDIT 3 +#define HUBII_XBOW_REV2_CREDIT 4 /* * Number of credits that xtalk devices should use when communicating @@ -3159,28 +3049,28 @@ typedef union ii_ippr_u { */ #define IIO_ICMR_CRB_VLD_SHFT 20 -#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT) +#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT) #define IIO_ICMR_FC_CNT_SHFT 16 -#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT) +#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT) #define IIO_ICMR_C_CNT_SHFT 4 -#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT) +#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT) -#define IIO_ICMR_PRECISE (1UL << 52) -#define IIO_ICMR_CLR_RPPD (1UL << 13) -#define IIO_ICMR_CLR_RQPD (1UL << 12) +#define IIO_ICMR_PRECISE (1UL << 52) +#define IIO_ICMR_CLR_RPPD (1UL << 13) +#define IIO_ICMR_CLR_RQPD (1UL << 12) /* * IIO PIO Deallocation register field masks : (IIO_IPDR) XXX present but not needed in bedrock? See the manual. */ -#define IIO_IPDR_PND (1 << 4) +#define IIO_IPDR_PND (1 << 4) /* * IIO CRB deallocation register field masks: (IIO_ICDR) */ -#define IIO_ICDR_PND (1 << 4) +#define IIO_ICDR_PND (1 << 4) /* * IO BTE Length/Status (IIO_IBLS) register bit field definitions @@ -3223,35 +3113,35 @@ typedef union ii_ippr_u { /* * IO Error Clear register bit field definitions */ -#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */ -#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */ -#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */ -#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */ -#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */ -#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */ -#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */ -#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */ -#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */ -#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */ -#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */ -#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */ -#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */ -#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */ -#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */ +#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */ +#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */ +#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */ +#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */ +#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */ +#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */ +#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */ +#define IECLR_PRB_E (1UL << 14) /* clear err bit in 
PRB_E reg */ +#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */ +#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */ +#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */ +#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */ +#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */ +#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */ +#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */ /* * IIO CRB control register Fields: IIO_ICCR */ -#define IIO_ICCR_PENDING (0x10000) -#define IIO_ICCR_CMD_MASK (0xFF) -#define IIO_ICCR_CMD_SHFT (7) -#define IIO_ICCR_CMD_NOP (0x0) /* No Op */ -#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */ -#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */ -#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory +#define IIO_ICCR_PENDING 0x10000 +#define IIO_ICCR_CMD_MASK 0xFF +#define IIO_ICCR_CMD_SHFT 7 +#define IIO_ICCR_CMD_NOP 0x0 /* No Op */ +#define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */ +#define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */ +#define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory * via a WB */ -#define IIO_ICCR_CMD_FLUSH (0x800) +#define IIO_ICCR_CMD_FLUSH 0x800 /* * @@ -3283,8 +3173,8 @@ typedef union ii_ippr_u { * Easy access macros for CRBs, all 5 registers (A-E) */ typedef ii_icrb0_a_u_t icrba_t; -#define a_sidn ii_icrb0_a_fld_s.ia_sidn -#define a_tnum ii_icrb0_a_fld_s.ia_tnum +#define a_sidn ii_icrb0_a_fld_s.ia_sidn +#define a_tnum ii_icrb0_a_fld_s.ia_tnum #define a_addr ii_icrb0_a_fld_s.ia_addr #define a_valid ii_icrb0_a_fld_s.ia_vld #define a_iow ii_icrb0_a_fld_s.ia_iow @@ -3324,14 +3214,13 @@ typedef ii_icrb0_c_u_t icrbc_t; #define c_source ii_icrb0_c_fld_s.ic_source #define c_regvalue ii_icrb0_c_regval - typedef ii_icrb0_d_u_t icrbd_t; #define d_sleep ii_icrb0_d_fld_s.id_sleep #define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt #define d_pripsc ii_icrb0_d_fld_s.id_pr_psc #define d_bteop ii_icrb0_d_fld_s.id_bte_op -#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ -#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ +#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */ +#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */ #define d_regvalue ii_icrb0_d_regval typedef ii_icrb0_e_u_t icrbe_t; @@ -3341,7 +3230,6 @@ typedef ii_icrb0_e_u_t icrbe_t; #define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout #define e_regvalue ii_icrb0_e_regval - /* Number of widgets supported by shub */ #define HUB_NUM_WIDGET 9 #define HUB_WIDGET_ID_MIN 0x8 @@ -3367,27 +3255,27 @@ typedef ii_icrb0_e_u_t icrbe_t; #define LNK_STAT_WORKING 0x2 /* LLP is working */ -#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */ -#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */ -#define IIO_WSTAT_TXRETRY_MASK (0x7F) /* should be 0xFF?? */ -#define IIO_WSTAT_TXRETRY_SHFT (16) -#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \ - IIO_WSTAT_TXRETRY_MASK) +#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */ +#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */ +#define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */ +#define IIO_WSTAT_TXRETRY_SHFT 16 +#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \ + IIO_WSTAT_TXRETRY_MASK) /* Number of II perf. 
counters we can multiplex at once */ #define IO_PERF_SETS 32 /* Bit for the widget in inbound access register */ -#define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) +#define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) /* Bit for the widget in outbound access register */ -#define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) +#define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) /* NOTE: The following define assumes that we are going to get * widget numbers from 8 thru F and the device numbers within * widget from 0 thru 7. */ -#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d)))) +#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d)))) /* IO Interrupt Destination Register */ #define IIO_IIDSR_SENT_SHIFT 28 @@ -3402,11 +3290,11 @@ typedef ii_icrb0_e_u_t icrbe_t; #define IIO_IIDSR_LVL_MASK 0x000000ff /* Xtalk timeout threshhold register (IIO_IXTT) */ -#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ +#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) -#define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ +#define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ #define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT) -#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */ +#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */ #define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT) /* @@ -3414,17 +3302,17 @@ typedef ii_icrb0_e_u_t icrbe_t; */ typedef union hubii_wcr_u { - uint64_t wcr_reg_value; - struct { - uint64_t wcr_widget_id: 4, /* LLP crossbar credit */ - wcr_tag_mode: 1, /* Tag mode */ - wcr_rsvd1: 8, /* Reserved */ - wcr_xbar_crd: 3, /* LLP crossbar credit */ - wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */ - wcr_dir_con: 1, /* widget direct connect */ - wcr_e_thresh: 5, /* elasticity threshold */ - wcr_rsvd: 41; /* unused */ - } wcr_fields_s; + uint64_t wcr_reg_value; + struct { + uint64_t wcr_widget_id:4, /* LLP crossbar credit */ + wcr_tag_mode:1, /* Tag mode */ + wcr_rsvd1:8, /* Reserved */ + wcr_xbar_crd:3, /* LLP crossbar credit */ + wcr_f_bad_pkt:1, /* Force bad llp pkt enable */ + wcr_dir_con:1, /* widget direct connect */ + wcr_e_thresh:5, /* elasticity threshold */ + wcr_rsvd:41; /* unused */ + } wcr_fields_s; } hubii_wcr_t; #define iwcr_dir_con wcr_fields_s.wcr_dir_con @@ -3436,41 +3324,35 @@ performance registers */ performed */ typedef union io_perf_sel { - uint64_t perf_sel_reg; - struct { - uint64_t perf_ippr0 : 4, - perf_ippr1 : 4, - perf_icct : 8, - perf_rsvd : 48; - } perf_sel_bits; + uint64_t perf_sel_reg; + struct { + uint64_t perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48; + } perf_sel_bits; } io_perf_sel_t; /* io_perf_cnt is to extract the count from the shub registers. Due to hardware problems there is only one counter, not two. 
 */ typedef union io_perf_cnt { - uint64_t perf_cnt; - struct { - uint64_t perf_cnt : 20, - perf_rsvd2 : 12, - perf_rsvd1 : 32; - } perf_cnt_bits; + uint64_t perf_cnt; + struct { + uint64_t perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32; + } perf_cnt_bits; } io_perf_cnt_t; typedef union iprte_a { - uint64_t entry; - struct { - uint64_t i_rsvd_1 : 3; - uint64_t i_addr : 38; - uint64_t i_init : 3; - uint64_t i_source : 8; - uint64_t i_rsvd : 2; - uint64_t i_widget : 4; - uint64_t i_to_cnt : 5; - uint64_t i_vld : 1; + uint64_t entry; + struct { + uint64_t i_rsvd_1:3; + uint64_t i_addr:38; + uint64_t i_init:3; + uint64_t i_source:8; + uint64_t i_rsvd:2; + uint64_t i_widget:4; + uint64_t i_to_cnt:5; + uint64_t i_vld:1; } iprte_fields; } iprte_a_t; -#endif /* _ASM_IA64_SN_SHUBIO_H */ - +#endif /* _ASM_IA64_SN_SHUBIO_H */ -- cgit v0.10.2 From 2074615a13a4f250e0a4e3f6ec8e3733b950a783 Mon Sep 17 00:00:00 2001 From: David Mosberger-Tang Date: Fri, 18 Feb 2005 19:09:00 -0700 Subject: [IA64] use fc.i for flush_icache_range() This is a small patch to switch flush_icache_range() to use fc.i instead of fc. This would save time on processors which can establish i-cache coherency without flushing the cache-line out to memory (not that any current processors do). On existing processors, fc.i behaves like fc. The only caveat is that very old assemblers may not know about fc.i yet. Signed-off-by: David Mosberger-Tang Signed-off-by: Tony Luck diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S index 29c802b..a1af914 100644 --- a/arch/ia64/lib/flush.S +++ b/arch/ia64/lib/flush.S @@ -1,8 +1,8 @@ /* * Cache flushing routines. * - * Copyright (C) 1999-2001 Hewlett-Packard Co - * Copyright (C) 1999-2001 David Mosberger-Tang + * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co + * David Mosberger-Tang */ #include #include @@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range) mov ar.lc=r8 ;; -.Loop: fc in0 // issuable on M0 only +.Loop: fc.i in0 // issuable on M2 only add in0=32,in0 br.cloop.sptk.few .Loop ;; -- cgit v0.10.2 From c2d1d65ad441c8abe624bdb1c2cff2e47c8c1ee1 Mon Sep 17 00:00:00 2001 From: Bruce Losure Date: Thu, 24 Mar 2005 12:28:00 -0700 Subject: [IA64-SGI] Altix only: Remove hubdev SAL call Hi Tony, This patch against ia64-test-2.6.12 fixes a bug where the tiocx code was inadvertently un-doing some address modifications done in earlier fixup code. This patch just removes the offending code.
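Editor's note on the flush.S change above: a rough C-level sketch of what the new loop does. The 32-byte stride and the trailing sync.i/srlz.i are assumptions based on the usual ia64 i-cache flush sequence, not something taken from the patch itself.

    static void flush_icache_range_sketch(unsigned long start, unsigned long end)
    {
            unsigned long addr;

            /* touch every 32-byte line in [start, end) with fc.i */
            for (addr = start & ~31UL; addr < end; addr += 32)
                    asm volatile ("fc.i %0" : : "r" (addr) : "memory");

            /* make the flush visible to the instruction stream */
            asm volatile (";; sync.i" : : : "memory");
            asm volatile (";; srlz.i ;;" : : : "memory");
    }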
Signed-off-by: Bruce Losure Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 66190d7..128ccf2 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type); EXPORT_SYMBOL(tiocx_dma_addr); EXPORT_SYMBOL(tiocx_swin_base); -static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address) -{ - - struct ia64_sal_retval ret_stuff; - ret_stuff.status = 0; - ret_stuff.v0 = 0; - - ia64_sal_oemcall_nolock(&ret_stuff, - SN_SAL_IOIF_GET_HUBDEV_INFO, - handle, address, 0, 0, 0, 0, 0); - return ret_stuff.v0; -} - static void tio_conveyor_set(nasid_t nasid, int enable_flag) { uint64_t ice_frz; @@ -477,18 +464,12 @@ static int __init tiocx_init(void) if (nasid & 0x1) { /* TIO's are always odd */ struct hubdev_info *hubdev; - uint64_t status; struct xwidget_info *widgetp; DBG("Found TIO at nasid 0x%x\n", nasid); hubdev = (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo); - status = - tiocx_get_hubdev_info(nasid, - (uint64_t) __pa(hubdev)); - if (status) - continue; widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET]; -- cgit v0.10.2 From ae40aae9b9b8e336714ebb3f16410da6e69d6ac8 Mon Sep 17 00:00:00 2001 From: Bruce Losure Date: Mon, 4 Apr 2005 13:23:00 -0700 Subject: [IA64-SGI] Altix patch to fix missing Kconfig dependency. This is a one-liner to make the mbcs driver depend on SGI_TIOCX in the drivers/char/Kconfig file. Signed-off-by: Bruce Losure Signed-off-by: Tony Luck diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index e162dab..a70e6b9 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -408,7 +408,7 @@ config SGI_TIOCX config SGI_MBCS tristate "SGI FPGA Core Services driver support" - depends on (IA64_SGI_SN2 || IA64_GENERIC) + depends on SGI_TIOCX help If you have an SGI Altix with an attached SABrick say Y or M here, otherwise say N. -- cgit v0.10.2 From ce0a3956b32650e229b68964c4400bbdc5ad3ca1 Mon Sep 17 00:00:00 2001 From: Bruce Losure Date: Mon, 25 Apr 2005 19:41:00 -0700 Subject: [IA64-SGI] Altix patch to add bricktype knowledge to tiocx Here is a patch to enable the SGI tiocx bus driver to distingush between FPGA-attached h/w and non-FPGA-attached h/w. 
Signed-off-by: Bruce Losure Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 128ccf2..ab9b5f3 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include "tio.h" #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" @@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info) } } -uint64_t -tiocx_dma_addr(uint64_t addr) +uint64_t tiocx_dma_addr(uint64_t addr) { return PHYS_TO_TIODMA(addr); } -uint64_t -tiocx_swin_base(int nasid) +uint64_t tiocx_swin_base(int nasid) { return TIO_SWIN_BASE(nasid, TIOCX_CORELET); } @@ -366,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet) udelay(2000); } -static int fpga_attached(nasid_t nasid) +static int tiocx_btchar_get(int nasid) +{ + moduleid_t module_id; + geoid_t geoid; + int cnodeid; + + cnodeid = nasid_to_cnodeid(nasid); + geoid = cnodeid_get_geoid(cnodeid); + module_id = geo_module(geoid); + return MODULE_GET_BTCHAR(module_id); +} + +static int is_fpga_brick(int nasid) +{ + switch (tiocx_btchar_get(nasid)) { + case L1_BRICKTYPE_SA: + case L1_BRICKTYPE_ATHENA: + return 1; + } + return 0; +} + +static int bitstream_loaded(nasid_t nasid) { uint64_t cx_credits; @@ -383,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev) int mfg_num = CX_DEV_NONE; nasid_t nasid = cx_dev->cx_id.nasid; - if (fpga_attached(nasid)) { + if (bitstream_loaded(nasid)) { uint64_t cx_id; cx_id = @@ -414,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf) { struct cx_dev *cx_dev = to_cx_dev(dev); - return sprintf(buf, "0x%x 0x%x 0x%x\n", + return sprintf(buf, "0x%x 0x%x 0x%x %d\n", cx_dev->cx_id.nasid, - cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num); + cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num, + tiocx_btchar_get(cx_dev->cx_id.nasid)); } static ssize_t store_cxdev_control(struct device *dev, const char *buf, @@ -462,7 +485,7 @@ static int __init tiocx_init(void) if ((nasid = cnodeid_to_nasid(cnodeid)) < 0) break; /* No more nasids .. bail out of loop */ - if (nasid & 0x1) { /* TIO's are always odd */ + if ((nasid & 0x1) && is_fpga_brick(nasid)) { struct hubdev_info *hubdev; struct xwidget_info *widgetp; diff --git a/include/asm-ia64/sn/l1.h b/include/asm-ia64/sn/l1.h index d5dbd55..08050d3 100644 --- a/include/asm-ia64/sn/l1.h +++ b/include/asm-ia64/sn/l1.h @@ -29,8 +29,9 @@ #define L1_BRICKTYPE_CHI_CG 0x76 /* v */ #define L1_BRICKTYPE_X 0x78 /* x */ #define L1_BRICKTYPE_X2 0x79 /* y */ -#define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */ +#define L1_BRICKTYPE_SA 0x5e /* ^ */ #define L1_BRICKTYPE_PA 0x6a /* j */ #define L1_BRICKTYPE_IA 0x6b /* k */ +#define L1_BRICKTYPE_ATHENA 0x2b /* + */ #endif /* _ASM_IA64_SN_L1_H */ -- cgit v0.10.2 From 2e34f07ff0c944399a6456e2d91cf0ca1d9a497c Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Mon, 21 Mar 2005 19:41:00 -0700 Subject: [PATCH] move cnodeid_to_nasid_table out of pda Another step in the effort to eliminate the SN pda structure. This patch moves the cnodeid_to_nasid_table field out of the pda, making it a standalone per-cpu data item, and exports it so it can be accessed by kernel modules. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index d35f2a6..fea71ee 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
* - * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. */ #include @@ -73,6 +73,9 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info); +DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); +EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); + partid_t sn_partid = -1; EXPORT_SYMBOL(sn_partid); char sn_system_serial_number_string[128]; @@ -373,11 +376,11 @@ static void __init sn_init_pdas(char **cmdline_p) { cnodeid_t cnode; - memset(pda->cnodeid_to_nasid_table, -1, - sizeof(pda->cnodeid_to_nasid_table)); + memset(sn_cnodeid_to_nasid, -1, + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); for_each_online_node(cnode) - pda->cnodeid_to_nasid_table[cnode] = - pxm_to_nasid(nid_to_pxm_map[cnode]); + sn_cnodeid_to_nasid[cnode] = + pxm_to_nasid(nid_to_pxm_map[cnode]); numionodes = num_online_nodes(); scan_for_ionodes(); @@ -486,15 +489,18 @@ void __init sn_cpu_init(void) pda->idle_flag = 0; if (cpuid != 0) { - memcpy(pda->cnodeid_to_nasid_table, - pdacpu(0)->cnodeid_to_nasid_table, - sizeof(pda->cnodeid_to_nasid_table)); + /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */ + memcpy(sn_cnodeid_to_nasid, + (&per_cpu(__sn_cnodeid_to_nasid, 0)), + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); } /* * Check for WARs. * Only needs to be done once, on BSP. - * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i]. + * Has to be done after loop above, because it uses this cpu's + * sn_cnodeid_to_nasid table which was just initialized if this + * isn't cpu 0. * Has to be done before assignment below. */ if (!wars_have_been_checked) { @@ -580,8 +586,7 @@ static void __init scan_for_ionodes(void) brd = find_lboard_any(brd, KLTYPE_SNIA); while (brd) { - pda->cnodeid_to_nasid_table[numionodes] = - brd->brd_nasid; + sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; physical_node_map[brd->brd_nasid] = numionodes; root_lboard[numionodes] = brd; numionodes++; @@ -602,8 +607,7 @@ static void __init scan_for_ionodes(void) root_lboard[nasid_to_cnodeid(nasid)], KLTYPE_TIO); while (brd) { - pda->cnodeid_to_nasid_table[numionodes] = - brd->brd_nasid; + sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; physical_node_map[brd->brd_nasid] = numionodes; root_lboard[numionodes] = brd; numionodes++; @@ -614,7 +618,6 @@ static void __init scan_for_ionodes(void) brd = find_lboard_any(brd, KLTYPE_TIO); } } - } int diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h index 7c349f0..e6d9aa6 100644 --- a/include/asm-ia64/sn/arch.h +++ b/include/asm-ia64/sn/arch.h @@ -5,7 +5,7 @@ * * SGI specific setup. * - * Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) */ #ifndef _ASM_IA64_SN_ARCH_H @@ -47,6 +47,15 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); #define MAX_COMPACT_NODES 2048 #define CPUS_PER_NODE 4 + +/* + * Compact node ID to nasid mappings kept in the per-cpu data areas of each + * cpu. 
+ */ +DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); +#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) + + extern void sn_flush_all_caches(long addr, long bytes); #endif /* _ASM_IA64_SN_ARCH_H */ diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h index cd19f17..a5340cf 100644 --- a/include/asm-ia64/sn/pda.h +++ b/include/asm-ia64/sn/pda.h @@ -49,7 +49,6 @@ typedef struct pda_s { unsigned long sn_soft_irr[4]; unsigned long sn_in_service_ivecs[4]; - short cnodeid_to_nasid_table[MAX_NUMNODES]; int sn_lb_int_war_ticks; int sn_last_irq; int sn_first_irq; diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h index 685435a..6b44290 100644 --- a/include/asm-ia64/sn/sn_cpuid.h +++ b/include/asm-ia64/sn/sn_cpuid.h @@ -4,7 +4,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. */ @@ -123,11 +123,8 @@ extern int nasid_slice_to_cpuid(int, int); /* * cnodeid_to_nasid - convert a cnodeid to a NASID - * Macro relies on pg_data for a node being on the node itself. - * Just extract the NASID from the pointer. - * */ -#define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid] +#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid]) /* * nasid_to_cnodeid - convert a NASID to a cnodeid -- cgit v0.10.2 From 7223a93a5321f84337647aef62ef947afd8df41a Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:00:00 -0700 Subject: [IA64] Export node_online_map and node_possible_map Export node_online_map and node_possible_map so that kernel modules can use the nodemask macros, like, for_each_node() and for_each_online_node(). Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fc1b106..b1061b1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -43,7 +43,9 @@ * initializer cleaner */ nodemask_t node_online_map = { { [0] = 1UL } }; +EXPORT_SYMBOL(node_online_map); nodemask_t node_possible_map = NODE_MASK_ALL; +EXPORT_SYMBOL(node_possible_map); struct pglist_data *pgdat_list; unsigned long totalram_pages; unsigned long totalhigh_pages; -- cgit v0.10.2 From 21e37283909c12e300ab87c20f5addc878cda9f9 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:08:00 -0700 Subject: [IA64-SGI] Define some additional SHub1 and Shub2 register symbols Define some additional SHub1 and SHub2 register symbols. 
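As a pointer for the reader: the new SH1_IPI_ACCESS and SH2_IPI_ACCESS* symbols are consumed later in this series by the cross-partition code, which picks its nofault PIO read target according to the SHub revision (see xp_init() further down):

    if (is_shub2())
            xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
    else
            xp_nofault_PIOR_target = SH1_IPI_ACCESS;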
Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/include/asm-ia64/sn/shub_mmr.h b/include/asm-ia64/sn/shub_mmr.h index 2f88508..323fa0c 100644 --- a/include/asm-ia64/sn/shub_mmr.h +++ b/include/asm-ia64/sn/shub_mmr.h @@ -385,6 +385,17 @@ #define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000 /* ==================================================================== */ +/* Register "SH_IPI_ACCESS" */ +/* CPU interrupt Access Permission Bits */ +/* ==================================================================== */ + +#define SH1_IPI_ACCESS 0x0000000110060480 +#define SH2_IPI_ACCESS0 0x0000000010060c00 +#define SH2_IPI_ACCESS1 0x0000000010060c80 +#define SH2_IPI_ACCESS2 0x0000000010060d00 +#define SH2_IPI_ACCESS3 0x0000000010060d80 + +/* ==================================================================== */ /* Register "SH_INT_CMPB" */ /* RTC Compare Value for Processor B */ /* ==================================================================== */ @@ -429,6 +440,19 @@ #define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 #define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff +/* ==================================================================== */ +/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */ +/* privilege vector for acc=0 */ +/* ==================================================================== */ + +#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 0x0000000100030300 + +/* ==================================================================== */ +/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */ +/* privilege vector for acc=0 */ +/* ==================================================================== */ + +#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 0x0000000100050300 /* ==================================================================== */ /* Some MMRs are functionally identical (or close enough) on both SHUB1 */ -- cgit v0.10.2 From 7fbd2a5337b2aa91266abbded97330f909904fd5 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:11:00 -0700 Subject: [IA64-SGI] Add some needed externs currently not defined Add some needed externs currently not defined. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h index e6d9aa6..635fdce 100644 --- a/include/asm-ia64/sn/arch.h +++ b/include/asm-ia64/sn/arch.h @@ -56,6 +56,12 @@ DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) + +extern u8 sn_partition_id; +extern u8 sn_system_size; +extern u8 sn_sharing_domain_size; +extern u8 sn_region_size; + extern void sn_flush_all_caches(long addr, long bytes); #endif /* _ASM_IA64_SN_ARCH_H */ -- cgit v0.10.2 From b0d82bd5df874f7dadbeced1b0163473387da37c Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:46:00 -0700 Subject: [IA64-SGI] SGI Altix cross partition functionality (2nd This patch contains the shim module (XP) which interfaces between the communication module (XPC) and the functional support modules (like XPNET). Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index cad8346..ce13ad6 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM If you are compiling a kernel that will run under SGI's IA-64 simulator (Medusa) then say Y, otherwise say N. 
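Editor's note on the option added just below: the cross-partition shim is tristate and would typically be built as a module on an Altix configuration, together with MSPEC, which it depends on. A hedged .config fragment (exact selections vary by tree):

    CONFIG_MSPEC=m
    CONFIG_IA64_SGI_SN_XP=m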
+config IA64_SGI_SN_XP + tristate "Support communication between SGI SSIs" + depends on MSPEC + help + An SGI machine can be divided into multiple Single System + Images which act independently of each other and have + hardware based memory protection from the others. Enabling + this feature will allow for direct communication between SSIs + based on a network adapter and DMA messaging. + config FORCE_MAX_ZONEORDER int default "18" diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c new file mode 100644 index 0000000..3be52a3 --- /dev/null +++ b/arch/ia64/sn/kernel/xp_main.c @@ -0,0 +1,289 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition (XP) base. + * + * XP provides a base from which its users can interact + * with XPC, yet not be dependent on XPC. + * + */ + + +#include +#include +#include +#include +#include +#include + + +/* + * Target of nofault PIO read. + */ +u64 xp_nofault_PIOR_target; + + +/* + * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level + * users of XPC. + */ +struct xpc_registration xpc_registrations[XPC_NCHANNELS]; + + +/* + * Initialize the XPC interface to indicate that XPC isn't loaded. + */ +static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; } + +struct xpc_interface xpc_interface = { + (void (*)(int)) xpc_notloaded, + (void (*)(int)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *)) + xpc_notloaded, + (void (*)(partid_t, int, void *)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded +}; + + +/* + * XPC calls this when it (the XPC module) has been loaded. + */ +void +xpc_set_interface(void (*connect)(int), + void (*disconnect)(int), + enum xpc_retval (*allocate)(partid_t, int, u32, void **), + enum xpc_retval (*send)(partid_t, int, void *), + enum xpc_retval (*send_notify)(partid_t, int, void *, + xpc_notify_func, void *), + void (*received)(partid_t, int, void *), + enum xpc_retval (*partid_to_nasids)(partid_t, void *)) +{ + xpc_interface.connect = connect; + xpc_interface.disconnect = disconnect; + xpc_interface.allocate = allocate; + xpc_interface.send = send; + xpc_interface.send_notify = send_notify; + xpc_interface.received = received; + xpc_interface.partid_to_nasids = partid_to_nasids; +} + + +/* + * XPC calls this when it (the XPC module) is being unloaded. + */ +void +xpc_clear_interface(void) +{ + xpc_interface.connect = (void (*)(int)) xpc_notloaded; + xpc_interface.disconnect = (void (*)(int)) xpc_notloaded; + xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32, + void **)) xpc_notloaded; + xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *)) + xpc_notloaded; + xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *, + xpc_notify_func, void *)) xpc_notloaded; + xpc_interface.received = (void (*)(partid_t, int, void *)) + xpc_notloaded; + xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *)) + xpc_notloaded; +} + + +/* + * Register for automatic establishment of a channel connection whenever + * a partition comes up. + * + * Arguments: + * + * ch_number - channel # to register for connection. 
+ * func - function to call for asynchronous notification of channel + * state changes (i.e., connection, disconnection, error) and + * the arrival of incoming messages. + * key - pointer to optional user-defined value that gets passed back + * to the user on any callouts made to func. + * payload_size - size in bytes of the XPC message's payload area which + * contains a user-defined message. The user should make + * this large enough to hold their largest message. + * nentries - max #of XPC message entries a message queue can contain. + * The actual number, which is determined when a connection + * is established and may be less then requested, will be + * passed to the user via the xpcConnected callout. + * assigned_limit - max number of kthreads allowed to be processing + * messages (per connection) at any given instant. + * idle_limit - max number of kthreads allowed to be idle at any given + * instant. + */ +enum xpc_retval +xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, + u16 nentries, u32 assigned_limit, u32 idle_limit) +{ + struct xpc_registration *registration; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + DBUG_ON(payload_size == 0 || nentries == 0); + DBUG_ON(func == NULL); + DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); + + registration = &xpc_registrations[ch_number]; + + if (down_interruptible(®istration->sema) != 0) { + return xpcInterrupted; + } + + /* if XPC_CHANNEL_REGISTERED(ch_number) */ + if (registration->func != NULL) { + up(®istration->sema); + return xpcAlreadyRegistered; + } + + /* register the channel for connection */ + registration->msg_size = XPC_MSG_SIZE(payload_size); + registration->nentries = nentries; + registration->assigned_limit = assigned_limit; + registration->idle_limit = idle_limit; + registration->key = key; + registration->func = func; + + up(®istration->sema); + + xpc_interface.connect(ch_number); + + return xpcSuccess; +} + + +/* + * Remove the registration for automatic connection of the specified channel + * when a partition comes up. + * + * Before returning this xpc_disconnect() will wait for all connections on the + * specified channel have been closed/torndown. So the caller can be assured + * that they will not be receiving any more callouts from XPC to their + * function registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_disconnect(int ch_number) +{ + struct xpc_registration *registration; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + registration = &xpc_registrations[ch_number]; + + /* + * We've decided not to make this a down_interruptible(), since we + * figured XPC's users will just turn around and call xpc_disconnect() + * again anyways, so we might as well wait, if need be. 
+ */ + down(®istration->sema); + + /* if !XPC_CHANNEL_REGISTERED(ch_number) */ + if (registration->func == NULL) { + up(®istration->sema); + return; + } + + /* remove the connection registration for the specified channel */ + registration->func = NULL; + registration->key = NULL; + registration->nentries = 0; + registration->msg_size = 0; + registration->assigned_limit = 0; + registration->idle_limit = 0; + + xpc_interface.disconnect(ch_number); + + up(®istration->sema); + + return; +} + + +int __init +xp_init(void) +{ + int ret, ch_number; + u64 func_addr = *(u64 *) xp_nofault_PIOR; + u64 err_func_addr = *(u64 *) xp_error_PIOR; + + + if (!ia64_platform_is("sn2")) { + return -ENODEV; + } + + /* + * Register a nofault code region which performs a cross-partition + * PIO read. If the PIO read times out, the MCA handler will consume + * the error and return to a kernel-provided instruction to indicate + * an error. This PIO read exists because it is guaranteed to timeout + * if the destination is down (AMO operations do not timeout on at + * least some CPUs on Shubs <= v1.2, which unfortunately we have to + * work around). + */ + if ((ret = sn_register_nofault_code(func_addr, err_func_addr, + err_func_addr, 1, 1)) != 0) { + printk(KERN_ERR "XP: can't register nofault code, error=%d\n", + ret); + } + /* + * Setup the nofault PIO read target. (There is no special reason why + * SH_IPI_ACCESS was selected.) + */ + if (is_shub2()) { + xp_nofault_PIOR_target = SH2_IPI_ACCESS0; + } else { + xp_nofault_PIOR_target = SH1_IPI_ACCESS; + } + + /* initialize the connection registration semaphores */ + for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { + sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */ + } + + return 0; +} +module_init(xp_init); + + +void __exit +xp_exit(void) +{ + u64 func_addr = *(u64 *) xp_nofault_PIOR; + u64 err_func_addr = *(u64 *) xp_error_PIOR; + + + /* unregister the PIO read nofault code region */ + (void) sn_register_nofault_code(func_addr, err_func_addr, + err_func_addr, 1, 0); +} +module_exit(xp_exit); + + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition (XP) base"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(xp_nofault_PIOR); +EXPORT_SYMBOL(xp_nofault_PIOR_target); +EXPORT_SYMBOL(xpc_registrations); +EXPORT_SYMBOL(xpc_interface); +EXPORT_SYMBOL(xpc_clear_interface); +EXPORT_SYMBOL(xpc_set_interface); +EXPORT_SYMBOL(xpc_connect); +EXPORT_SYMBOL(xpc_disconnect); + diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/arch/ia64/sn/kernel/xp_nofault.S new file mode 100644 index 0000000..b772543 --- /dev/null +++ b/arch/ia64/sn/kernel/xp_nofault.S @@ -0,0 +1,31 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * The xp_nofault_PIOR function takes a pointer to a remote PIO register + * and attempts to load and consume a value from it. This function + * will be registered as a nofault code block. In the event that the + * PIO read fails, the MCA handler will force the error to look + * corrected and vector to the xp_error_PIOR which will return an error. 
+ * + * extern int xp_nofault_PIOR(void *remote_register); + */ + + .global xp_nofault_PIOR +xp_nofault_PIOR: + mov r8=r0 // Stage a success return value + ld8.acq r9=[r32];; // PIO Read the specified register + adds r9=1,r9 // Add to force a consume + br.ret.sptk.many b0;; // Return success + + .global xp_error_PIOR +xp_error_PIOR: + mov r8=1 // Return value of 1 + br.ret.sptk.many b0;; // Return failure + diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h new file mode 100644 index 0000000..9902185 --- /dev/null +++ b/include/asm-ia64/sn/xp.h @@ -0,0 +1,436 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved. + */ + + +/* + * External Cross Partition (XP) structures and defines. + */ + + +#ifndef _ASM_IA64_SN_XP_H +#define _ASM_IA64_SN_XP_H + + +#include +#include +#include +#include +#include + + +#ifdef USE_DBUG_ON +#define DBUG_ON(condition) BUG_ON(condition) +#else +#define DBUG_ON(condition) +#endif + + +/* + * Define the maximum number of logically defined partitions the system + * can support. It is constrained by the maximum number of hardware + * partitionable regions. The term 'region' in this context refers to the + * minimum number of nodes that can comprise an access protection grouping. + * The access protection is in regards to memory, IPI and IOI. + * + * The maximum number of hardware partitionable regions is equal to the + * maximum number of nodes in the entire system divided by the minimum number + * of nodes that comprise an access protection grouping. + */ +#define XP_MAX_PARTITIONS 64 + + +/* + * Define the number of u64s required to represent all the C-brick nasids + * as a bitmap. The cross-partition kernel modules deal only with + * C-brick nasids, thus the need for bitmaps which don't account for + * odd-numbered (non C-brick) nasids. + */ +#define XP_MAX_PHYSNODE_ID (MAX_PHYSNODE_ID / 2) +#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) +#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) + + +/* + * Wrapper for bte_copy() that should it return a failure status will retry + * the bte_copy() once in the hope that the failure was due to a temporary + * aberration (i.e., the link going down temporarily). + * + * See bte_copy for definition of the input parameters. + * + * Note: xp_bte_copy() should never be called while holding a spinlock. + */ +static inline bte_result_t +xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) +{ + bte_result_t ret; + + + ret = bte_copy(src, dest, len, mode, notification); + + if (ret != BTE_SUCCESS) { + if (!in_interrupt()) { + cond_resched(); + } + ret = bte_copy(src, dest, len, mode, notification); + } + + return ret; +} + + +/* + * XPC establishes channel connections between the local partition and any + * other partition that is currently up. Over these channels, kernel-level + * `users' can communicate with their counterparts on the other partitions. + * + * The maxinum number of channels is limited to eight. For performance reasons, + * the internal cross partition structures require sixteen bytes per channel, + * and eight allows all of this interface-shared info to fit in one cache line. + * + * XPC_NCHANNELS reflects the total number of channels currently defined. + * If the need for additional channels arises, one can simply increase + * XPC_NCHANNELS accordingly. 
If the day should come where that number + * exceeds the MAXIMUM number of channels allowed (eight), then one will need + * to make changes to the XPC code to allow for this. + */ +#define XPC_MEM_CHANNEL 0 /* memory channel number */ +#define XPC_NET_CHANNEL 1 /* network channel number */ + +#define XPC_NCHANNELS 2 /* #of defined channels */ +#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */ + +#if XPC_NCHANNELS > XPC_MAX_NCHANNELS +#error XPC_NCHANNELS exceeds MAXIMUM allowed. +#endif + + +/* + * The format of an XPC message is as follows: + * + * +-------+--------------------------------+ + * | flags |////////////////////////////////| + * +-------+--------------------------------+ + * | message # | + * +----------------------------------------+ + * | payload (user-defined message) | + * | | + * : + * | | + * +----------------------------------------+ + * + * The size of the payload is defined by the user via xpc_connect(). A user- + * defined message resides in the payload area. + * + * The user should have no dealings with the message header, but only the + * message's payload. When a message entry is allocated (via xpc_allocate()) + * a pointer to the payload area is returned and not the actual beginning of + * the XPC message. The user then constructs a message in the payload area + * and passes that pointer as an argument on xpc_send() or xpc_send_notify(). + * + * The size of a message entry (within a message queue) must be a cacheline + * sized multiple in order to facilitate the BTE transfer of messages from one + * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user + * that wants to fit as many msg entries as possible in a given memory size + * (e.g. a memory page). + */ +struct xpc_msg { + u8 flags; /* FOR XPC INTERNAL USE ONLY */ + u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */ + s64 number; /* FOR XPC INTERNAL USE ONLY */ + + u64 payload; /* user defined portion of message */ +}; + + +#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload) +#define XPC_MSG_SIZE(_payload_size) \ + L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size)) + + +/* + * Define the return values and values passed to user's callout functions. + * (It is important to add new value codes at the end just preceding + * xpcUnknownReason, which must have the highest numerical value.) 
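An illustrative example of that rule (xpcSomeNewReason is made up): a new code is inserted just before the sentinel, which then takes the next number:

    xpcLocalPartid,         /* 43: local partition ID */
    xpcSomeNewReason,       /* 44: hypothetical new code */
    xpcUnknownReason        /* 45: unknown reason -- must be last in list */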
+ */ +enum xpc_retval { + xpcSuccess = 0, + + xpcNotConnected, /* 1: channel is not connected */ + xpcConnected, /* 2: channel connected (opened) */ + xpcRETIRED1, /* 3: (formerly xpcDisconnected) */ + + xpcMsgReceived, /* 4: message received */ + xpcMsgDelivered, /* 5: message delivered and acknowledged */ + + xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */ + + xpcNoWait, /* 7: operation would require wait */ + xpcRetry, /* 8: retry operation */ + xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */ + xpcInterrupted, /* 10: interrupted wait */ + + xpcUnequalMsgSizes, /* 11: message size disparity between sides */ + xpcInvalidAddress, /* 12: invalid address */ + + xpcNoMemory, /* 13: no memory available for XPC structures */ + xpcLackOfResources, /* 14: insufficient resources for operation */ + xpcUnregistered, /* 15: channel is not registered */ + xpcAlreadyRegistered, /* 16: channel is already registered */ + + xpcPartitionDown, /* 17: remote partition is down */ + xpcNotLoaded, /* 18: XPC module is not loaded */ + xpcUnloading, /* 19: this side is unloading XPC module */ + + xpcBadMagic, /* 20: XPC MAGIC string not found */ + + xpcReactivating, /* 21: remote partition was reactivated */ + + xpcUnregistering, /* 22: this side is unregistering channel */ + xpcOtherUnregistering, /* 23: other side is unregistering channel */ + + xpcCloneKThread, /* 24: cloning kernel thread */ + xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */ + + xpcNoHeartbeat, /* 26: remote partition has no heartbeat */ + + xpcPioReadError, /* 27: PIO read error */ + xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */ + + xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */ + xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */ + xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */ + xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */ + xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */ + xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */ + xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */ + xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */ + xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */ + xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */ + + xpcBadVersion, /* 39: bad version number */ + xpcVarsNotSet, /* 40: the XPC variables are not set up */ + xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */ + xpcInvalidPartid, /* 42: invalid partition ID */ + xpcLocalPartid, /* 43: local partition ID */ + + xpcUnknownReason /* 44: unknown reason -- must be last in list */ +}; + + +/* + * Define the callout function types used by XPC to update the user on + * connection activity and state changes (via the user function registered by + * xpc_connect()) and to notify them of messages received and delivered (via + * the user function registered by xpc_send_notify()). + * + * The two function types are xpc_channel_func and xpc_notify_func and + * both share the following arguments, with the exception of "data", which + * only xpc_channel_func has. + * + * Arguments: + * + * reason - reason code. (See following table.) + * partid - partition ID associated with condition. + * ch_number - channel # associated with condition. + * data - pointer to optional data. (See following table.) + * key - pointer to optional user-defined value provided as the "key" + * argument to xpc_connect() or xpc_send_notify(). + * + * In the following table the "Optional Data" column applies to callouts made + * to functions registered by xpc_connect(). 
A "NA" in that column indicates + * that this reason code can be passed to functions registered by + * xpc_send_notify() (i.e. they don't have data arguments). + * + * Also, the first three reason codes in the following table indicate + * success, whereas the others indicate failure. When a failure reason code + * is received, one can assume that the channel is not connected. + * + * + * Reason Code | Cause | Optional Data + * =====================+================================+===================== + * xpcConnected | connection has been established| max #of entries + * | to the specified partition on | allowed in message + * | the specified channel | queue + * ---------------------+--------------------------------+--------------------- + * xpcMsgReceived | an XPC message arrived from | address of payload + * | the specified partition on the | + * | specified channel | [the user must call + * | | xpc_received() when + * | | finished with the + * | | payload] + * ---------------------+--------------------------------+--------------------- + * xpcMsgDelivered | notification that the message | NA + * | was delivered to the intended | + * | recipient and that they have | + * | acknowledged its receipt by | + * | calling xpc_received() | + * =====================+================================+===================== + * xpcUnequalMsgSizes | can't connect to the specified | NULL + * | partition on the specified | + * | channel because of mismatched | + * | message sizes | + * ---------------------+--------------------------------+--------------------- + * xpcNoMemory | insufficient memory avaiable | NULL + * | to allocate message queue | + * ---------------------+--------------------------------+--------------------- + * xpcLackOfResources | lack of resources to create | NULL + * | the necessary kthreads to | + * | support the channel | + * ---------------------+--------------------------------+--------------------- + * xpcUnregistering | this side's user has | NULL or NA + * | unregistered by calling | + * | xpc_disconnect() | + * ---------------------+--------------------------------+--------------------- + * xpcOtherUnregistering| the other side's user has | NULL or NA + * | unregistered by calling | + * | xpc_disconnect() | + * ---------------------+--------------------------------+--------------------- + * xpcNoHeartbeat | the other side's XPC is no | NULL or NA + * | longer heartbeating | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcUnloading | this side's XPC module is | NULL or NA + * | being unloaded | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcOtherUnloading | the other side's XPC module is | NULL or NA + * | is being unloaded | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA + * | error while sending an IPI | + * | | + * ---------------------+--------------------------------+--------------------- + * xpcInvalidAddress | the address either received or | NULL or NA + * | sent by the specified partition| + * | is invalid | + * ---------------------+--------------------------------+--------------------- + * xpcBteNotAvailable | attempt to pull data from the | NULL or NA + * xpcBtePoisonError | specified partition over the | + * xpcBteWriteError | specified channel via a | + * xpcBteAccessError | bte_copy() failed | + * xpcBteTimeOutError | | + * xpcBteXtalkError | | + * 
xpcBteDirectoryError | | + * xpcBteGenericError | | + * xpcBteUnmappedError | | + * ---------------------+--------------------------------+--------------------- + * xpcUnknownReason | the specified channel to the | NULL or NA + * | specified partition was | + * | unavailable for unknown reasons| + * =====================+================================+===================== + */ + +typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid, + int ch_number, void *data, void *key); + +typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid, + int ch_number, void *key); + + +/* + * The following is a registration entry. There is a global array of these, + * one per channel. It is used to record the connection registration made + * by the users of XPC. As long as a registration entry exists, for any + * partition that comes up, XPC will attempt to establish a connection on + * that channel. Notification that a connection has been made will occur via + * the xpc_channel_func function. + * + * The 'func' field points to the function to call when aynchronous + * notification is required for such events as: a connection established/lost, + * or an incomming message received, or an error condition encountered. A + * non-NULL 'func' field indicates that there is an active registration for + * the channel. + */ +struct xpc_registration { + struct semaphore sema; + xpc_channel_func func; /* function to call */ + void *key; /* pointer to user's key */ + u16 nentries; /* #of msg entries in local msg queue */ + u16 msg_size; /* message queue's message size */ + u32 assigned_limit; /* limit on #of assigned kthreads */ + u32 idle_limit; /* limit on #of idle kthreads */ +} ____cacheline_aligned; + + +#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) + + +/* the following are valid xpc_allocate() flags */ +#define XPC_WAIT 0 /* wait flag */ +#define XPC_NOWAIT 1 /* no wait flag */ + + +struct xpc_interface { + void (*connect)(int); + void (*disconnect)(int); + enum xpc_retval (*allocate)(partid_t, int, u32, void **); + enum xpc_retval (*send)(partid_t, int, void *); + enum xpc_retval (*send_notify)(partid_t, int, void *, + xpc_notify_func, void *); + void (*received)(partid_t, int, void *); + enum xpc_retval (*partid_to_nasids)(partid_t, void *); +}; + + +extern struct xpc_interface xpc_interface; + +extern void xpc_set_interface(void (*)(int), + void (*)(int), + enum xpc_retval (*)(partid_t, int, u32, void **), + enum xpc_retval (*)(partid_t, int, void *), + enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, + void *), + void (*)(partid_t, int, void *), + enum xpc_retval (*)(partid_t, void *)); +extern void xpc_clear_interface(void); + + +extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16, + u16, u32, u32); +extern void xpc_disconnect(int); + +static inline enum xpc_retval +xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload) +{ + return xpc_interface.allocate(partid, ch_number, flags, payload); +} + +static inline enum xpc_retval +xpc_send(partid_t partid, int ch_number, void *payload) +{ + return xpc_interface.send(partid, ch_number, payload); +} + +static inline enum xpc_retval +xpc_send_notify(partid_t partid, int ch_number, void *payload, + xpc_notify_func func, void *key) +{ + return xpc_interface.send_notify(partid, ch_number, payload, func, key); +} + +static inline void +xpc_received(partid_t partid, int ch_number, void *payload) +{ + return xpc_interface.received(partid, ch_number, payload); +} + 
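/*
 * Editor's sketch of how a kernel-level user might register for a channel
 * with the interface above.  The payload struct, the callback body and the
 * sizing numbers are made up for illustration; XPC_NET_CHANNEL is used here
 * only as an example channel number.
 */
struct my_payload {
	u64 opcode;
	u64 data;
};

static void my_channel_callback(enum xpc_retval reason, partid_t partid,
				int ch_number, void *data, void *key)
{
	if (reason == xpcMsgReceived)
		xpc_received(partid, ch_number, data);	/* done with payload */
}

static enum xpc_retval my_register(void)
{
	/* payload-sized entries, up to 128 per message queue, at most
	 * 4 kthreads assigned to the channel and 1 allowed to sit idle */
	return xpc_connect(XPC_NET_CHANNEL, my_channel_callback, NULL,
			   sizeof(struct my_payload), 128, 4, 1);
}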
+static inline enum xpc_retval +xpc_partid_to_nasids(partid_t partid, void *nasids) +{ + return xpc_interface.partid_to_nasids(partid, nasids); +} + + +extern u64 xp_nofault_PIOR_target; +extern int xp_nofault_PIOR(void *); +extern int xp_error_PIOR(void); + + +#endif /* _ASM_IA64_SN_XP_H */ + -- cgit v0.10.2 From 21223a9e78050919499d3d9039170e608eb939cc Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 3 May 2005 12:25:50 -0700 Subject: [IA64] manually apply changes to arch/ia64/sn/kernel/Makefile cg-patch couldn't apply the patch to Makefile, and my dumb script rushed on and ran cg-commit without this change. Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index 4f381fb..b1a4a23 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile @@ -4,10 +4,12 @@ # License. See the file "COPYING" in the main directory of this archive # for more details. # -# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. +# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved. # obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ huberror.o io_init.o iomv.o klconflib.o sn2/ obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_SGI_TIOCX) += tiocx.o +obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o +xp-y := xp_main.o xp_nofault.o -- cgit v0.10.2 From 89eb8eb927e324366c3ac0458998aaf9953fc5cd Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:50:00 -0700 Subject: [IA64-SGI] SGI Altix cross partition functionality [2/3] This patch contains the communication module (XPC) for cross partition communication on a partitioned SGI Altix. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index b1a4a23..6959736 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_SGI_TIOCX) += tiocx.o obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o xp-y := xp_main.o xp_nofault.o +obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o +xpc-y := xpc_main.o xpc_channel.o xpc_partition.o diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h new file mode 100644 index 0000000..1a0aed8 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc.h @@ -0,0 +1,991 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) structures and macros. + */ + +#ifndef _IA64_SN_KERNEL_XPC_H +#define _IA64_SN_KERNEL_XPC_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * XPC Version numbers consist of a major and minor number. XPC can always + * talk to versions with same major #, and never talk to versions with a + * different major #. + */ +#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf)) +#define XPC_VERSION_MAJOR(_v) ((_v) >> 4) +#define XPC_VERSION_MINOR(_v) ((_v) & 0xf) + + +/* + * The next macros define word or bit representations for given + * C-brick nasid in either the SAL provided bit array representing + * nasids in the partition/machine or the AMO_t array used for + * inter-partition initiation communications. + * + * For SN2 machines, C-Bricks are alway even numbered NASIDs. 
As + * such, some space will be saved by insisting that nasid information + * passed from SAL always be packed for C-Bricks and the + * cross-partition interrupts use the same packing scheme. + */ +#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2) +#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1)) +#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \ + (1UL << XPC_NASID_B_INDEX(_n))) +#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2) + +#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ +#define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */ + +/* define the process name of HB checker and the CPU it is pinned to */ +#define XPC_HB_CHECK_THREAD_NAME "xpc_hb" +#define XPC_HB_CHECK_CPU 0 + +/* define the process name of the discovery thread */ +#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery" + + +#define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p))) +#define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p)) +#define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p))) + + +/* + * Reserved Page provided by SAL. + * + * SAL provides one page per partition of reserved memory. When SAL + * initialization is complete, SAL_signature, SAL_version, partid, + * part_nasids, and mach_nasids are set. + * + * Note: Until vars_pa is set, the partition XPC code has not been initialized. + */ +struct xpc_rsvd_page { + u64 SAL_signature; /* SAL unique signature */ + u64 SAL_version; /* SAL specified version */ + u8 partid; /* partition ID from SAL */ + u8 version; + u8 pad[6]; /* pad to u64 align */ + u64 vars_pa; + u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; + u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; +}; +#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */ + +#define XPC_RSVD_PAGE_ALIGNED_SIZE \ + (L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))) + + +/* + * Define the structures by which XPC variables can be exported to other + * partitions. (There are two: struct xpc_vars and struct xpc_vars_part) + */ + +/* + * The following structure describes the partition generic variables + * needed by other partitions in order to properly initialize. + * + * struct xpc_vars version number also applies to struct xpc_vars_part. + * Changes to either structure and/or related functionality should be + * reflected by incrementing either the major or minor version numbers + * of struct xpc_vars. + */ +struct xpc_vars { + u8 version; + u64 heartbeat; + u64 heartbeating_to_mask; + u64 kdb_status; /* 0 = machine running */ + int act_nasid; + int act_phys_cpuid; + u64 vars_part_pa; + u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */ + AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ + AMO_t *act_amos; /* pointer to the first activation AMO */ +}; +#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */ + +#define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars))) + +/* + * The following structure describes the per partition specific variables. + * + * An array of these structures, one per partition, will be defined. As a + * partition becomes active XPC will copy the array entry corresponding to + * itself from that partition. It is desirable that the size of this + * structure evenly divide into a cacheline, such that none of the entries + * in this array crosses a cacheline boundary. As it is now, each entry + * occupies half a cacheline. 
+ */ +struct xpc_vars_part { + u64 magic; + + u64 openclose_args_pa; /* physical address of open and close args */ + u64 GPs_pa; /* physical address of Get/Put values */ + + u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */ + int IPI_nasid; /* nasid of where to send IPIs */ + int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */ + + u8 nchannels; /* #of defined channels supported */ + + u8 reserved[23]; /* pad to a full 64 bytes */ +}; + +/* + * The vars_part MAGIC numbers play a part in the first contact protocol. + * + * MAGIC1 indicates that the per partition specific variables for a remote + * partition have been initialized by this partition. + * + * MAGIC2 indicates that this partition has pulled the remote partititions + * per partition variables that pertain to this partition. + */ +#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ +#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ + + + +/* + * Functions registered by add_timer() or called by kernel_thread() only + * allow for a single 64-bit argument. The following macros can be used to + * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from + * the passed argument. + */ +#define XPC_PACK_ARGS(_arg1, _arg2) \ + ((((u64) _arg1) & 0xffffffff) | \ + ((((u64) _arg2) & 0xffffffff) << 32)) + +#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) +#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) + + + +/* + * Define a Get/Put value pair (pointers) used with a message queue. + */ +struct xpc_gp { + s64 get; /* Get value */ + s64 put; /* Put value */ +}; + +#define XPC_GP_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) + + + +/* + * Define a structure that contains arguments associated with opening and + * closing a channel. + */ +struct xpc_openclose_args { + u16 reason; /* reason why channel is closing */ + u16 msg_size; /* sizeof each message entry */ + u16 remote_nentries; /* #of message entries in remote msg queue */ + u16 local_nentries; /* #of message entries in local msg queue */ + u64 local_msgqueue_pa; /* physical address of local message queue */ +}; + +#define XPC_OPENCLOSE_ARGS_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) + + + +/* struct xpc_msg flags */ + +#define XPC_M_DONE 0x01 /* msg has been received/consumed */ +#define XPC_M_READY 0x02 /* msg is ready to be sent */ +#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ + + +#define XPC_MSG_ADDRESS(_payload) \ + ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) + + + +/* + * Defines notify entry. + * + * This is used to notify a message's sender that their message was received + * and consumed by the intended recipient. + */ +struct xpc_notify { + struct semaphore sema; /* notify semaphore */ + u8 type; /* type of notification */ + + /* the following two fields are only used if type == XPC_N_CALL */ + xpc_notify_func func; /* user's notify function */ + void *key; /* pointer to user's key */ +}; + +/* struct xpc_notify type of notification */ + +#define XPC_N_CALL 0x01 /* notify function provided by user */ + + + +/* + * Define the structure that manages all the stuff required by a channel. In + * particular, they are used to manage the messages sent across the channel. + * + * This structure is private to a partition, and is NOT shared across the + * partition boundary. + * + * There is an array of these structures for each remote partition. 
It is + * allocated at the time a partition becomes active. The array contains one + * of these structures for each potential channel connection to that partition. + * + * Each of these structures manages two message queues (circular buffers). + * They are allocated at the time a channel connection is made. One of + * these message queues (local_msgqueue) holds the locally created messages + * that are destined for the remote partition. The other of these message + * queues (remote_msgqueue) is a locally cached copy of the remote partition's + * own local_msgqueue. + * + * The following is a description of the Get/Put pointers used to manage these + * two message queues. Consider the local_msgqueue to be on one partition + * and the remote_msgqueue to be its cached copy on another partition. A + * description of what each of the lettered areas contains is included. + * + * + * local_msgqueue remote_msgqueue + * + * |/////////| |/////////| + * w_remote_GP.get --> +---------+ |/////////| + * | F | |/////////| + * remote_GP.get --> +---------+ +---------+ <-- local_GP->get + * | | | | + * | | | E | + * | | | | + * | | +---------+ <-- w_local_GP.get + * | B | |/////////| + * | | |////D////| + * | | |/////////| + * | | +---------+ <-- w_remote_GP.put + * | | |////C////| + * local_GP->put --> +---------+ +---------+ <-- remote_GP.put + * | | |/////////| + * | A | |/////////| + * | | |/////////| + * w_local_GP.put --> +---------+ |/////////| + * |/////////| |/////////| + * + * + * ( remote_GP.[get|put] are cached copies of the remote + * partition's local_GP->[get|put], and thus their values can + * lag behind their counterparts on the remote partition. ) + * + * + * A - Messages that have been allocated, but have not yet been sent to the + * remote partition. + * + * B - Messages that have been sent, but have not yet been acknowledged by the + * remote partition as having been received. + * + * C - Area that needs to be prepared for the copying of sent messages, by + * the clearing of the message flags of any previously received messages. + * + * D - Area into which sent messages are to be copied from the remote + * partition's local_msgqueue and then delivered to their intended + * recipients. [ To allow for a multi-message copy, another pointer + * (next_msg_to_pull) has been added to keep track of the next message + * number needing to be copied (pulled). It chases after w_remote_GP.put. + * Any messages lying between w_local_GP.get and next_msg_to_pull have + * been copied and are ready to be delivered. ] + * + * E - Messages that have been copied and delivered, but have not yet been + * acknowledged by the recipient as having been received. + * + * F - Messages that have been acknowledged, but XPC has not yet notified the + * sender that the message was received by its intended recipient. + * This is also an area that needs to be prepared for the allocating of + * new messages, by the clearing of the message flags of the acknowledged + * messages. 
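To make the diagram above concrete, here is a hedged sketch of the ring arithmetic it implies; the slot addressing matches what xpc_channel.c below does with (get % nentries) * msg_size, while the helper names themselves are hypothetical:

/* Illustrative only: locate a message slot and count free local entries. */
static struct xpc_msg *
example_msg_slot(struct xpc_channel *ch, s64 msg_number)
{
        /* Get/Put values only ever increase; modulo maps them to a slot. */
        return (struct xpc_msg *)((u64) ch->local_msgqueue +
                (msg_number % ch->local_nentries) * ch->msg_size);
}

static s64
example_free_local_entries(struct xpc_channel *ch)
{
        /* everything from w_remote_GP.get up to w_local_GP.put (areas F,
         * B and A above) is still in use; the rest can be allocated */
        return ch->local_nentries - (ch->w_local_GP.put - ch->w_remote_GP.get);
}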
+ */ +struct xpc_channel { + partid_t partid; /* ID of remote partition connected */ + spinlock_t lock; /* lock for updating this structure */ + u32 flags; /* general flags */ + + enum xpc_retval reason; /* reason why channel is disconnect'g */ + int reason_line; /* line# disconnect initiated from */ + + u16 number; /* channel # */ + + u16 msg_size; /* sizeof each msg entry */ + u16 local_nentries; /* #of msg entries in local msg queue */ + u16 remote_nentries; /* #of msg entries in remote msg queue*/ + + void *local_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *local_msgqueue; /* local message queue */ + void *remote_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */ + /* local message queue */ + u64 remote_msgqueue_pa; /* phys addr of remote partition's */ + /* local message queue */ + + atomic_t references; /* #of external references to queues */ + + atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ + wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ + + /* queue of msg senders who want to be notified when msg received */ + + atomic_t n_to_notify; /* #of msg senders to notify */ + struct xpc_notify *notify_queue;/* notify queue for messages sent */ + + xpc_channel_func func; /* user's channel function */ + void *key; /* pointer to user's key */ + + struct semaphore msg_to_pull_sema; /* next msg to pull serialization */ + struct semaphore teardown_sema; /* wait for teardown completion */ + + struct xpc_openclose_args *local_openclose_args; /* args passed on */ + /* opening or closing of channel */ + + /* various flavors of local and remote Get/Put values */ + + struct xpc_gp *local_GP; /* local Get/Put values */ + struct xpc_gp remote_GP; /* remote Get/Put values */ + struct xpc_gp w_local_GP; /* working local Get/Put values */ + struct xpc_gp w_remote_GP; /* working remote Get/Put values */ + s64 next_msg_to_pull; /* Put value of next msg to pull */ + + /* kthread management related fields */ + +// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps +// >>> allow the assigned limit be unbounded and let the idle limit be dynamic +// >>> dependent on activity over the last interval of time + atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ + u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ + atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ + u32 kthreads_idle_limit; /* limit on #of kthreads idle */ + atomic_t kthreads_active; /* #of kthreads actively working */ + // >>> following field is temporary + u32 kthreads_created; /* total #of kthreads created */ + + wait_queue_head_t idle_wq; /* idle kthread wait queue */ + +} ____cacheline_aligned; + + +/* struct xpc_channel flags */ + +#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ + +#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ +#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ +#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ +#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ + +#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ +#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */ +#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */ +#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */ + +#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */ 
+#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */ +#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */ +#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */ + +#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */ +#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */ + + + +/* + * Manages channels on a partition basis. There is one of these structures + * for each partition (a partition will never utilize the structure that + * represents itself). + */ +struct xpc_partition { + + /* XPC HB infrastructure */ + + u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ + u64 remote_vars_pa; /* phys addr of partition's vars */ + u64 remote_vars_part_pa; /* phys addr of partition's vars part */ + u64 last_heartbeat; /* HB at last read */ + u64 remote_amos_page_pa; /* phys addr of partition's amos page */ + int remote_act_nasid; /* active part's act/deact nasid */ + int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ + u32 act_IRQ_rcvd; /* IRQs since activation */ + spinlock_t act_lock; /* protect updating of act_state */ + u8 act_state; /* from XPC HB viewpoint */ + enum xpc_retval reason; /* reason partition is deactivating */ + int reason_line; /* line# deactivation initiated from */ + int reactivate_nasid; /* nasid in partition to reactivate */ + + + /* XPC infrastructure referencing and teardown control */ + + u8 setup_state; /* infrastructure setup state */ + wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ + atomic_t references; /* #of references to infrastructure */ + + + /* + * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN + * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION + * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE + * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) 
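The capitalized note above is honored by xpc_setup_infrastructure() later in this patch, which clears only the tail of the structure starting at 'nchannels'; in essence:

        /* zero only the fields from 'nchannels' onward; the HB and teardown
         * fields that precede it stay valid across partition ups and downs */
        memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
                                offsetof(struct xpc_partition, nchannels));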
+ */ + + + u8 nchannels; /* #of defined channels supported */ + atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ + struct xpc_channel *channels;/* array of channel structures */ + + void *local_GPs_base; /* base address of kmalloc'd space */ + struct xpc_gp *local_GPs; /* local Get/Put values */ + void *remote_GPs_base; /* base address of kmalloc'd space */ + struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ + /* values */ + u64 remote_GPs_pa; /* phys address of remote partition's local */ + /* Get/Put values */ + + + /* fields used to pass args when opening or closing a channel */ + + void *local_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *local_openclose_args; /* local's args */ + void *remote_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ + /* args */ + u64 remote_openclose_args_pa; /* phys addr of remote's args */ + + + /* IPI sending, receiving and handling related fields */ + + int remote_IPI_nasid; /* nasid of where to send IPIs */ + int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ + AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ + + AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */ + u64 local_IPI_amo; /* IPI amo flags yet to be handled */ + char IPI_owner[8]; /* IPI owner's name */ + struct timer_list dropped_IPI_timer; /* dropped IPI timer */ + + spinlock_t IPI_lock; /* IPI handler lock */ + + + /* channel manager related fields */ + + atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ + wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ + +} ____cacheline_aligned; + + +/* struct xpc_partition act_state values (for XPC HB) */ + +#define XPC_P_INACTIVE 0x00 /* partition is not active */ +#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */ +#define XPC_P_ACTIVATING 0x02 /* activation thread started */ +#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ +#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ + + +#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ + xpc_deactivate_partition(__LINE__, (_p), (_reason)) + + +/* struct xpc_partition setup_state values */ + +#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ +#define XPC_P_SETUP 0x01 /* infrastructure is setup */ +#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ +#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ + + +/* + * struct xpc_partition IPI_timer #of seconds to wait before checking for + * dropped IPIs. These occur whenever an IPI amo write doesn't complete until + * after the IPI was received. 
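For context, the timer that uses the XPC_P_DROPPED_IPI_WAIT interval defined just below is armed in xpc_setup_infrastructure() later in this patch. The handler itself, xpc_dropped_IPI_check(), lives in xpc_main.c and is not shown here; a hypothetical sketch of such a periodic re-check would look roughly like this:

/* Hypothetical sketch only -- re-check for activity and re-arm the timer. */
static void
example_dropped_IPI_check(struct xpc_partition *part)
{
        if (xpc_part_ref(part)) {
                /* pick up any IPI AMO flags whose interrupt never arrived */
                xpc_check_for_channel_activity(part);

                part->dropped_IPI_timer.expires = jiffies +
                                                XPC_P_DROPPED_IPI_WAIT;
                add_timer(&part->dropped_IPI_timer);
                xpc_part_deref(part);
        }
}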
+ */ +#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) + + +#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) + + + +/* found in xp_main.c */ +extern struct xpc_registration xpc_registrations[]; + + +/* >>> found in xpc_main.c only */ +extern struct device *xpc_part; +extern struct device *xpc_chan; +extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *); +extern void xpc_dropped_IPI_check(struct xpc_partition *); +extern void xpc_activate_kthreads(struct xpc_channel *, int); +extern void xpc_create_kthreads(struct xpc_channel *, int); +extern void xpc_disconnect_wait(int); + + +/* found in xpc_main.c and efi-xpc.c */ +extern void xpc_activate_partition(struct xpc_partition *); + + +/* found in xpc_partition.c */ +extern int xpc_exiting; +extern int xpc_hb_interval; +extern int xpc_hb_check_interval; +extern struct xpc_vars *xpc_vars; +extern struct xpc_rsvd_page *xpc_rsvd_page; +extern struct xpc_vars_part *xpc_vars_part; +extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; +extern char xpc_remote_copy_buffer[]; +extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); +extern void xpc_allow_IPI_ops(void); +extern void xpc_restrict_IPI_ops(void); +extern int xpc_identify_act_IRQ_sender(void); +extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *); +extern void xpc_mark_partition_inactive(struct xpc_partition *); +extern void xpc_discovery(void); +extern void xpc_check_remote_hb(void); +extern void xpc_deactivate_partition(const int, struct xpc_partition *, + enum xpc_retval); +extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); + + +/* found in xpc_channel.c */ +extern void xpc_initiate_connect(int); +extern void xpc_initiate_disconnect(int); +extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **); +extern enum xpc_retval xpc_initiate_send(partid_t, int, void *); +extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *, + xpc_notify_func, void *); +extern void xpc_initiate_received(partid_t, int, void *); +extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *); +extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *); +extern void xpc_process_channel_activity(struct xpc_partition *); +extern void xpc_connected_callout(struct xpc_channel *); +extern void xpc_deliver_msg(struct xpc_channel *); +extern void xpc_disconnect_channel(const int, struct xpc_channel *, + enum xpc_retval, unsigned long *); +extern void xpc_disconnected_callout(struct xpc_channel *); +extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval); +extern void xpc_teardown_infrastructure(struct xpc_partition *); + + + +static inline void +xpc_wakeup_channel_mgr(struct xpc_partition *part) +{ + if (atomic_inc_return(&part->channel_mgr_requests) == 1) { + wake_up(&part->channel_mgr_wq); + } +} + + + +/* + * These next two inlines are used to keep us from tearing down a channel's + * msg queues while a thread may be referencing them. 
+ */ +static inline void +xpc_msgqueue_ref(struct xpc_channel *ch) +{ + atomic_inc(&ch->references); +} + +static inline void +xpc_msgqueue_deref(struct xpc_channel *ch) +{ + s32 refs = atomic_dec_return(&ch->references); + + DBUG_ON(refs < 0); + if (refs == 0) { + xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); + } +} + + + +#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ + xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) + + +/* + * These two inlines are used to keep us from tearing down a partition's + * setup infrastructure while a thread may be referencing it. + */ +static inline void +xpc_part_deref(struct xpc_partition *part) +{ + s32 refs = atomic_dec_return(&part->references); + + + DBUG_ON(refs < 0); + if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { + wake_up(&part->teardown_wq); + } +} + +static inline int +xpc_part_ref(struct xpc_partition *part) +{ + int setup; + + + atomic_inc(&part->references); + setup = (part->setup_state == XPC_P_SETUP); + if (!setup) { + xpc_part_deref(part); + } + return setup; +} + + + +/* + * The following macro is to be used for the setting of the reason and + * reason_line fields in both the struct xpc_channel and struct xpc_partition + * structures. + */ +#define XPC_SET_REASON(_p, _reason, _line) \ + { \ + (_p)->reason = _reason; \ + (_p)->reason_line = _line; \ + } + + + +/* + * The following set of macros and inlines are used for the sending and + * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, + * one that is associated with partition activity (SGI_XPC_ACTIVATE) and + * the other that is associated with channel activity (SGI_XPC_NOTIFY). + */ + +static inline u64 +xpc_IPI_receive(AMO_t *amo) +{ + return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); +} + + +static inline enum xpc_retval +xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) +{ + int ret = 0; + unsigned long irq_flags; + + + local_irq_save(irq_flags); + + FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); + sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IPIs and AMOs to it until the heartbeat times out. + */ + ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); + + return ((ret == 0) ? xpcSuccess : xpcPioReadError); +} + + +/* + * IPIs associated with SGI_XPC_ACTIVATE IRQ. + */ + +/* + * Flag the appropriate AMO variable and send an IPI to the specified node. 
+ */ +static inline void +xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid, + int to_phys_cpuid) +{ + int w_index = XPC_NASID_W_INDEX(from_nasid); + int b_index = XPC_NASID_B_INDEX(from_nasid); + AMO_t *amos = (AMO_t *) __va(amos_page + + (XP_MAX_PARTITIONS * sizeof(AMO_t))); + + + (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, + to_phys_cpuid, SGI_XPC_ACTIVATE); +} + +static inline void +xpc_IPI_send_activate(struct xpc_vars *vars) +{ + xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), + vars->act_nasid, vars->act_phys_cpuid); +} + +static inline void +xpc_IPI_send_activated(struct xpc_partition *part) +{ + xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), + part->remote_act_nasid, part->remote_act_phys_cpuid); +} + +static inline void +xpc_IPI_send_reactivate(struct xpc_partition *part) +{ + xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, + xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); +} + + +/* + * IPIs associated with SGI_XPC_NOTIFY IRQ. + */ + +/* + * Send an IPI to the remote partition that is associated with the + * specified channel. + */ +#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \ + xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f) + +static inline void +xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, + unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + enum xpc_retval ret; + + + if (likely(part->act_state != XPC_P_DEACTIVATING)) { + ret = xpc_IPI_send(part->remote_IPI_amo_va, + (u64) ipi_flag << (ch->number * 8), + part->remote_IPI_nasid, + part->remote_IPI_phys_cpuid, + SGI_XPC_NOTIFY); + dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", + ipi_flag_string, ch->partid, ch->number, ret); + if (unlikely(ret != xpcSuccess)) { + if (irq_flags != NULL) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + } + XPC_DEACTIVATE_PARTITION(part, ret); + if (irq_flags != NULL) { + spin_lock_irqsave(&ch->lock, *irq_flags); + } + } + } +} + + +/* + * Make it look like the remote partition, which is associated with the + * specified channel, sent us an IPI. This faked IPI will be handled + * by xpc_dropped_IPI_check(). + */ +#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \ + xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f) + +static inline void +xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, + char *ipi_flag_string) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + + + FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), + FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8))); + dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", + ipi_flag_string, ch->partid, ch->number); +} + + +/* + * The sending and receiving of IPIs includes the setting of an AMO variable + * to indicate the reason the IPI was sent. The 64-bit variable is divided + * up into eight bytes, ordered from right to left. Byte zero pertains to + * channel 0, byte one to channel 1, and so on. Each byte is described by + * the following IPI flags. 
+ */ + +#define XPC_IPI_CLOSEREQUEST 0x01 +#define XPC_IPI_CLOSEREPLY 0x02 +#define XPC_IPI_OPENREQUEST 0x04 +#define XPC_IPI_OPENREPLY 0x08 +#define XPC_IPI_MSGREQUEST 0x10 + + +/* given an AMO variable and a channel#, get its associated IPI flags */ +#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) + +#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) +#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) + + +static inline void +xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + + args->reason = ch->reason; + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); +} + +static inline void +xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags) +{ + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags); +} + +static inline void +xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + + args->msg_size = ch->msg_size; + args->local_nentries = ch->local_nentries; + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags); +} + +static inline void +xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + + args->remote_nentries = ch->remote_nentries; + args->local_nentries = ch->local_nentries; + args->local_msgqueue_pa = __pa(ch->local_msgqueue); + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags); +} + +static inline void +xpc_IPI_send_msgrequest(struct xpc_channel *ch) +{ + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL); +} + +static inline void +xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) +{ + XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); +} + + +/* + * Memory for XPC's AMO variables is allocated by the MSPEC driver. These + * pages are located in the lowest granule. The lowest granule uses 4k pages + * for cached references and an alternate TLB handler to never provide a + * cacheable mapping for the entire region. This will prevent speculative + * reading of cached copies of our lines from being issued which will cause + * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 + * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c) + * and an additional 16 AMO variables for partition activation (xpc_hb.c). 
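Putting addresses on the layout just described: the 64 notify AMOs are indexed directly by partid, and the 16 activation AMOs follow them in the same page, which is why xpc_IPI_init() just below adds partid while xpc_activate_IRQ_send() above skips past XP_MAX_PARTITIONS entries first. A hedged sketch of the two computations, assuming the page's virtual address is in hand (the helper names are hypothetical):

/* Illustrative only: where the two classes of AMO variables live. */
static AMO_t *
example_notify_amo(AMO_t *amos_page, partid_t partid)
{
        return amos_page + partid;                      /* entries 0..63 */
}

static AMO_t *
example_activate_amo(AMO_t *amos_page, int from_nasid)
{
        return amos_page + XP_MAX_PARTITIONS +          /* entries 64.. */
                                XPC_NASID_W_INDEX(from_nasid);
}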
+ */ +static inline AMO_t * +xpc_IPI_init(partid_t partid) +{ + AMO_t *part_amo = xpc_vars->amos_page + partid; + + + xpc_IPI_receive(part_amo); + return part_amo; +} + + + +static inline enum xpc_retval +xpc_map_bte_errors(bte_result_t error) +{ + switch (error) { + case BTE_SUCCESS: return xpcSuccess; + case BTEFAIL_DIR: return xpcBteDirectoryError; + case BTEFAIL_POISON: return xpcBtePoisonError; + case BTEFAIL_WERR: return xpcBteWriteError; + case BTEFAIL_ACCESS: return xpcBteAccessError; + case BTEFAIL_PWERR: return xpcBtePWriteError; + case BTEFAIL_PRERR: return xpcBtePReadError; + case BTEFAIL_TOUT: return xpcBteTimeOutError; + case BTEFAIL_XTERR: return xpcBteXtalkError; + case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; + default: return xpcBteUnmappedError; + } +} + + + +static inline void * +xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base) +{ + /* see if kmalloc will give us cachline aligned memory by default */ + *base = kmalloc(size, flags); + if (*base == NULL) { + return NULL; + } + if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { + return *base; + } + kfree(*base); + + /* nope, we'll have to do it ourselves */ + *base = kmalloc(size + L1_CACHE_BYTES, flags); + if (*base == NULL) { + return NULL; + } + return (void *) L1_CACHE_ALIGN((u64) *base); +} + + +/* + * Check to see if there is any channel activity to/from the specified + * partition. + */ +static inline void +xpc_check_for_channel_activity(struct xpc_partition *part) +{ + u64 IPI_amo; + unsigned long irq_flags; + + + IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); + if (IPI_amo == 0) { + return; + } + + spin_lock_irqsave(&part->IPI_lock, irq_flags); + part->local_IPI_amo |= IPI_amo; + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); + + dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n", + XPC_PARTID(part), IPI_amo); + + xpc_wakeup_channel_mgr(part); +} + + +#endif /* _IA64_SN_KERNEL_XPC_H */ + diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c new file mode 100644 index 0000000..0bf6fbc --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_channel.c @@ -0,0 +1,2297 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) channel support. + * + * This is the part of XPC that manages the channels and + * sends/receives messages across them to/from other partitions. + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include "xpc.h" + + +/* + * Set up the initial values for the XPartition Communication channels. 
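One usage note on xpc_kmalloc_cacheline_aligned() above: callers keep the raw pointer handed back through *base for the eventual kfree() and only ever dereference the aligned return value, which is exactly how xpc_setup_infrastructure() below treats the GP and openclose-args areas. A minimal sketch of the pattern:

        /* Illustrative only: free via the base, never the aligned pointer. */
        void *gps_base;
        struct xpc_gp *gps = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
                                                GFP_KERNEL, &gps_base);
        if (gps != NULL) {
                memset(gps, 0, XPC_GP_SIZE);
                /* ... use gps ... */
                kfree(gps_base);        /* gps may be offset into the block */
        }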
+ */ +static void +xpc_initialize_channels(struct xpc_partition *part, partid_t partid) +{ + int ch_number; + struct xpc_channel *ch; + + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + ch->partid = partid; + ch->number = ch_number; + ch->flags = XPC_C_DISCONNECTED; + + ch->local_GP = &part->local_GPs[ch_number]; + ch->local_openclose_args = + &part->local_openclose_args[ch_number]; + + atomic_set(&ch->kthreads_assigned, 0); + atomic_set(&ch->kthreads_idle, 0); + atomic_set(&ch->kthreads_active, 0); + + atomic_set(&ch->references, 0); + atomic_set(&ch->n_to_notify, 0); + + spin_lock_init(&ch->lock); + sema_init(&ch->msg_to_pull_sema, 1); /* mutex */ + + atomic_set(&ch->n_on_msg_allocate_wq, 0); + init_waitqueue_head(&ch->msg_allocate_wq); + init_waitqueue_head(&ch->idle_wq); + } +} + + +/* + * Setup the infrastructure necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +enum xpc_retval +xpc_setup_infrastructure(struct xpc_partition *part) +{ + int ret; + struct timer_list *timer; + partid_t partid = XPC_PARTID(part); + + + /* + * Zero out MOST of the entry for this partition. Only the fields + * starting with `nchannels' will be zeroed. The preceding fields must + * remain `viable' across partition ups and downs, since they may be + * referenced during this memset() operation. + */ + memset(&part->nchannels, 0, sizeof(struct xpc_partition) - + offsetof(struct xpc_partition, nchannels)); + + /* + * Allocate all of the channel structures as a contiguous chunk of + * memory. + */ + part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, + GFP_KERNEL); + if (part->channels == NULL) { + dev_err(xpc_chan, "can't get memory for channels\n"); + return xpcNoMemory; + } + memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS); + + part->nchannels = XPC_NCHANNELS; + + + /* allocate all the required GET/PUT values */ + + part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, + GFP_KERNEL, &part->local_GPs_base); + if (part->local_GPs == NULL) { + kfree(part->channels); + part->channels = NULL; + dev_err(xpc_chan, "can't get memory for local get/put " + "values\n"); + return xpcNoMemory; + } + memset(part->local_GPs, 0, XPC_GP_SIZE); + + part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, + GFP_KERNEL, &part->remote_GPs_base); + if (part->remote_GPs == NULL) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + dev_err(xpc_chan, "can't get memory for remote get/put " + "values\n"); + return xpcNoMemory; + } + memset(part->remote_GPs, 0, XPC_GP_SIZE); + + + /* allocate all the required open and close args */ + + part->local_openclose_args = xpc_kmalloc_cacheline_aligned( + XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, + &part->local_openclose_args_base); + if (part->local_openclose_args == NULL) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + dev_err(xpc_chan, "can't get memory for local connect args\n"); + return xpcNoMemory; + } + memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); + + part->remote_openclose_args = xpc_kmalloc_cacheline_aligned( + XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, + &part->remote_openclose_args_base); + if (part->remote_openclose_args == NULL) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = 
NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + dev_err(xpc_chan, "can't get memory for remote connect args\n"); + return xpcNoMemory; + } + memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); + + + xpc_initialize_channels(part, partid); + + atomic_set(&part->nchannels_active, 0); + + + /* local_IPI_amo were set to 0 by an earlier memset() */ + + /* Initialize this partitions AMO_t structure */ + part->local_IPI_amo_va = xpc_IPI_init(partid); + + spin_lock_init(&part->IPI_lock); + + atomic_set(&part->channel_mgr_requests, 1); + init_waitqueue_head(&part->channel_mgr_wq); + + sprintf(part->IPI_owner, "xpc%02d", partid); + ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ, + part->IPI_owner, (void *) (u64) partid); + if (ret != 0) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " + "errno=%d\n", -ret); + return xpcLackOfResources; + } + + /* Setup a timer to check for dropped IPIs */ + timer = &part->dropped_IPI_timer; + init_timer(timer); + timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; + timer->data = (unsigned long) part; + timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; + add_timer(timer); + + /* + * With the setting of the partition setup_state to XPC_P_SETUP, we're + * declaring that this partition is ready to go. + */ + (volatile u8) part->setup_state = XPC_P_SETUP; + + + /* + * Setup the per partition specific variables required by the + * remote partition to establish channel connections with us. + * + * The setting of the magic # indicates that these per partition + * specific variables are ready to be used. + */ + xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); + xpc_vars_part[partid].openclose_args_pa = + __pa(part->local_openclose_args); + xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); + xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id()); + xpc_vars_part[partid].IPI_phys_cpuid = + cpu_physical_id(smp_processor_id()); + xpc_vars_part[partid].nchannels = part->nchannels; + (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1; + + return xpcSuccess; +} + + +/* + * Create a wrapper that hides the underlying mechanism for pulling a cacheline + * (or multiple cachelines) from a remote partition. + * + * src must be a cacheline aligned physical address on the remote partition. + * dst must be a cacheline aligned virtual address on this partition. 
+ * cnt must be an cacheline sized + */ +static enum xpc_retval +xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, + const void *src, size_t cnt) +{ + bte_result_t bte_ret; + + + DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src)); + DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst)); + DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); + + if (part->act_state == XPC_P_DEACTIVATING) { + return part->reason; + } + + bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst), + (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL); + if (bte_ret == BTE_SUCCESS) { + return xpcSuccess; + } + + dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", + XPC_PARTID(part), bte_ret); + + return xpc_map_bte_errors(bte_ret); +} + + +/* + * Pull the remote per partititon specific variables from the specified + * partition. + */ +enum xpc_retval +xpc_pull_remote_vars_part(struct xpc_partition *part) +{ + u8 buffer[L1_CACHE_BYTES * 2]; + struct xpc_vars_part *pulled_entry_cacheline = + (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer); + struct xpc_vars_part *pulled_entry; + u64 remote_entry_cacheline_pa, remote_entry_pa; + partid_t partid = XPC_PARTID(part); + enum xpc_retval ret; + + + /* pull the cacheline that contains the variables we're interested in */ + + DBUG_ON(part->remote_vars_part_pa != + L1_CACHE_ALIGN(part->remote_vars_part_pa)); + DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); + + remote_entry_pa = part->remote_vars_part_pa + + sn_partition_id * sizeof(struct xpc_vars_part); + + remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); + + pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline + + (remote_entry_pa & (L1_CACHE_BYTES - 1))); + + ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, + (void *) remote_entry_cacheline_pa, + L1_CACHE_BYTES); + if (ret != xpcSuccess) { + dev_dbg(xpc_chan, "failed to pull XPC vars_part from " + "partition %d, ret=%d\n", partid, ret); + return ret; + } + + + /* see if they've been set up yet */ + + if (pulled_entry->magic != XPC_VP_MAGIC1 && + pulled_entry->magic != XPC_VP_MAGIC2) { + + if (pulled_entry->magic != 0) { + dev_dbg(xpc_chan, "partition %d's XPC vars_part for " + "partition %d has bad magic value (=0x%lx)\n", + partid, sn_partition_id, pulled_entry->magic); + return xpcBadMagic; + } + + /* they've not been initialized yet */ + return xpcRetry; + } + + if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { + + /* validate the variables */ + + if (pulled_entry->GPs_pa == 0 || + pulled_entry->openclose_args_pa == 0 || + pulled_entry->IPI_amo_pa == 0) { + + dev_err(xpc_chan, "partition %d's XPC vars_part for " + "partition %d are not valid\n", partid, + sn_partition_id); + return xpcInvalidAddress; + } + + /* the variables we imported look to be valid */ + + part->remote_GPs_pa = pulled_entry->GPs_pa; + part->remote_openclose_args_pa = + pulled_entry->openclose_args_pa; + part->remote_IPI_amo_va = + (AMO_t *) __va(pulled_entry->IPI_amo_pa); + part->remote_IPI_nasid = pulled_entry->IPI_nasid; + part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; + + if (part->nchannels > pulled_entry->nchannels) { + part->nchannels = pulled_entry->nchannels; + } + + /* let the other side know that we've pulled their variables */ + + (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2; + } + + if (pulled_entry->magic == XPC_VP_MAGIC1) { + return xpcRetry; + } + + return xpcSuccess; +} + + +/* + * Get the IPI flags and pull the openclose args and/or remote GPs as needed. 
+ */ +static u64 +xpc_get_IPI_flags(struct xpc_partition *part) +{ + unsigned long irq_flags; + u64 IPI_amo; + enum xpc_retval ret; + + + /* + * See if there are any IPI flags to be handled. + */ + + spin_lock_irqsave(&part->IPI_lock, irq_flags); + if ((IPI_amo = part->local_IPI_amo) != 0) { + part->local_IPI_amo = 0; + } + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); + + + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { + ret = xpc_pull_remote_cachelines(part, + part->remote_openclose_args, + (void *) part->remote_openclose_args_pa, + XPC_OPENCLOSE_ARGS_SIZE); + if (ret != xpcSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull openclose args from " + "partition %d, ret=%d\n", XPC_PARTID(part), + ret); + + /* don't bother processing IPIs anymore */ + IPI_amo = 0; + } + } + + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { + ret = xpc_pull_remote_cachelines(part, part->remote_GPs, + (void *) part->remote_GPs_pa, + XPC_GP_SIZE); + if (ret != xpcSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull GPs from partition " + "%d, ret=%d\n", XPC_PARTID(part), ret); + + /* don't bother processing IPIs anymore */ + IPI_amo = 0; + } + } + + return IPI_amo; +} + + +/* + * Allocate the local message queue and the notify queue. + */ +static enum xpc_retval +xpc_allocate_local_msgqueue(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int nentries; + size_t nbytes; + + + // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between + // >>> iterations of the for-loop, bail if set? + + // >>> should we impose a minumum #of entries? like 4 or 8? + for (nentries = ch->local_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->msg_size; + ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes, + (GFP_KERNEL | GFP_DMA), + &ch->local_msgqueue_base); + if (ch->local_msgqueue == NULL) { + continue; + } + memset(ch->local_msgqueue, 0, nbytes); + + nbytes = nentries * sizeof(struct xpc_notify); + ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA)); + if (ch->notify_queue == NULL) { + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + continue; + } + memset(ch->notify_queue, 0, nbytes); + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->local_nentries, ch->partid, ch->number); + + ch->local_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for local message queue and notify " + "queue, partid=%d, channel=%d\n", ch->partid, ch->number); + return xpcNoMemory; +} + + +/* + * Allocate the cached remote message queue. + */ +static enum xpc_retval +xpc_allocate_remote_msgqueue(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int nentries; + size_t nbytes; + + + DBUG_ON(ch->remote_nentries <= 0); + + // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between + // >>> iterations of the for-loop, bail if set? + + // >>> should we impose a minumum #of entries? like 4 or 8? 
+ for (nentries = ch->remote_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->msg_size; + ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes, + (GFP_KERNEL | GFP_DMA), + &ch->remote_msgqueue_base); + if (ch->remote_msgqueue == NULL) { + continue; + } + memset(ch->remote_msgqueue, 0, nbytes); + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->remote_nentries, ch->partid, ch->number); + + ch->remote_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + return xpcNoMemory; +} + + +/* + * Allocate message queues and other stuff associated with a channel. + * + * Note: Assumes all of the channel sizes are filled in. + */ +static enum xpc_retval +xpc_allocate_msgqueues(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int i; + enum xpc_retval ret; + + + DBUG_ON(ch->flags & XPC_C_SETUP); + + if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { + return ret; + } + + if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + kfree(ch->notify_queue); + ch->notify_queue = NULL; + return ret; + } + + for (i = 0; i < ch->local_nentries; i++) { + /* use a semaphore as an event wait queue */ + sema_init(&ch->notify_queue[i].sema, 0); + } + + sema_init(&ch->teardown_sema, 0); /* event wait */ + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_SETUP; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + return xpcSuccess; +} + + +/* + * Process a connect message from a remote partition. + * + * Note: xpc_process_connect() is expecting to be called with the + * spin_lock_irqsave held and will leave it locked upon return. + */ +static void +xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + enum xpc_retval ret; + + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_OPENREQUEST) || + !(ch->flags & XPC_C_ROPENREQUEST)) { + /* nothing more to do for now */ + return; + } + DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); + + if (!(ch->flags & XPC_C_SETUP)) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + ret = xpc_allocate_msgqueues(ch); + spin_lock_irqsave(&ch->lock, *irq_flags); + + if (ret != xpcSuccess) { + XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); + } + if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { + return; + } + + DBUG_ON(!(ch->flags & XPC_C_SETUP)); + DBUG_ON(ch->local_msgqueue == NULL); + DBUG_ON(ch->remote_msgqueue == NULL); + } + + if (!(ch->flags & XPC_C_OPENREPLY)) { + ch->flags |= XPC_C_OPENREPLY; + xpc_IPI_send_openreply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_ROPENREPLY)) { + return; + } + + DBUG_ON(ch->remote_msgqueue_pa == 0); + + ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ + + dev_info(xpc_chan, "channel %d to partition %d connected\n", + ch->number, ch->partid); + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + xpc_create_kthreads(ch, 1); + spin_lock_irqsave(&ch->lock, *irq_flags); +} + + +/* + * Free up message queues and other stuff that were allocated for the specified + * channel. + * + * Note: ch->reason and ch->reason_line are left set for debugging purposes, + * they're cleared when XPC_C_DISCONNECTED is cleared. 
+ */ +static void +xpc_free_msgqueues(struct xpc_channel *ch) +{ + DBUG_ON(!spin_is_locked(&ch->lock)); + DBUG_ON(atomic_read(&ch->n_to_notify) != 0); + + ch->remote_msgqueue_pa = 0; + ch->func = NULL; + ch->key = NULL; + ch->msg_size = 0; + ch->local_nentries = 0; + ch->remote_nentries = 0; + ch->kthreads_assigned_limit = 0; + ch->kthreads_idle_limit = 0; + + ch->local_GP->get = 0; + ch->local_GP->put = 0; + ch->remote_GP.get = 0; + ch->remote_GP.put = 0; + ch->w_local_GP.get = 0; + ch->w_local_GP.put = 0; + ch->w_remote_GP.get = 0; + ch->w_remote_GP.put = 0; + ch->next_msg_to_pull = 0; + + if (ch->flags & XPC_C_SETUP) { + ch->flags &= ~XPC_C_SETUP; + + dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", + ch->flags, ch->partid, ch->number); + + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + kfree(ch->remote_msgqueue_base); + ch->remote_msgqueue = NULL; + kfree(ch->notify_queue); + ch->notify_queue = NULL; + + /* in case someone is waiting for the teardown to complete */ + up(&ch->teardown_sema); + } +} + + +/* + * spin_lock_irqsave() is expected to be held on entry. + */ +static void +xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + u32 ch_flags = ch->flags; + + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + return; + } + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + /* make sure all activity has settled down first */ + + if (atomic_read(&ch->references) > 0) { + return; + } + DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); + + /* it's now safe to free the channel's message queues */ + + xpc_free_msgqueues(ch); + DBUG_ON(ch->flags & XPC_C_SETUP); + + if (part->act_state != XPC_P_DEACTIVATING) { + + /* as long as the other side is up do the full protocol */ + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { + return; + } + + if (!(ch->flags & XPC_C_CLOSEREPLY)) { + ch->flags |= XPC_C_CLOSEREPLY; + xpc_IPI_send_closereply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_RCLOSEREPLY)) { + return; + } + } + + /* both sides are disconnected now */ + + ch->flags = XPC_C_DISCONNECTED; /* clear all flags, but this one */ + + atomic_dec(&part->nchannels_active); + + if (ch_flags & XPC_C_WASCONNECTED) { + dev_info(xpc_chan, "channel %d to partition %d disconnected, " + "reason=%d\n", ch->number, ch->partid, ch->reason); + } +} + + +/* + * Process a change in the channel's remote connection state. + */ +static void +xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, + u8 IPI_flags) +{ + unsigned long irq_flags; + struct xpc_openclose_args *args = + &part->remote_openclose_args[ch_number]; + struct xpc_channel *ch = &part->channels[ch_number]; + enum xpc_retval reason; + + + + spin_lock_irqsave(&ch->lock, irq_flags); + + + if (IPI_flags & XPC_IPI_CLOSEREQUEST) { + + dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " + "from partid=%d, channel=%d\n", args->reason, + ch->partid, ch->number); + + /* + * If RCLOSEREQUEST is set, we're probably waiting for + * RCLOSEREPLY. We should find it and a ROPENREQUEST packed + * with this RCLOSEQREUQEST in the IPI_flags. 
+ */ + + if (ch->flags & XPC_C_RCLOSEREQUEST) { + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); + DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); + + DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY)); + IPI_flags &= ~XPC_IPI_CLOSEREPLY; + ch->flags |= XPC_C_RCLOSEREPLY; + + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + } + + if (ch->flags & XPC_C_DISCONNECTED) { + // >>> explain this section + + if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { + DBUG_ON(part->act_state != + XPC_P_DEACTIVATING); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); + } + + IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY); + + /* + * The meaningful CLOSEREQUEST connection state fields are: + * reason = reason connection is to be closed + */ + + ch->flags |= XPC_C_RCLOSEREQUEST; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + reason = args->reason; + if (reason <= xpcSuccess || reason > xpcUnknownReason) { + reason = xpcUnknownReason; + } else if (reason == xpcUnregistering) { + reason = xpcOtherUnregistering; + } + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + } else { + xpc_process_disconnect(ch, &irq_flags); + } + } + + + if (IPI_flags & XPC_IPI_CLOSEREPLY) { + + dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," + " channel=%d\n", ch->partid, ch->number); + + if (ch->flags & XPC_C_DISCONNECTED) { + DBUG_ON(part->act_state != XPC_P_DEACTIVATING); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST)); + + ch->flags |= XPC_C_RCLOSEREPLY; + + if (ch->flags & XPC_C_CLOSEREPLY) { + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + } + } + + + if (IPI_flags & XPC_IPI_OPENREQUEST) { + + dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " + "local_nentries=%d) received from partid=%d, " + "channel=%d\n", args->msg_size, args->local_nentries, + ch->partid, ch->number); + + if ((ch->flags & XPC_C_DISCONNECTING) || + part->act_state == XPC_P_DEACTIVATING) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | + XPC_C_OPENREQUEST))); + DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_OPENREPLY | XPC_C_CONNECTED)); + + /* + * The meaningful OPENREQUEST connection state fields are: + * msg_size = size of channel's messages in bytes + * local_nentries = remote partition's local_nentries + */ + DBUG_ON(args->msg_size == 0); + DBUG_ON(args->local_nentries == 0); + + ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); + ch->remote_nentries = args->local_nentries; + + + if (ch->flags & XPC_C_OPENREQUEST) { + if (args->msg_size != ch->msg_size) { + XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + } else { + ch->msg_size = args->msg_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + } + + xpc_process_connect(ch, &irq_flags); + } + + + if (IPI_flags & XPC_IPI_OPENREPLY) { + + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " + "local_nentries=%d, remote_nentries=%d) received from " + "partid=%d, channel=%d\n", args->local_msgqueue_pa, + 
args->local_nentries, args->remote_nentries, + ch->partid, ch->number); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); + DBUG_ON(ch->flags & XPC_C_CONNECTED); + + /* + * The meaningful OPENREPLY connection state fields are: + * local_msgqueue_pa = physical address of remote + * partition's local_msgqueue + * local_nentries = remote partition's local_nentries + * remote_nentries = remote partition's remote_nentries + */ + DBUG_ON(args->local_msgqueue_pa == 0); + DBUG_ON(args->local_nentries == 0); + DBUG_ON(args->remote_nentries == 0); + + ch->flags |= XPC_C_ROPENREPLY; + ch->remote_msgqueue_pa = args->local_msgqueue_pa; + + if (args->local_nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " + "remote_nentries=%d, old remote_nentries=%d, " + "partid=%d, channel=%d\n", + args->local_nentries, ch->remote_nentries, + ch->partid, ch->number); + + ch->remote_nentries = args->local_nentries; + } + if (args->remote_nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " + "local_nentries=%d, old local_nentries=%d, " + "partid=%d, channel=%d\n", + args->remote_nentries, ch->local_nentries, + ch->partid, ch->number); + + ch->local_nentries = args->remote_nentries; + } + + xpc_process_connect(ch, &irq_flags); + } + + spin_unlock_irqrestore(&ch->lock, irq_flags); +} + + +/* + * Attempt to establish a channel connection to a remote partition. + */ +static enum xpc_retval +xpc_connect_channel(struct xpc_channel *ch) +{ + unsigned long irq_flags; + struct xpc_registration *registration = &xpc_registrations[ch->number]; + + + if (down_interruptible(®istration->sema) != 0) { + return xpcInterrupted; + } + + if (!XPC_CHANNEL_REGISTERED(ch->number)) { + up(®istration->sema); + return xpcUnregistered; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + + DBUG_ON(ch->flags & XPC_C_CONNECTED); + DBUG_ON(ch->flags & XPC_C_OPENREQUEST); + + if (ch->flags & XPC_C_DISCONNECTING) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + up(®istration->sema); + return ch->reason; + } + + + /* add info from the channel connect registration to the channel */ + + ch->kthreads_assigned_limit = registration->assigned_limit; + ch->kthreads_idle_limit = registration->idle_limit; + DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); + DBUG_ON(atomic_read(&ch->kthreads_idle) != 0); + DBUG_ON(atomic_read(&ch->kthreads_active) != 0); + + ch->func = registration->func; + DBUG_ON(registration->func == NULL); + ch->key = registration->key; + + ch->local_nentries = registration->nentries; + + if (ch->flags & XPC_C_ROPENREQUEST) { + if (registration->msg_size != ch->msg_size) { + /* the local and remote sides aren't the same */ + + /* + * Because XPC_DISCONNECT_CHANNEL() can block we're + * forced to up the registration sema before we unlock + * the channel lock. But that's okay here because we're + * done with the part that required the registration + * sema. XPC_DISCONNECT_CHANNEL() requires that the + * channel lock be locked and will unlock and relock + * the channel lock as needed. 
+ */ + up(®istration->sema); + XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcUnequalMsgSizes; + } + } else { + ch->msg_size = registration->msg_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&xpc_partitions[ch->partid].nchannels_active); + } + + up(®istration->sema); + + + /* initiate the connection */ + + ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); + xpc_IPI_send_openrequest(ch, &irq_flags); + + xpc_process_connect(ch, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + return xpcSuccess; +} + + +/* + * Notify those who wanted to be notified upon delivery of their message. + */ +static void +xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) +{ + struct xpc_notify *notify; + u8 notify_type; + s64 get = ch->w_remote_GP.get - 1; + + + while (++get < put && atomic_read(&ch->n_to_notify) > 0) { + + notify = &ch->notify_queue[get % ch->local_nentries]; + + /* + * See if the notify entry indicates it was associated with + * a message who's sender wants to be notified. It is possible + * that it is, but someone else is doing or has done the + * notification. + */ + notify_type = notify->type; + if (notify_type == 0 || + cmpxchg(¬ify->type, notify_type, 0) != + notify_type) { + continue; + } + + DBUG_ON(notify_type != XPC_N_CALL); + + atomic_dec(&ch->n_to_notify); + + if (notify->func != NULL) { + dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *) notify, get, ch->partid, ch->number); + + notify->func(reason, ch->partid, ch->number, + notify->key); + + dev_dbg(xpc_chan, "notify->func() returned, " + "notify=0x%p, msg_number=%ld, partid=%d, " + "channel=%d\n", (void *) notify, get, + ch->partid, ch->number); + } + } +} + + +/* + * Clear some of the msg flags in the local message queue. + */ +static inline void +xpc_clear_local_msgqueue_flags(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + s64 get; + + + get = ch->w_remote_GP.get; + do { + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + + (get % ch->local_nentries) * ch->msg_size); + msg->flags = 0; + } while (++get < (volatile s64) ch->remote_GP.get); +} + + +/* + * Clear some of the msg flags in the remote message queue. + */ +static inline void +xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + s64 put; + + + put = ch->w_remote_GP.put; + do { + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + + (put % ch->remote_nentries) * ch->msg_size); + msg->flags = 0; + } while (++put < (volatile s64) ch->remote_GP.put); +} + + +static void +xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) +{ + struct xpc_channel *ch = &part->channels[ch_number]; + int nmsgs_sent; + + + ch->remote_GP = part->remote_GPs[ch_number]; + + + /* See what, if anything, has changed for each connected channel */ + + xpc_msgqueue_ref(ch); + + if (ch->w_remote_GP.get == ch->remote_GP.get && + ch->w_remote_GP.put == ch->remote_GP.put) { + /* nothing changed since GPs were last pulled */ + xpc_msgqueue_deref(ch); + return; + } + + if (!(ch->flags & XPC_C_CONNECTED)){ + xpc_msgqueue_deref(ch); + return; + } + + + /* + * First check to see if messages recently sent by us have been + * received by the other side. (The remote GET value will have + * changed since we last looked at it.) 
+ */ + + if (ch->w_remote_GP.get != ch->remote_GP.get) { + + /* + * We need to notify any senders that want to be notified + * that their sent messages have been received by their + * intended recipients. We need to do this before updating + * w_remote_GP.get so that we don't allocate the same message + * queue entries prematurely (see xpc_allocate_msg()). + */ + if (atomic_read(&ch->n_to_notify) > 0) { + /* + * Notify senders that messages sent have been + * received and delivered by the other side. + */ + xpc_notify_senders(ch, xpcMsgDelivered, + ch->remote_GP.get); + } + + /* + * Clear msg->flags in previously sent messages, so that + * they're ready for xpc_allocate_msg(). + */ + xpc_clear_local_msgqueue_flags(ch); + + (volatile s64) ch->w_remote_GP.get = ch->remote_GP.get; + + dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, " + "channel=%d\n", ch->w_remote_GP.get, ch->partid, + ch->number); + + /* + * If anyone was waiting for message queue entries to become + * available, wake them up. + */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { + wake_up(&ch->msg_allocate_wq); + } + } + + + /* + * Now check for newly sent messages by the other side. (The remote + * PUT value will have changed since we last looked at it.) + */ + + if (ch->w_remote_GP.put != ch->remote_GP.put) { + /* + * Clear msg->flags in previously received messages, so that + * they're ready for xpc_get_deliverable_msg(). + */ + xpc_clear_remote_msgqueue_flags(ch); + + (volatile s64) ch->w_remote_GP.put = ch->remote_GP.put; + + dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " + "channel=%d\n", ch->w_remote_GP.put, ch->partid, + ch->number); + + nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get; + if (nmsgs_sent > 0) { + dev_dbg(xpc_chan, "msgs waiting to be copied and " + "delivered=%d, partid=%d, channel=%d\n", + nmsgs_sent, ch->partid, ch->number); + + if (ch->flags & XPC_C_CONNECTCALLOUT) { + xpc_activate_kthreads(ch, nmsgs_sent); + } + } + } + + xpc_msgqueue_deref(ch); +} + + +void +xpc_process_channel_activity(struct xpc_partition *part) +{ + unsigned long irq_flags; + u64 IPI_amo, IPI_flags; + struct xpc_channel *ch; + int ch_number; + + + IPI_amo = xpc_get_IPI_flags(part); + + /* + * Initiate channel connections for registered channels. + * + * For each connected channel that has pending messages activate idle + * kthreads and/or create new kthreads as needed. + */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + + /* + * Process any open or close related IPI flags, and then deal + * with connecting or disconnecting the channel as required. 
+ */ + + IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); + + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { + xpc_process_openclose_IPI(part, ch_number, IPI_flags); + } + + + if (ch->flags & XPC_C_DISCONNECTING) { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_disconnect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + continue; + } + + if (part->act_state == XPC_P_DEACTIVATING) { + continue; + } + + if (!(ch->flags & XPC_C_CONNECTED)) { + if (!(ch->flags & XPC_C_OPENREQUEST)) { + DBUG_ON(ch->flags & XPC_C_SETUP); + (void) xpc_connect_channel(ch); + } else { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_connect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + continue; + } + + + /* + * Process any message related IPI flags, this may involve the + * activation of kthreads to deliver any pending messages sent + * from the other partition. + */ + + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { + xpc_process_msg_IPI(part, ch_number); + } + } +} + + +/* + * XPC's heartbeat code calls this function to inform XPC that a partition has + * gone down. XPC responds by tearing down the XPartition Communication + * infrastructure used for the just downed partition. + * + * XPC's heartbeat code will never call this function and xpc_partition_up() + * at the same time. Nor will it ever make multiple calls to either function + * at the same time. + */ +void +xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason) +{ + unsigned long irq_flags; + int ch_number; + struct xpc_channel *ch; + + + dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", + XPC_PARTID(part), reason); + + if (!xpc_part_ref(part)) { + /* infrastructure for this partition isn't currently set up */ + return; + } + + + /* disconnect all channels associated with the downed partition */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + + xpc_msgqueue_ref(ch); + spin_lock_irqsave(&ch->lock, irq_flags); + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + xpc_msgqueue_deref(ch); + } + + xpc_wakeup_channel_mgr(part); + + xpc_part_deref(part); +} + + +/* + * Teardown the infrastructure necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +void +xpc_teardown_infrastructure(struct xpc_partition *part) +{ + partid_t partid = XPC_PARTID(part); + + + /* + * We start off by making this partition inaccessible to local + * processes by marking it as no longer setup. Then we make it + * inaccessible to remote processes by clearing the XPC per partition + * specific variable's magic # (which indicates that these variables + * are no longer valid) and by ignoring all XPC notify IPIs sent to + * this partition. + */ + + DBUG_ON(atomic_read(&part->nchannels_active) != 0); + DBUG_ON(part->setup_state != XPC_P_SETUP); + part->setup_state = XPC_P_WTEARDOWN; + + xpc_vars_part[partid].magic = 0; + + + free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid); + + + /* + * Before proceding with the teardown we have to wait until all + * existing references cease. + */ + wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); + + + /* now we can begin tearing down the infrastructure */ + + part->setup_state = XPC_P_TORNDOWN; + + /* in case we've still got outstanding timers registered... 
*/ + del_timer_sync(&part->dropped_IPI_timer); + + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->channels); + part->channels = NULL; + part->local_IPI_amo_va = NULL; +} + + +/* + * Called by XP at the time of channel connection registration to cause + * XPC to establish connections to all currently active partitions. + */ +void +xpc_initiate_connect(int ch_number) +{ + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + DBUG_ON(ch->flags & XPC_C_OPENREQUEST); + DBUG_ON(ch->flags & XPC_C_CONNECTED); + DBUG_ON(ch->flags & XPC_C_SETUP); + + /* + * Initiate the establishment of a connection + * on the newly registered channel to the + * remote partition. + */ + xpc_wakeup_channel_mgr(part); + } + + xpc_part_deref(part); + } + } +} + + +void +xpc_connected_callout(struct xpc_channel *ch) +{ + unsigned long irq_flags; + + + /* let the registerer know that a connection has been established */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + + ch->func(xpcConnected, ch->partid, ch->number, + (void *) (u64) ch->local_nentries, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + } + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_CONNECTCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); +} + + +/* + * Called by XP at the time of channel connection unregistration to cause + * XPC to teardown all current connections for the specified channel. + * + * Before returning xpc_initiate_disconnect() will wait until all connections + * on the specified channel have been closed/torndown. So the caller can be + * assured that they will not be receiving any more callouts from XPC to the + * function they registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_initiate_disconnect(int ch_number) +{ + unsigned long irq_flags; + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + /* initiate the channel disconnect for every active partition */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + + XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, + &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + } + } + + xpc_disconnect_wait(ch_number); +} + + +/* + * To disconnect a channel, and reflect it back to all who may be waiting. + * + * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by + * >>> xpc_free_msgqueues(). + * + * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN. 
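+ *
+ * Note that the channel's lock is dropped and then reacquired within
+ * this routine (to wake waiters on the message allocate wait queue and
+ * to make any pending notify callouts), but it is held again by the
+ * time this routine returns.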
+ */ +void +xpc_disconnect_channel(const int line, struct xpc_channel *ch, + enum xpc_retval reason, unsigned long *irq_flags) +{ + u32 flags; + + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { + return; + } + DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); + + dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", + reason, line, ch->partid, ch->number); + + XPC_SET_REASON(ch, reason, line); + + flags = ch->flags; + /* some of these may not have been set */ + ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | + XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_CONNECTING | XPC_C_CONNECTED); + + ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); + xpc_IPI_send_closerequest(ch, irq_flags); + + if (flags & XPC_C_CONNECTED) { + ch->flags |= XPC_C_WASCONNECTED; + } + + if (atomic_read(&ch->kthreads_idle) > 0) { + /* wake all idle kthreads so they can exit */ + wake_up_all(&ch->idle_wq); + } + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + + + /* wake those waiting to allocate an entry from the local msg queue */ + + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { + wake_up(&ch->msg_allocate_wq); + } + + /* wake those waiting for notify completion */ + + if (atomic_read(&ch->n_to_notify) > 0) { + xpc_notify_senders(ch, reason, ch->w_local_GP.put); + } + + spin_lock_irqsave(&ch->lock, *irq_flags); +} + + +void +xpc_disconnected_callout(struct xpc_channel *ch) +{ + /* + * Let the channel's registerer know that the channel is now + * disconnected. We don't want to do this if the registerer was never + * informed of a connection being made, unless the disconnect was for + * abnormal reasons. + */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " + "channel=%d\n", ch->reason, ch->partid, ch->number); + + ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " + "channel=%d\n", ch->reason, ch->partid, ch->number); + } +} + + +/* + * Wait for a message entry to become available for the specified channel, + * but don't wait any longer than 1 jiffy. + */ +static enum xpc_retval +xpc_allocate_msg_wait(struct xpc_channel *ch) +{ + enum xpc_retval ret; + + + if (ch->flags & XPC_C_DISCONNECTING) { + DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? + return ch->reason; + } + + atomic_inc(&ch->n_on_msg_allocate_wq); + ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); + atomic_dec(&ch->n_on_msg_allocate_wq); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? + } else if (ret == 0) { + ret = xpcTimeout; + } else { + ret = xpcInterrupted; + } + + return ret; +} + + +/* + * Allocate an entry for a message from the message queue associated with the + * specified channel. + */ +static enum xpc_retval +xpc_allocate_msg(struct xpc_channel *ch, u32 flags, + struct xpc_msg **address_of_msg) +{ + struct xpc_msg *msg; + enum xpc_retval ret; + s64 put; + + + /* this reference will be dropped in xpc_send_msg() */ + xpc_msgqueue_ref(ch); + + if (ch->flags & XPC_C_DISCONNECTING) { + xpc_msgqueue_deref(ch); + return ch->reason; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return xpcNotConnected; + } + + + /* + * Get the next available message entry from the local message queue. + * If none are available, we'll make sure that we grab the latest + * GP values. 
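+	 *
+	 * An entry is reserved by advancing w_local_GP.put with cmpxchg(),
+	 * so concurrent callers can safely race here; whoever succeeds in
+	 * advancing the counter owns the corresponding entry.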
+ */ + ret = xpcTimeout; + + while (1) { + + put = (volatile s64) ch->w_local_GP.put; + if (put - (volatile s64) ch->w_remote_GP.get < + ch->local_nentries) { + + /* There are available message entries. We need to try + * to secure one for ourselves. We'll do this by trying + * to increment w_local_GP.put as long as someone else + * doesn't beat us to it. If they do, we'll have to + * try again. + */ + if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == + put) { + /* we got the entry referenced by put */ + break; + } + continue; /* try again */ + } + + + /* + * There aren't any available msg entries at this time. + * + * In waiting for a message entry to become available, + * we set a timeout in case the other side is not + * sending completion IPIs. This lets us fake an IPI + * that will cause the IPI handler to fetch the latest + * GP values as if an IPI was sent by the other side. + */ + if (ret == xpcTimeout) { + xpc_IPI_send_local_msgrequest(ch); + } + + if (flags & XPC_NOWAIT) { + xpc_msgqueue_deref(ch); + return xpcNoWait; + } + + ret = xpc_allocate_msg_wait(ch); + if (ret != xpcInterrupted && ret != xpcTimeout) { + xpc_msgqueue_deref(ch); + return ret; + } + } + + + /* get the message's address and initialize it */ + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + + (put % ch->local_nentries) * ch->msg_size); + + + DBUG_ON(msg->flags != 0); + msg->number = put; + + dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", put + 1, + (void *) msg, msg->number, ch->partid, ch->number); + + *address_of_msg = msg; + + return xpcSuccess; +} + + +/* + * Allocate an entry for a message from the message queue associated with the + * specified channel. NOTE that this routine can sleep waiting for a message + * entry to become available. To not sleep, pass in the XPC_NOWAIT flag. + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel #. + * flags - see xpc.h for valid flags. + * payload - address of the allocated payload area pointer (filled in on + * return) in which the user-defined message is constructed. + */ +enum xpc_retval +xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + enum xpc_retval ret = xpcUnknownReason; + struct xpc_msg *msg; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + *payload = NULL; + + if (xpc_part_ref(part)) { + ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); + xpc_part_deref(part); + + if (msg != NULL) { + *payload = &msg->payload; + } + } + + return ret; +} + + +/* + * Now we actually send the messages that are ready to be sent by advancing + * the local message queue's Put value and then send an IPI to the recipient + * partition. 
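+ *
+ * Only a contiguous run of XPC_M_READY messages, starting with the one
+ * referenced by the current Put value, is made visible; an entry that
+ * isn't ready yet stops the scan so messages are never exposed out of
+ * order.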
+ */ +static void +xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) +{ + struct xpc_msg *msg; + s64 put = initial_put + 1; + int send_IPI = 0; + + + while (1) { + + while (1) { + if (put == (volatile s64) ch->w_local_GP.put) { + break; + } + + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + + (put % ch->local_nentries) * ch->msg_size); + + if (!(msg->flags & XPC_M_READY)) { + break; + } + + put++; + } + + if (put == initial_put) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != + initial_put) { + /* someone else beat us to it */ + DBUG_ON((volatile s64) ch->local_GP->put < initial_put); + break; + } + + /* we just set the new value of local_GP->put */ + + dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " + "channel=%d\n", put, ch->partid, ch->number); + + send_IPI = 1; + + /* + * We need to ensure that the message referenced by + * local_GP->put is not XPC_M_READY or that local_GP->put + * equals w_local_GP.put, so we'll go have a look. + */ + initial_put = put; + } + + if (send_IPI) { + xpc_IPI_send_msgrequest(ch); + } +} + + +/* + * Common code that does the actual sending of the message by advancing the + * local message queue's Put value and sends an IPI to the partition the + * message is being sent to. + */ +static enum xpc_retval +xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, + xpc_notify_func func, void *key) +{ + enum xpc_retval ret = xpcSuccess; + struct xpc_notify *notify = NULL; // >>> to keep the compiler happy!! + s64 put, msg_number = msg->number; + + + DBUG_ON(notify_type == XPC_N_CALL && func == NULL); + DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != + msg_number % ch->local_nentries); + DBUG_ON(msg->flags & XPC_M_READY); + + if (ch->flags & XPC_C_DISCONNECTING) { + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ch->reason; + } + + if (notify_type != 0) { + /* + * Tell the remote side to send an ACK interrupt when the + * message has been delivered. + */ + msg->flags |= XPC_M_INTERRUPT; + + atomic_inc(&ch->n_to_notify); + + notify = &ch->notify_queue[msg_number % ch->local_nentries]; + notify->func = func; + notify->key = key; + (volatile u8) notify->type = notify_type; + + // >>> is a mb() needed here? + + if (ch->flags & XPC_C_DISCONNECTING) { + /* + * An error occurred between our last error check and + * this one. We will try to clear the type field from + * the notify entry. If we succeed then + * xpc_disconnect_channel() didn't already process + * the notify entry. + */ + if (cmpxchg(¬ify->type, notify_type, 0) == + notify_type) { + atomic_dec(&ch->n_to_notify); + ret = ch->reason; + } + + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ret; + } + } + + msg->flags |= XPC_M_READY; + + /* + * The preceding store of msg->flags must occur before the following + * load of ch->local_GP->put. + */ + mb(); + + /* see if the message is next in line to be sent, if so send it */ + + put = ch->local_GP->put; + if (put == msg_number) { + xpc_send_msgs(ch, put); + } + + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ret; +} + + +/* + * Send a message previously allocated using xpc_initiate_allocate() on the + * specified channel connected to the specified partition. + * + * This routine will not wait for the message to be received, nor will + * notification be given when it does happen. 
Once this routine has returned + * the message entry allocated via xpc_initiate_allocate() is no longer + * accessable to the caller. + * + * This routine, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * payload - pointer to the payload area allocated via + * xpc_initiate_allocate(). + */ +enum xpc_retval +xpc_initiate_send(partid_t partid, int ch_number, void *payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + enum xpc_retval ret; + + + dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg, + partid, ch_number); + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(msg == NULL); + + ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL); + + return ret; +} + + +/* + * Send a message previously allocated using xpc_initiate_allocate on the + * specified channel connected to the specified partition. + * + * This routine will not wait for the message to be sent. Once this routine + * has returned the message entry allocated via xpc_initiate_allocate() is no + * longer accessable to the caller. + * + * Once the remote end of the channel has received the message, the function + * passed as an argument to xpc_initiate_send_notify() will be called. This + * allows the sender to free up or re-use any buffers referenced by the + * message, but does NOT mean the message has been processed at the remote + * end by a receiver. + * + * If this routine returns an error, the caller's function will NOT be called. + * + * This routine, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # to send message on. + * payload - pointer to the payload area allocated via + * xpc_initiate_allocate(). + * func - function to call with asynchronous notification of message + * receipt. THIS FUNCTION MUST BE NON-BLOCKING. + * key - user-defined key to be passed to the function when it's called. 
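+ *
+ * Should the channel be disconnected before the message is delivered,
+ * the function is called with the channel's disconnect reason rather
+ * than xpcMsgDelivered.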
+ */ +enum xpc_retval +xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, + xpc_notify_func func, void *key) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + enum xpc_retval ret; + + + dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg, + partid, ch_number); + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(msg == NULL); + DBUG_ON(func == NULL); + + ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, + func, key); + return ret; +} + + +static struct xpc_msg * +xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + struct xpc_msg *remote_msg, *msg; + u32 msg_index, nmsgs; + u64 msg_offset; + enum xpc_retval ret; + + + if (down_interruptible(&ch->msg_to_pull_sema) != 0) { + /* we were interrupted by a signal */ + return NULL; + } + + while (get >= ch->next_msg_to_pull) { + + /* pull as many messages as are ready and able to be pulled */ + + msg_index = ch->next_msg_to_pull % ch->remote_nentries; + + DBUG_ON(ch->next_msg_to_pull >= + (volatile s64) ch->w_remote_GP.put); + nmsgs = (volatile s64) ch->w_remote_GP.put - + ch->next_msg_to_pull; + if (msg_index + nmsgs > ch->remote_nentries) { + /* ignore the ones that wrap the msg queue for now */ + nmsgs = ch->remote_nentries - msg_index; + } + + msg_offset = msg_index * ch->msg_size; + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + + msg_offset); + remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + + msg_offset); + + if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, + nmsgs * ch->msg_size)) != xpcSuccess) { + + dev_dbg(xpc_chan, "failed to pull %d msgs starting with" + " msg %ld from partition %d, channel=%d, " + "ret=%d\n", nmsgs, ch->next_msg_to_pull, + ch->partid, ch->number, ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + + up(&ch->msg_to_pull_sema); + return NULL; + } + + mb(); /* >>> this may not be needed, we're not sure */ + + ch->next_msg_to_pull += nmsgs; + } + + up(&ch->msg_to_pull_sema); + + /* return the message we were looking for */ + msg_offset = (get % ch->remote_nentries) * ch->msg_size; + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); + + return msg; +} + + +/* + * Get a message to be delivered. + */ +static struct xpc_msg * +xpc_get_deliverable_msg(struct xpc_channel *ch) +{ + struct xpc_msg *msg = NULL; + s64 get; + + + do { + if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { + break; + } + + get = (volatile s64) ch->w_local_GP.get; + if (get == (volatile s64) ch->w_remote_GP.put) { + break; + } + + /* There are messages waiting to be pulled and delivered. + * We need to try to secure one for ourselves. We'll do this + * by trying to increment w_local_GP.get and hope that no one + * else beats us to it. If they do, we'll we'll simply have + * to try again for the next one. 
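+		 *
+		 * Winning the cmpxchg() also serializes delivery; each
+		 * message number is handed to exactly one caller.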
+ */ + + if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { + /* we got the entry referenced by get */ + + dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " + "partid=%d, channel=%d\n", get + 1, + ch->partid, ch->number); + + /* pull the message from the remote partition */ + + msg = xpc_pull_remote_msg(ch, get); + + DBUG_ON(msg != NULL && msg->number != get); + DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); + DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); + + break; + } + + } while (1); + + return msg; +} + + +/* + * Deliver a message to its intended recipient. + */ +void +xpc_deliver_msg(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + + + if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { + + /* + * This ref is taken to protect the payload itself from being + * freed before the user is finished with it, which the user + * indicates by calling xpc_initiate_received(). + */ + xpc_msgqueue_ref(ch); + + atomic_inc(&ch->kthreads_active); + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *) msg, msg->number, ch->partid, + ch->number); + + /* deliver the message to its intended recipient */ + ch->func(xpcMsgReceived, ch->partid, ch->number, + &msg->payload, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *) msg, msg->number, ch->partid, + ch->number); + } + + atomic_dec(&ch->kthreads_active); + } +} + + +/* + * Now we actually acknowledge the messages that have been delivered and ack'd + * by advancing the cached remote message queue's Get value and if requested + * send an IPI to the message sender's partition. + */ +static void +xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) +{ + struct xpc_msg *msg; + s64 get = initial_get + 1; + int send_IPI = 0; + + + while (1) { + + while (1) { + if (get == (volatile s64) ch->w_local_GP.get) { + break; + } + + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + + (get % ch->remote_nentries) * ch->msg_size); + + if (!(msg->flags & XPC_M_DONE)) { + break; + } + + msg_flags |= msg->flags; + get++; + } + + if (get == initial_get) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != + initial_get) { + /* someone else beat us to it */ + DBUG_ON((volatile s64) ch->local_GP->get <= + initial_get); + break; + } + + /* we just set the new value of local_GP->get */ + + dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " + "channel=%d\n", get, ch->partid, ch->number); + + send_IPI = (msg_flags & XPC_M_INTERRUPT); + + /* + * We need to ensure that the message referenced by + * local_GP->get is not XPC_M_DONE or that local_GP->get + * equals w_local_GP.get, so we'll go have a look. + */ + initial_get = get; + } + + if (send_IPI) { + xpc_IPI_send_msgrequest(ch); + } +} + + +/* + * Acknowledge receipt of a delivered message. + * + * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition + * that sent the message. + * + * This function, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # message received on. + * payload - pointer to the payload area allocated via + * xpc_initiate_allocate(). 
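+ *
+ * Marking the message XPC_M_DONE allows the cached Get value to be
+ * advanced past it (see xpc_acknowledge_msgs()), which in turn lets
+ * the sending side reuse that message queue entry.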
+ */ +void +xpc_initiate_received(partid_t partid, int ch_number, void *payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + s64 get, msg_number = msg->number; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + ch = &part->channels[ch_number]; + + dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", + (void *) msg, msg_number, ch->partid, ch->number); + + DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != + msg_number % ch->remote_nentries); + DBUG_ON(msg->flags & XPC_M_DONE); + + msg->flags |= XPC_M_DONE; + + /* + * The preceding store of msg->flags must occur before the following + * load of ch->local_GP->get. + */ + mb(); + + /* + * See if this message is next in line to be acknowledged as having + * been delivered. + */ + get = ch->local_GP->get; + if (get == msg_number) { + xpc_acknowledge_msgs(ch, get, msg->flags); + } + + /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ + xpc_msgqueue_deref(ch); +} + diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c new file mode 100644 index 0000000..177ddb7 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -0,0 +1,1064 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) support - standard version. + * + * XPC provides a message passing capability that crosses partition + * boundaries. This module is made up of two parts: + * + * partition This part detects the presence/absence of other + * partitions. It provides a heartbeat and monitors + * the heartbeats of other partitions. + * + * channel This part manages the channels and sends/receives + * messages across them to/from other partitions. + * + * There are a couple of additional functions residing in XP, which + * provide an interface to XPC for its users. + * + * + * Caveats: + * + * . We currently have no way to determine which nasid an IPI came + * from. Thus, xpc_IPI_send() does a remote AMO write followed by + * an IPI. The AMO indicates where data is to be pulled from, so + * after the IPI arrives, the remote partition checks the AMO word. + * The IPI can actually arrive before the AMO however, so other code + * must periodically check for this case. Also, remote AMO operations + * do not reliably time out. Thus we do a remote PIO read solely to + * know whether the remote partition is down and whether we should + * stop sending IPIs to it. This remote PIO read operation is set up + * in a special nofault region so SAL knows to ignore (and cleanup) + * any errors due to the remote AMO write, PIO read, and/or PIO + * write operations. + * + * If/when new hardware solves this IPI problem, we should abandon + * the current approach. 
+ * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xpc.h" + + +/* define two XPC debug device structures to be used with dev_dbg() et al */ + +struct device_driver xpc_dbg_name = { + .name = "xpc" +}; + +struct device xpc_part_dbg_subname = { + .bus_id = {0}, /* set to "part" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device xpc_chan_dbg_subname = { + .bus_id = {0}, /* set to "chan" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device *xpc_part = &xpc_part_dbg_subname; +struct device *xpc_chan = &xpc_chan_dbg_subname; + + +/* systune related variables for /proc/sys directories */ + +static int xpc_hb_min = 1; +static int xpc_hb_max = 10; + +static int xpc_hb_check_min = 10; +static int xpc_hb_check_max = 120; + +static ctl_table xpc_sys_xpc_hb_dir[] = { + { + 1, + "hb_interval", + &xpc_hb_interval, + sizeof(int), + 0644, + NULL, + &proc_dointvec_minmax, + &sysctl_intvec, + NULL, + &xpc_hb_min, &xpc_hb_max + }, + { + 2, + "hb_check_interval", + &xpc_hb_check_interval, + sizeof(int), + 0644, + NULL, + &proc_dointvec_minmax, + &sysctl_intvec, + NULL, + &xpc_hb_check_min, &xpc_hb_check_max + }, + {0} +}; +static ctl_table xpc_sys_xpc_dir[] = { + { + 1, + "hb", + NULL, + 0, + 0555, + xpc_sys_xpc_hb_dir + }, + {0} +}; +static ctl_table xpc_sys_dir[] = { + { + 1, + "xpc", + NULL, + 0, + 0555, + xpc_sys_xpc_dir + }, + {0} +}; +static struct ctl_table_header *xpc_sysctl; + + +/* #of IRQs received */ +static atomic_t xpc_act_IRQ_rcvd; + +/* IRQ handler notifies this wait queue on receipt of an IRQ */ +static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); + +static unsigned long xpc_hb_check_timeout; + +/* xpc_hb_checker thread exited notification */ +static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited); + +/* xpc_discovery thread exited notification */ +static DECLARE_MUTEX_LOCKED(xpc_discovery_exited); + + +static struct timer_list xpc_hb_timer; + + +static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); + + +/* + * Notify the heartbeat check thread that an IRQ has been received. + */ +static irqreturn_t +xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs) +{ + atomic_inc(&xpc_act_IRQ_rcvd); + wake_up_interruptible(&xpc_act_IRQ_wq); + return IRQ_HANDLED; +} + + +/* + * Timer to produce the heartbeat. The timer structures function is + * already set when this is initially called. A tunable is used to + * specify when the next timeout should occur. + */ +static void +xpc_hb_beater(unsigned long dummy) +{ + xpc_vars->heartbeat++; + + if (jiffies >= xpc_hb_check_timeout) { + wake_up_interruptible(&xpc_act_IRQ_wq); + } + + xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); + add_timer(&xpc_hb_timer); +} + + +/* + * This thread is responsible for nearly all of the partition + * activation/deactivation. 
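+ *
+ * It sleeps until either an activate IRQ arrives, the heartbeat check
+ * interval expires, or XPC is exiting, whichever happens first.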
+ */ +static int +xpc_hb_checker(void *ignore) +{ + int last_IRQ_count = 0; + int new_IRQ_count; + int force_IRQ=0; + + + /* this thread was marked active by xpc_hb_init() */ + + daemonize(XPC_HB_CHECK_THREAD_NAME); + + set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); + + xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); + + while (!(volatile int) xpc_exiting) { + + /* wait for IRQ or timeout */ + (void) wait_event_interruptible(xpc_act_IRQ_wq, + (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || + jiffies >= xpc_hb_check_timeout || + (volatile int) xpc_exiting)); + + dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " + "been received\n", + (int) (xpc_hb_check_timeout - jiffies), + atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); + + + /* checking of remote heartbeats is skewed by IRQ handling */ + if (jiffies >= xpc_hb_check_timeout) { + dev_dbg(xpc_part, "checking remote heartbeats\n"); + xpc_check_remote_hb(); + + /* + * We need to periodically recheck to ensure no + * IPI/AMO pairs have been missed. That check + * must always reset xpc_hb_check_timeout. + */ + force_IRQ = 1; + } + + + new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); + if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { + force_IRQ = 0; + + dev_dbg(xpc_part, "found an IRQ to process; will be " + "resetting xpc_hb_check_timeout\n"); + + last_IRQ_count += xpc_identify_act_IRQ_sender(); + if (last_IRQ_count < new_IRQ_count) { + /* retry once to help avoid missing AMO */ + (void) xpc_identify_act_IRQ_sender(); + } + last_IRQ_count = new_IRQ_count; + + xpc_hb_check_timeout = jiffies + + (xpc_hb_check_interval * HZ); + } + } + + dev_dbg(xpc_part, "heartbeat checker is exiting\n"); + + + /* mark this thread as inactive */ + up(&xpc_hb_checker_exited); + return 0; +} + + +/* + * This thread will attempt to discover other partitions to activate + * based on info provided by SAL. This new thread is short lived and + * will exit once discovery is complete. + */ +static int +xpc_initiate_discovery(void *ignore) +{ + daemonize(XPC_DISCOVERY_THREAD_NAME); + + xpc_discovery(); + + dev_dbg(xpc_part, "discovery thread is exiting\n"); + + /* mark this thread as inactive */ + up(&xpc_discovery_exited); + return 0; +} + + +/* + * Establish first contact with the remote partititon. This involves pulling + * the XPC per partition variables from the remote partition and waiting for + * the remote partition to pull ours. + */ +static enum xpc_retval +xpc_make_first_contact(struct xpc_partition *part) +{ + enum xpc_retval ret; + + + while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { + if (ret != xpcRetry) { + XPC_DEACTIVATE_PARTITION(part, ret); + return ret; + } + + dev_dbg(xpc_chan, "waiting to make first contact with " + "partition %d\n", XPC_PARTID(part)); + + /* wait a 1/4 of a second or so */ + set_current_state(TASK_INTERRUPTIBLE); + (void) schedule_timeout(0.25 * HZ); + + if (part->act_state == XPC_P_DEACTIVATING) { + return part->reason; + } + } + + return xpc_mark_partition_active(part); +} + + +/* + * The first kthread assigned to a newly activated partition is the one + * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to + * that kthread until the partition is brought down, at which time that kthread + * returns back to XPC HB. (The return of that kthread will signify to XPC HB + * that XPC has dismantled all communication infrastructure for the associated + * partition.) This kthread becomes the channel manager for that partition. 
+ * + * Each active partition has a channel manager, who, besides connecting and + * disconnecting channels, will ensure that each of the partition's connected + * channels has the required number of assigned kthreads to get the work done. + */ +static void +xpc_channel_mgr(struct xpc_partition *part) +{ + while (part->act_state != XPC_P_DEACTIVATING || + atomic_read(&part->nchannels_active) > 0) { + + xpc_process_channel_activity(part); + + + /* + * Wait until we've been requested to activate kthreads or + * all of the channel's message queues have been torn down or + * a signal is pending. + * + * The channel_mgr_requests is set to 1 after being awakened, + * This is done to prevent the channel mgr from making one pass + * through the loop for each request, since he will + * be servicing all the requests in one pass. The reason it's + * set to 1 instead of 0 is so that other kthreads will know + * that the channel mgr is running and won't bother trying to + * wake him up. + */ + atomic_dec(&part->channel_mgr_requests); + (void) wait_event_interruptible(part->channel_mgr_wq, + (atomic_read(&part->channel_mgr_requests) > 0 || + (volatile u64) part->local_IPI_amo != 0 || + ((volatile u8) part->act_state == + XPC_P_DEACTIVATING && + atomic_read(&part->nchannels_active) == 0))); + atomic_set(&part->channel_mgr_requests, 1); + + // >>> Does it need to wakeup periodically as well? In case we + // >>> miscalculated the #of kthreads to wakeup or create? + } +} + + +/* + * When XPC HB determines that a partition has come up, it will create a new + * kthread and that kthread will call this function to attempt to set up the + * basic infrastructure used for Cross Partition Communication with the newly + * upped partition. + * + * The kthread that was created by XPC HB and which setup the XPC + * infrastructure will remain assigned to the partition until the partition + * goes down. At which time the kthread will teardown the XPC infrastructure + * and then exit. + * + * XPC HB will put the remote partition's XPC per partition specific variables + * physical address into xpc_partitions[partid].remote_vars_part_pa prior to + * calling xpc_partition_up(). + */ +static void +xpc_partition_up(struct xpc_partition *part) +{ + DBUG_ON(part->channels != NULL); + + dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); + + if (xpc_setup_infrastructure(part) != xpcSuccess) { + return; + } + + /* + * The kthread that XPC HB called us with will become the + * channel manager for this partition. It will not return + * back to XPC HB until the partition's XPC infrastructure + * has been dismantled. 
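+	 *
+	 * The partition reference taken below keeps the infrastructure
+	 * from being torn down while the channel manager is still using
+	 * it; the reference is dropped just before
+	 * xpc_teardown_infrastructure() is called.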
+ */ + + (void) xpc_part_ref(part); /* this will always succeed */ + + if (xpc_make_first_contact(part) == xpcSuccess) { + xpc_channel_mgr(part); + } + + xpc_part_deref(part); + + xpc_teardown_infrastructure(part); +} + + +static int +xpc_activating(void *__partid) +{ + partid_t partid = (u64) __partid; + struct xpc_partition *part = &xpc_partitions[partid]; + unsigned long irq_flags; + struct sched_param param = { sched_priority: MAX_USER_RT_PRIO - 1 }; + int ret; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_DEACTIVATING) { + part->act_state = XPC_P_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + /* indicate the thread is activating */ + DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ); + part->act_state = XPC_P_ACTIVATING; + + XPC_SET_REASON(part, 0, 0); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + dev_dbg(xpc_part, "bringing partition %d up\n", partid); + + daemonize("xpc%02d", partid); + + /* + * This thread needs to run at a realtime priority to prevent a + * significant performance degradation. + */ + ret = sched_setscheduler(current, SCHED_FIFO, ¶m); + if (ret != 0) { + dev_warn(xpc_part, "unable to set pid %d to a realtime " + "priority, ret=%d\n", current->pid, ret); + } + + /* allow this thread and its children to run on any CPU */ + set_cpus_allowed(current, CPU_MASK_ALL); + + /* + * Register the remote partition's AMOs with SAL so it can handle + * and cleanup errors within that address range should the remote + * partition go down. We don't unregister this range because it is + * difficult to tell when outstanding writes to the remote partition + * are finished and thus when it is safe to unregister. This should + * not result in wasted space in the SAL xp_addr_region table because + * we should get the same page for remote_amos_page_pa after module + * reloads and system reboots. + */ + if (sn_register_xp_addr_region(part->remote_amos_page_pa, + PAGE_SIZE, 1) < 0) { + dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " + "xp_addr region\n", partid); + + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_INACTIVE; + XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + XPC_ALLOW_HB(partid, xpc_vars); + xpc_IPI_send_activated(part); + + + /* + * xpc_partition_up() holds this thread and marks this partition as + * XPC_P_ACTIVE by calling xpc_hb_mark_active(). + */ + (void) xpc_partition_up(part); + + xpc_mark_partition_inactive(part); + + if (part->reason == xpcReactivating) { + /* interrupting ourselves results in activating partition */ + xpc_IPI_send_reactivate(part); + } + + return 0; +} + + +void +xpc_activate_partition(struct xpc_partition *part) +{ + partid_t partid = XPC_PARTID(part); + unsigned long irq_flags; + pid_t pid; + + + spin_lock_irqsave(&part->act_lock, irq_flags); + + pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); + + DBUG_ON(part->act_state != XPC_P_INACTIVE); + + if (pid > 0) { + part->act_state = XPC_P_ACTIVATION_REQ; + XPC_SET_REASON(part, xpcCloneKThread, __LINE__); + } else { + XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); + } + + spin_unlock_irqrestore(&part->act_lock, irq_flags); +} + + +/* + * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified + * partition actually sent it. 
Since SGI_XPC_NOTIFY IRQs may be shared by more + * than one partition, we use an AMO_t structure per partition to indicate + * whether a partition has sent an IPI or not. >>> If it has, then wake up the + * associated kthread to handle it. + * + * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC + * running on other partitions. + * + * Noteworthy Arguments: + * + * irq - Interrupt ReQuest number. NOT USED. + * + * dev_id - partid of IPI's potential sender. + * + * regs - processor's context before the processor entered + * interrupt code. NOT USED. + */ +irqreturn_t +xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs) +{ + partid_t partid = (partid_t) (u64) dev_id; + struct xpc_partition *part = &xpc_partitions[partid]; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + + if (xpc_part_ref(part)) { + xpc_check_for_channel_activity(part); + + xpc_part_deref(part); + } + return IRQ_HANDLED; +} + + +/* + * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor + * because the write to their associated IPI amo completed after the IRQ/IPI + * was received. + */ +void +xpc_dropped_IPI_check(struct xpc_partition *part) +{ + if (xpc_part_ref(part)) { + xpc_check_for_channel_activity(part); + + part->dropped_IPI_timer.expires = jiffies + + XPC_P_DROPPED_IPI_WAIT; + add_timer(&part->dropped_IPI_timer); + xpc_part_deref(part); + } +} + + +void +xpc_activate_kthreads(struct xpc_channel *ch, int needed) +{ + int idle = atomic_read(&ch->kthreads_idle); + int assigned = atomic_read(&ch->kthreads_assigned); + int wakeup; + + + DBUG_ON(needed <= 0); + + if (idle > 0) { + wakeup = (needed > idle) ? idle : needed; + needed -= wakeup; + + dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, " + "channel=%d\n", wakeup, ch->partid, ch->number); + + /* only wakeup the requested number of kthreads */ + wake_up_nr(&ch->idle_wq, wakeup); + } + + if (needed <= 0) { + return; + } + + if (needed + assigned > ch->kthreads_assigned_limit) { + needed = ch->kthreads_assigned_limit - assigned; + // >>>should never be less than 0 + if (needed <= 0) { + return; + } + } + + dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", + needed, ch->partid, ch->number); + + xpc_create_kthreads(ch, needed); +} + + +/* + * This function is where XPC's kthreads wait for messages to deliver. 
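+ *
+ * A kthread delivers messages until none remain and then idles on the
+ * channel's idle_wq, unless the channel's idle limit has already been
+ * reached, in which case it returns so the kthread can exit.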
+ */ +static void +xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) +{ + do { + /* deliver messages to their intended recipients */ + + while ((volatile s64) ch->w_local_GP.get < + (volatile s64) ch->w_remote_GP.put && + !((volatile u32) ch->flags & + XPC_C_DISCONNECTING)) { + xpc_deliver_msg(ch); + } + + if (atomic_inc_return(&ch->kthreads_idle) > + ch->kthreads_idle_limit) { + /* too many idle kthreads on this channel */ + atomic_dec(&ch->kthreads_idle); + break; + } + + dev_dbg(xpc_chan, "idle kthread calling " + "wait_event_interruptible_exclusive()\n"); + + (void) wait_event_interruptible_exclusive(ch->idle_wq, + ((volatile s64) ch->w_local_GP.get < + (volatile s64) ch->w_remote_GP.put || + ((volatile u32) ch->flags & + XPC_C_DISCONNECTING))); + + atomic_dec(&ch->kthreads_idle); + + } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); +} + + +static int +xpc_daemonize_kthread(void *args) +{ + partid_t partid = XPC_UNPACK_ARG1(args); + u16 ch_number = XPC_UNPACK_ARG2(args); + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + int n_needed; + + + daemonize("xpc%02dc%d", partid, ch_number); + + dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", + partid, ch_number); + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); + + /* let registerer know that connection has been established */ + + if (atomic_read(&ch->kthreads_assigned) == 1) { + xpc_connected_callout(ch); + + /* + * It is possible that while the callout was being + * made that the remote partition sent some messages. + * If that is the case, we may need to activate + * additional kthreads to help deliver them. We only + * need one less than total #of messages to deliver. + */ + n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; + if (n_needed > 0 && + !(ch->flags & XPC_C_DISCONNECTING)) { + xpc_activate_kthreads(ch, n_needed); + } + } + + xpc_kthread_waitmsgs(part, ch); + } + + if (atomic_dec_return(&ch->kthreads_assigned) == 0 && + ((ch->flags & XPC_C_CONNECTCALLOUT) || + (ch->reason != xpcUnregistering && + ch->reason != xpcOtherUnregistering))) { + xpc_disconnected_callout(ch); + } + + + xpc_msgqueue_deref(ch); + + dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", + partid, ch_number); + + xpc_part_deref(part); + return 0; +} + + +/* + * For each partition that XPC has established communications with, there is + * a minimum of one kernel thread assigned to perform any operation that + * may potentially sleep or block (basically the callouts to the asynchronous + * functions registered via xpc_connect()). + * + * Additional kthreads are created and destroyed by XPC as the workload + * demands. + * + * A kthread is assigned to one of the active channels that exists for a given + * partition. + */ +void +xpc_create_kthreads(struct xpc_channel *ch, int needed) +{ + unsigned long irq_flags; + pid_t pid; + u64 args = XPC_PACK_ARGS(ch->partid, ch->number); + + + while (needed-- > 0) { + pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); + if (pid < 0) { + /* the fork failed */ + + if (atomic_read(&ch->kthreads_assigned) < + ch->kthreads_idle_limit) { + /* + * Flag this as an error only if we have an + * insufficient #of kthreads for the channel + * to function. + * + * No xpc_msgqueue_ref() is needed here since + * the channel mgr is doing this. 
+ */ + spin_lock_irqsave(&ch->lock, irq_flags); + XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + break; + } + + /* + * The following is done on behalf of the newly created + * kthread. That kthread is responsible for doing the + * counterpart to the following before it exits. + */ + (void) xpc_part_ref(&xpc_partitions[ch->partid]); + xpc_msgqueue_ref(ch); + atomic_inc(&ch->kthreads_assigned); + ch->kthreads_created++; // >>> temporary debug only!!! + } +} + + +void +xpc_disconnect_wait(int ch_number) +{ + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + + /* now wait for all callouts to the caller's function to cease */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + +// >>> how do we keep from falling into the window between our check and going +// >>> down and coming back up where sema is re-inited? + if (ch->flags & XPC_C_SETUP) { + (void) down(&ch->teardown_sema); + } + + xpc_part_deref(part); + } + } +} + + +static void +xpc_do_exit(void) +{ + partid_t partid; + int active_part_count; + struct xpc_partition *part; + + + /* now it's time to eliminate our heartbeat */ + del_timer_sync(&xpc_hb_timer); + xpc_vars->heartbeating_to_mask = 0; + + /* indicate to others that our reserved page is uninitialized */ + xpc_rsvd_page->vars_pa = 0; + + /* + * Ignore all incoming interrupts. Without interupts the heartbeat + * checker won't activate any new partitions that may come up. + */ + free_irq(SGI_XPC_ACTIVATE, NULL); + + /* + * Cause the heartbeat checker and the discovery threads to exit. + * We don't want them attempting to activate new partitions as we + * try to deactivate the existing ones. + */ + xpc_exiting = 1; + wake_up_interruptible(&xpc_act_IRQ_wq); + + /* wait for the heartbeat checker thread to mark itself inactive */ + down(&xpc_hb_checker_exited); + + /* wait for the discovery thread to mark itself inactive */ + down(&xpc_discovery_exited); + + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(0.3 * HZ); + set_current_state(TASK_RUNNING); + + + /* wait for all partitions to become inactive */ + + do { + active_part_count = 0; + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + if (part->act_state != XPC_P_INACTIVE) { + active_part_count++; + + XPC_DEACTIVATE_PARTITION(part, xpcUnloading); + } + } + + if (active_part_count) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(0.3 * HZ); + set_current_state(TASK_RUNNING); + } + + } while (active_part_count > 0); + + + /* close down protections for IPI operations */ + xpc_restrict_IPI_ops(); + + + /* clear the interface to XPC's functions */ + xpc_clear_interface(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } +} + + +int __init +xpc_init(void) +{ + int ret; + partid_t partid; + struct xpc_partition *part; + pid_t pid; + + + /* + * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng + * both a partition's reserved page and its XPC variables. Its size was + * based on the size of a reserved page. So we need to ensure that the + * XPC variables will fit as well. 
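+	 *
+	 * The check below verifies that assumption up front, before any
+	 * other initialization is attempted.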
+ */ + if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) { + dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n"); + return -EPERM; + } + DBUG_ON((u64) xpc_remote_copy_buffer != + L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer)); + + snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); + snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); + + xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1); + + /* + * The first few fields of each entry of xpc_partitions[] need to + * be initialized now so that calls to xpc_connect() and + * xpc_disconnect() can be made prior to the activation of any remote + * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE + * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING + * PARTITION HAS BEEN ACTIVATED. + */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); + + part->act_IRQ_rcvd = 0; + spin_lock_init(&part->act_lock); + part->act_state = XPC_P_INACTIVE; + XPC_SET_REASON(part, 0, 0); + part->setup_state = XPC_P_UNSET; + init_waitqueue_head(&part->teardown_wq); + atomic_set(&part->references, 0); + } + + /* + * Open up protections for IPI operations (and AMO operations on + * Shub 1.1 systems). + */ + xpc_allow_IPI_ops(); + + /* + * Interrupts being processed will increment this atomic variable and + * awaken the heartbeat thread which will process the interrupts. + */ + atomic_set(&xpc_act_IRQ_rcvd, 0); + + /* + * This is safe to do before the xpc_hb_checker thread has started + * because the handler releases a wait queue. If an interrupt is + * received before the thread is waiting, it will not go to sleep, + * but rather immediately process the interrupt. + */ + ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, + "xpc hb", NULL); + if (ret != 0) { + dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " + "errno=%d\n", -ret); + + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } + return -EBUSY; + } + + /* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. + */ + xpc_rsvd_page = xpc_rsvd_page_init(); + if (xpc_rsvd_page == NULL) { + dev_err(xpc_part, "could not setup our reserved page\n"); + + free_irq(SGI_XPC_ACTIVATE, NULL); + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } + return -EBUSY; + } + + + /* + * Set the beating to other partitions into motion. This is + * the last requirement for other partitions' discovery to + * initiate communications with us. + */ + init_timer(&xpc_hb_timer); + xpc_hb_timer.function = xpc_hb_beater; + xpc_hb_beater(0); + + + /* + * The real work-horse behind xpc. This processes incoming + * interrupts and monitors remote heartbeats. + */ + pid = kernel_thread(xpc_hb_checker, NULL, 0); + if (pid < 0) { + dev_err(xpc_part, "failed while forking hb check thread\n"); + + /* indicate to others that our reserved page is uninitialized */ + xpc_rsvd_page->vars_pa = 0; + + del_timer_sync(&xpc_hb_timer); + free_irq(SGI_XPC_ACTIVATE, NULL); + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } + return -EBUSY; + } + + + /* + * Startup a thread that will attempt to discover other partitions to + * activate based on info provided by SAL. This new thread is short + * lived and will exit once discovery is complete. 
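+	 *
+	 * If the thread can't be forked, the discovery thread is marked
+	 * as having exited and xpc_do_exit() is called to unwind what has
+	 * been set up so far.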
+ */ + pid = kernel_thread(xpc_initiate_discovery, NULL, 0); + if (pid < 0) { + dev_err(xpc_part, "failed while forking discovery thread\n"); + + /* mark this new thread as a non-starter */ + up(&xpc_discovery_exited); + + xpc_do_exit(); + return -EBUSY; + } + + + /* set the interface to point at XPC's functions */ + xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, + xpc_initiate_allocate, xpc_initiate_send, + xpc_initiate_send_notify, xpc_initiate_received, + xpc_initiate_partid_to_nasids); + + return 0; +} +module_init(xpc_init); + + +void __exit +xpc_exit(void) +{ + xpc_do_exit(); +} +module_exit(xpc_exit); + + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); +MODULE_LICENSE("GPL"); + +module_param(xpc_hb_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " + "heartbeat increments."); + +module_param(xpc_hb_check_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " + "heartbeat checks."); + diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c new file mode 100644 index 0000000..b31d9988 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_partition.c @@ -0,0 +1,971 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) partition support. + * + * This is the part of XPC that detects the presence/absence of + * other partitions. It provides a heartbeat and monitors the + * heartbeats of other partitions. + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xpc.h" + + +/* XPC is exiting flag */ +int xpc_exiting; + + +/* SH_IPI_ACCESS shub register value on startup */ +static u64 xpc_sh1_IPI_access; +static u64 xpc_sh2_IPI_access0; +static u64 xpc_sh2_IPI_access1; +static u64 xpc_sh2_IPI_access2; +static u64 xpc_sh2_IPI_access3; + + +/* original protection values for each node */ +u64 xpc_prot_vec[MAX_COMPACT_NODES]; + + +/* this partition's reserved page */ +struct xpc_rsvd_page *xpc_rsvd_page; + +/* this partition's XPC variables (within the reserved page) */ +struct xpc_vars *xpc_vars; +struct xpc_vars_part *xpc_vars_part; + + +/* + * For performance reasons, each entry of xpc_partitions[] is cacheline + * aligned. And xpc_partitions[] is padded with an additional entry at the + * end so that the last legitimate entry doesn't share its cacheline with + * another variable. + */ +struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; + + +/* + * Generic buffer used to store a local copy of the remote partitions + * reserved page or XPC variables. + * + * xpc_discovery runs only once and is a seperate thread that is + * very likely going to be processing in parallel with receiving + * interrupts. + */ +char ____cacheline_aligned + xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE]; + + +/* systune related variables */ +int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; +int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT; + + +/* + * Given a nasid, get the physical address of the partition's reserved page + * for that nasid. This function returns 0 on any error. 
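+ *
+ * SAL may require more than one pass (SALRET_MORE_PASSES); between
+ * passes the data SAL pointed at is bte_copy'd into the caller
+ * supplied buffer.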
+ */ +static u64 +xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size) +{ + bte_result_t bte_res; + s64 status; + u64 cookie = 0; + u64 rp_pa = nasid; /* seed with nasid */ + u64 len = 0; + + + while (1) { + + status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, + &len); + + dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" + "0x%016lx, address=0x%016lx, len=0x%016lx\n", + status, cookie, rp_pa, len); + + if (status != SALRET_MORE_PASSES) { + break; + } + + if (len > buf_size) { + dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len); + status = SALRET_ERROR; + break; + } + + bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bte_res != BTE_SUCCESS) { + dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); + status = SALRET_ERROR; + break; + } + } + + if (status != SALRET_OK) { + rp_pa = 0; + } + dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); + return rp_pa; +} + + +/* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. + */ +struct xpc_rsvd_page * +xpc_rsvd_page_init(void) +{ + struct xpc_rsvd_page *rp; + AMO_t *amos_page; + u64 rp_pa, next_cl, nasid_array = 0; + int i, ret; + + + /* get the local reserved page's address */ + + rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0), + (u64) xpc_remote_copy_buffer, + XPC_RSVD_PAGE_ALIGNED_SIZE); + if (rp_pa == 0) { + dev_err(xpc_part, "SAL failed to locate the reserved page\n"); + return NULL; + } + rp = (struct xpc_rsvd_page *) __va(rp_pa); + + if (rp->partid != sn_partition_id) { + dev_err(xpc_part, "the reserved page's partid of %d should be " + "%d\n", rp->partid, sn_partition_id); + return NULL; + } + + rp->version = XPC_RP_VERSION; + + /* + * Place the XPC variables on the cache line following the + * reserved page structure. + */ + next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE; + xpc_vars = (struct xpc_vars *) next_cl; + + /* + * Before clearing xpc_vars, see if a page of AMOs had been previously + * allocated. If not we'll need to allocate one and set permissions + * so that cross-partition AMOs are allowed. + * + * The allocated AMO page needs MCA reporting to remain disabled after + * XPC has unloaded. To make this work, we keep a copy of the pointer + * to this page (i.e., amos_page) in the struct xpc_vars structure, + * which is pointed to by the reserved page, and re-use that saved copy + * on subsequent loads of XPC. This AMO page is never freed, and its + * memory protections are never restricted. + */ + if ((amos_page = xpc_vars->amos_page) == NULL) { + amos_page = (AMO_t *) mspec_kalloc_page(0); + if (amos_page == NULL) { + dev_err(xpc_part, "can't allocate page of AMOs\n"); + return NULL; + } + + /* + * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems + * when xpc_allow_IPI_ops() is called via xpc_hb_init(). + */ + if (!enable_shub_wars_1_1()) { + ret = sn_change_memprotect(ia64_tpa((u64) amos_page), + PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) { + dev_err(xpc_part, "can't change memory " + "protections\n"); + mspec_kfree_page((unsigned long) amos_page); + return NULL; + } + } + } + + memset(xpc_vars, 0, sizeof(struct xpc_vars)); + + /* + * Place the XPC per partition specific variables on the cache line + * following the XPC variables structure. 
+ */ + next_cl += XPC_VARS_ALIGNED_SIZE; + memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) * + XP_MAX_PARTITIONS); + xpc_vars_part = (struct xpc_vars_part *) next_cl; + xpc_vars->vars_part_pa = __pa(next_cl); + + xpc_vars->version = XPC_V_VERSION; + xpc_vars->act_nasid = cpuid_to_nasid(0); + xpc_vars->act_phys_cpuid = cpu_physical_id(0); + xpc_vars->amos_page = amos_page; /* save for next load of XPC */ + + + /* + * Initialize the activation related AMO variables. + */ + xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS); + for (i = 1; i < XP_NASID_MASK_WORDS; i++) { + xpc_IPI_init(i + XP_MAX_PARTITIONS); + } + /* export AMO page's physical address to other partitions */ + xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page); + + /* + * This signifies to the remote partition that our reserved + * page is initialized. + */ + (volatile u64) rp->vars_pa = __pa(xpc_vars); + + return rp; +} + + +/* + * Change protections to allow IPI operations (and AMO operations on + * Shub 1.1 systems). + */ +void +xpc_allow_IPI_ops(void) +{ + int node; + int nasid; + + + // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. + + if (is_shub2()) { + xpc_sh2_IPI_access0 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); + xpc_sh2_IPI_access1 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); + xpc_sh2_IPI_access2 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); + xpc_sh2_IPI_access3 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + -1UL); + } + + } else { + xpc_sh1_IPI_access = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + -1UL); + + /* + * Since the BIST collides with memory operations on + * SHUB 1.1 sn_change_memprotect() cannot be used. + */ + if (enable_shub_wars_1_1()) { + /* open up everything */ + xpc_prot_vec[node] = (u64) HUB_L((u64 *) + GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0)); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQRP_MMR_DIR_PRIVEC0), + -1UL); + } + } + } +} + + +/* + * Restrict protections to disallow IPI operations (and AMO operations on + * Shub 1.1 systems). + */ +void +xpc_restrict_IPI_ops(void) +{ + int node; + int nasid; + + + // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. 
+ + if (is_shub2()) { + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + xpc_sh2_IPI_access0); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + xpc_sh2_IPI_access1); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + xpc_sh2_IPI_access2); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + xpc_sh2_IPI_access3); + } + + } else { + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + xpc_sh1_IPI_access); + + if (enable_shub_wars_1_1()) { + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0), + xpc_prot_vec[node]); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQRP_MMR_DIR_PRIVEC0), + xpc_prot_vec[node]); + } + } + } +} + + +/* + * At periodic intervals, scan through all active partitions and ensure + * their heartbeat is still active. If not, the partition is deactivated. + */ +void +xpc_check_remote_hb(void) +{ + struct xpc_vars *remote_vars; + struct xpc_partition *part; + partid_t partid; + bte_result_t bres; + + + remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + if (partid == sn_partition_id) { + continue; + } + + part = &xpc_partitions[partid]; + + if (part->act_state == XPC_P_INACTIVE || + part->act_state == XPC_P_DEACTIVATING) { + continue; + } + + /* pull the remote_hb cache line */ + bres = xp_bte_copy(part->remote_vars_pa, + ia64_tpa((u64) remote_vars), + XPC_VARS_ALIGNED_SIZE, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bres != BTE_SUCCESS) { + XPC_DEACTIVATE_PARTITION(part, + xpc_map_bte_errors(bres)); + continue; + } + + dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat" + " = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid, + remote_vars->heartbeat, part->last_heartbeat, + remote_vars->kdb_status, + remote_vars->heartbeating_to_mask); + + if (((remote_vars->heartbeat == part->last_heartbeat) && + (remote_vars->kdb_status == 0)) || + !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { + + XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); + continue; + } + + part->last_heartbeat = remote_vars->heartbeat; + } +} + + +/* + * Get a copy of the remote partition's rsvd page. + * + * remote_rp points to a buffer that is cacheline aligned for BTE copies and + * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE. 
+ */ +static enum xpc_retval +xpc_get_remote_rp(int nasid, u64 *discovered_nasids, + struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa) +{ + int bres, i; + + + /* get the reserved page's physical address */ + + *remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp, + XPC_RSVD_PAGE_ALIGNED_SIZE); + if (*remote_rsvd_page_pa == 0) { + return xpcNoRsvdPageAddr; + } + + + /* pull over the reserved page structure */ + + bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp), + XPC_RSVD_PAGE_ALIGNED_SIZE, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bres != BTE_SUCCESS) { + return xpc_map_bte_errors(bres); + } + + + if (discovered_nasids != NULL) { + for (i = 0; i < XP_NASID_MASK_WORDS; i++) { + discovered_nasids[i] |= remote_rp->part_nasids[i]; + } + } + + + /* check that the partid is for another partition */ + + if (remote_rp->partid < 1 || + remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { + return xpcInvalidPartid; + } + + if (remote_rp->partid == sn_partition_id) { + return xpcLocalPartid; + } + + + if (XPC_VERSION_MAJOR(remote_rp->version) != + XPC_VERSION_MAJOR(XPC_RP_VERSION)) { + return xpcBadVersion; + } + + return xpcSuccess; +} + + +/* + * Get a copy of the remote partition's XPC variables. + * + * remote_vars points to a buffer that is cacheline aligned for BTE copies and + * assumed to be of size XPC_VARS_ALIGNED_SIZE. + */ +static enum xpc_retval +xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) +{ + int bres; + + + if (remote_vars_pa == 0) { + return xpcVarsNotSet; + } + + + /* pull over the cross partition variables */ + + bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars), + XPC_VARS_ALIGNED_SIZE, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bres != BTE_SUCCESS) { + return xpc_map_bte_errors(bres); + } + + if (XPC_VERSION_MAJOR(remote_vars->version) != + XPC_VERSION_MAJOR(XPC_V_VERSION)) { + return xpcBadVersion; + } + + return xpcSuccess; +} + + +/* + * Prior code has determine the nasid which generated an IPI. Inspect + * that nasid to determine if its partition needs to be activated or + * deactivated. + * + * A partition is consider "awaiting activation" if our partition + * flags indicate it is not active and it has a heartbeat. A + * partition is considered "awaiting deactivation" if our partition + * flags indicate it is active but it has no heartbeat or it is not + * sending its heartbeat to us. + * + * To determine the heartbeat, the remote nasid must have a properly + * initialized reserved page. 
+ */ +static void +xpc_identify_act_IRQ_req(int nasid) +{ + struct xpc_rsvd_page *remote_rp; + struct xpc_vars *remote_vars; + u64 remote_rsvd_page_pa; + u64 remote_vars_pa; + partid_t partid; + struct xpc_partition *part; + enum xpc_retval ret; + + + /* pull over the reserved page structure */ + + remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; + + ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa); + if (ret != xpcSuccess) { + dev_warn(xpc_part, "unable to get reserved page from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + return; + } + + remote_vars_pa = remote_rp->vars_pa; + partid = remote_rp->partid; + part = &xpc_partitions[partid]; + + + /* pull over the cross partition variables */ + + remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; + + ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); + if (ret != xpcSuccess) { + + dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + return; + } + + + part->act_IRQ_rcvd++; + + dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " + "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, + remote_vars->heartbeat, remote_vars->heartbeating_to_mask); + + + if (part->act_state == XPC_P_INACTIVE) { + + part->remote_rp_pa = remote_rsvd_page_pa; + dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", + part->remote_rp_pa); + + part->remote_vars_pa = remote_vars_pa; + dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n", + part->remote_vars_pa); + + part->last_heartbeat = remote_vars->heartbeat; + dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", + part->last_heartbeat); + + part->remote_vars_part_pa = remote_vars->vars_part_pa; + dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", + part->remote_vars_part_pa); + + part->remote_act_nasid = remote_vars->act_nasid; + dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n", + part->remote_act_nasid); + + part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid; + dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n", + part->remote_act_phys_cpuid); + + part->remote_amos_page_pa = remote_vars->amos_page_pa; + dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n", + part->remote_amos_page_pa); + + xpc_activate_partition(part); + + } else if (part->remote_amos_page_pa != remote_vars->amos_page_pa || + !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { + + part->reactivate_nasid = nasid; + XPC_DEACTIVATE_PARTITION(part, xpcReactivating); + } +} + + +/* + * Loop through the activation AMO variables and process any bits + * which are set. Each bit indicates a nasid sending a partition + * activation or deactivation request. + * + * Return #of IRQs detected. + */ +int +xpc_identify_act_IRQ_sender(void) +{ + int word, bit; + u64 nasid_mask; + u64 nasid; /* remote nasid */ + int n_IRQs_detected = 0; + AMO_t *act_amos; + struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page; + + + act_amos = xpc_vars->act_amos; + + + /* scan through act AMO variable looking for non-zero entries */ + for (word = 0; word < XP_NASID_MASK_WORDS; word++) { + + nasid_mask = xpc_IPI_receive(&act_amos[word]); + if (nasid_mask == 0) { + /* no IRQs from nasids in this variable */ + continue; + } + + dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, + nasid_mask); + + + /* + * If this nasid has been added to the machine since + * our partition was reset, this will retain the + * remote nasid in our reserved pages machine mask. 
+ * This is used in the event of module reload. + */ + rp->mach_nasids[word] |= nasid_mask; + + + /* locate the nasid(s) which sent interrupts */ + + for (bit = 0; bit < (8 * sizeof(u64)); bit++) { + if (nasid_mask & (1UL << bit)) { + n_IRQs_detected++; + nasid = XPC_NASID_FROM_W_B(word, bit); + dev_dbg(xpc_part, "interrupt from nasid %ld\n", + nasid); + xpc_identify_act_IRQ_req(nasid); + } + } + } + return n_IRQs_detected; +} + + +/* + * Mark specified partition as active. + */ +enum xpc_retval +xpc_mark_partition_active(struct xpc_partition *part) +{ + unsigned long irq_flags; + enum xpc_retval ret; + + + dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); + + spin_lock_irqsave(&part->act_lock, irq_flags); + if (part->act_state == XPC_P_ACTIVATING) { + part->act_state = XPC_P_ACTIVE; + ret = xpcSuccess; + } else { + DBUG_ON(part->reason == xpcSuccess); + ret = part->reason; + } + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + return ret; +} + + +/* + * Notify XPC that the partition is down. + */ +void +xpc_deactivate_partition(const int line, struct xpc_partition *part, + enum xpc_retval reason) +{ + unsigned long irq_flags; + partid_t partid = XPC_PARTID(part); + + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_INACTIVE) { + XPC_SET_REASON(part, reason, line); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + if (reason == xpcReactivating) { + /* we interrupt ourselves to reactivate partition */ + xpc_IPI_send_reactivate(part); + } + return; + } + if (part->act_state == XPC_P_DEACTIVATING) { + if ((part->reason == xpcUnloading && reason != xpcUnloading) || + reason == xpcReactivating) { + XPC_SET_REASON(part, reason, line); + } + spin_unlock_irqrestore(&part->act_lock, irq_flags); + return; + } + + part->act_state = XPC_P_DEACTIVATING; + XPC_SET_REASON(part, reason, line); + + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + XPC_DISALLOW_HB(partid, xpc_vars); + + dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid, + reason); + + xpc_partition_down(part, reason); +} + + +/* + * Mark specified partition as active. + */ +void +xpc_mark_partition_inactive(struct xpc_partition *part) +{ + unsigned long irq_flags; + + + dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", + XPC_PARTID(part)); + + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; +} + + +/* + * SAL has provided a partition and machine mask. The partition mask + * contains a bit for each even nasid in our partition. The machine + * mask contains a bit for each even nasid in the entire machine. + * + * Using those two bit arrays, we can determine which nasids are + * known in the machine. Each should also have a reserved page + * initialized if they are available for partitioning. 
+ */ +void +xpc_discovery(void) +{ + void *remote_rp_base; + struct xpc_rsvd_page *remote_rp; + struct xpc_vars *remote_vars; + u64 remote_rsvd_page_pa; + u64 remote_vars_pa; + int region; + int max_regions; + int nasid; + struct xpc_rsvd_page *rp; + partid_t partid; + struct xpc_partition *part; + u64 *discovered_nasids; + enum xpc_retval ret; + + + remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE, + GFP_KERNEL, &remote_rp_base); + if (remote_rp == NULL) { + return; + } + remote_vars = (struct xpc_vars *) remote_rp; + + + discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS, + GFP_KERNEL); + if (discovered_nasids == NULL) { + kfree(remote_rp_base); + return; + } + memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS); + + rp = (struct xpc_rsvd_page *) xpc_rsvd_page; + + /* + * The term 'region' in this context refers to the minimum number of + * nodes that can comprise an access protection grouping. The access + * protection is in regards to memory, IOI and IPI. + */ +//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or +//>>> include/asm-ia64/sn/addrs.h +#define SH1_MAX_REGIONS 64 +#define SH2_MAX_REGIONS 256 + max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS; + + for (region = 0; region < max_regions; region++) { + + if ((volatile int) xpc_exiting) { + break; + } + + dev_dbg(xpc_part, "searching region %d\n", region); + + for (nasid = (region * sn_region_size * 2); + nasid < ((region + 1) * sn_region_size * 2); + nasid += 2) { + + if ((volatile int) xpc_exiting) { + break; + } + + dev_dbg(xpc_part, "checking nasid %d\n", nasid); + + + if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) { + dev_dbg(xpc_part, "PROM indicates Nasid %d is " + "part of the local partition; skipping " + "region\n", nasid); + break; + } + + if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) { + dev_dbg(xpc_part, "PROM indicates Nasid %d was " + "not on Numa-Link network at reset\n", + nasid); + continue; + } + + if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) { + dev_dbg(xpc_part, "Nasid %d is part of a " + "partition which was previously " + "discovered\n", nasid); + continue; + } + + + /* pull over the reserved page structure */ + + ret = xpc_get_remote_rp(nasid, discovered_nasids, + remote_rp, &remote_rsvd_page_pa); + if (ret != xpcSuccess) { + dev_dbg(xpc_part, "unable to get reserved page " + "from nasid %d, reason=%d\n", nasid, + ret); + + if (ret == xpcLocalPartid) { + break; + } + continue; + } + + remote_vars_pa = remote_rp->vars_pa; + + partid = remote_rp->partid; + part = &xpc_partitions[partid]; + + + /* pull over the cross partition variables */ + + ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); + if (ret != xpcSuccess) { + dev_dbg(xpc_part, "unable to get XPC variables " + "from nasid %d, reason=%d\n", nasid, + ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + continue; + } + + if (part->act_state != XPC_P_INACTIVE) { + dev_dbg(xpc_part, "partition %d on nasid %d is " + "already activating\n", partid, nasid); + break; + } + + /* + * Register the remote partition's AMOs with SAL so it + * can handle and cleanup errors within that address + * range should the remote partition go down. We don't + * unregister this range because it is difficult to + * tell when outstanding writes to the remote partition + * are finished and thus when it is thus safe to + * unregister. 
This should not result in wasted space + * in the SAL xp_addr_region table because we should + * get the same page for remote_act_amos_pa after + * module reloads and system reboots. + */ + if (sn_register_xp_addr_region( + remote_vars->amos_page_pa, + PAGE_SIZE, 1) < 0) { + dev_dbg(xpc_part, "partition %d failed to " + "register xp_addr region 0x%016lx\n", + partid, remote_vars->amos_page_pa); + + XPC_SET_REASON(part, xpcPhysAddrRegFailed, + __LINE__); + break; + } + + /* + * The remote nasid is valid and available. + * Send an interrupt to that nasid to notify + * it that we are ready to begin activation. + */ + dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, " + "nasid %d, phys_cpuid 0x%x\n", + remote_vars->amos_page_pa, + remote_vars->act_nasid, + remote_vars->act_phys_cpuid); + + xpc_IPI_send_activate(remote_vars); + } + } + + kfree(discovered_nasids); + kfree(remote_rp_base); +} + + +/* + * Given a partid, get the nasids owned by that partition from the + * remote partitions reserved page. + */ +enum xpc_retval +xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) +{ + struct xpc_partition *part; + u64 part_nasid_pa; + int bte_res; + + + part = &xpc_partitions[partid]; + if (part->remote_rp_pa == 0) { + return xpcPartitionDown; + } + + part_nasid_pa = part->remote_rp_pa + + (u64) &((struct xpc_rsvd_page *) 0)->part_nasids; + + bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask), + L1_CACHE_ALIGN(XP_NASID_MASK_BYTES), + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + + return xpc_map_bte_errors(bte_res); +} + -- cgit v0.10.2 From a2d974da0afe659cff98913184a97c0ee686d02b Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 20:50:00 -0700 Subject: [IA64-SGI] SGI Altix cross partition functionality [3/3] This patch contains the cross partition pseudo-ethernet driver (XPNET) functional support module. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index 6959736..4351c4f 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o xp-y := xp_main.o xp_nofault.o obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o xpc-y := xpc_main.o xpc_channel.o xpc_partition.o +obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c new file mode 100644 index 0000000..78c13d6 --- /dev/null +++ b/arch/ia64/sn/kernel/xpnet.c @@ -0,0 +1,715 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. + */ + + +/* + * Cross Partition Network Interface (XPNET) support + * + * XPNET provides a virtual network layered on top of the Cross + * Partition communication layer. + * + * XPNET provides direct point-to-point and broadcast-like support + * for an ethernet-like device. The ethernet broadcast medium is + * replaced with a point-to-point message structure which passes + * pointers to a DMA-capable block that a remote partition should + * retrieve and pass to the upper level networking layer. + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * The message payload transferred by XPC. 
+ * + * buf_pa is the physical address where the DMA should pull from. + * + * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a + * cacheline boundary. To accomplish this, we record the number of + * bytes from the beginning of the first cacheline to the first useful + * byte of the skb (leadin_ignore) and the number of bytes from the + * last useful byte of the skb to the end of the last cacheline + * (tailout_ignore). + * + * size is the number of bytes to transfer which includes the skb->len + * (useful bytes of the senders skb) plus the leadin and tailout + */ +struct xpnet_message { + u16 version; /* Version for this message */ + u16 embedded_bytes; /* #of bytes embedded in XPC message */ + u32 magic; /* Special number indicating this is xpnet */ + u64 buf_pa; /* phys address of buffer to retrieve */ + u32 size; /* #of bytes in buffer */ + u8 leadin_ignore; /* #of bytes to ignore at the beginning */ + u8 tailout_ignore; /* #of bytes to ignore at the end */ + unsigned char data; /* body of small packets */ +}; + +/* + * Determine the size of our message, the cacheline aligned size, + * and then the number of message will request from XPC. + * + * XPC expects each message to exist in an individual cacheline. + */ +#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET) +#define XPNET_MSG_DATA_MAX \ + (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data)) +#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) +#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) + + +#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) +#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) + +/* + * Version number of XPNET implementation. XPNET can always talk to versions + * with same major #, and never talk to versions with a different version. + */ +#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor)) +#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) +#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) + +#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ +#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ +#define XPNET_MAGIC 0x88786984 /* "XNET" */ + +#define XPNET_VALID_MSG(_m) \ + ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ + && (msg->magic == XPNET_MAGIC)) + +#define XPNET_DEVICE_NAME "xp0" + + +/* + * When messages are queued with xpc_send_notify, a kmalloc'd buffer + * of the following type is passed as a notification cookie. When the + * notification function is called, we use the cookie to decide + * whether all outstanding message sends have completed. The skb can + * then be released. + */ +struct xpnet_pending_msg { + struct list_head free_list; + struct sk_buff *skb; + atomic_t use_count; +}; + +/* driver specific structure pointed to by the device structure */ +struct xpnet_dev_private { + struct net_device_stats stats; +}; + +struct net_device *xpnet_device; + +/* + * When we are notified of other partitions activating, we add them to + * our bitmask of partitions to which we broadcast. + */ +static u64 xpnet_broadcast_partitions; +/* protect above */ +static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED; + +/* + * Since the Block Transfer Engine (BTE) is being used for the transfer + * and it relies upon cache-line size transfers, we need to reserve at + * least one cache-line for head and tail alignment. The BTE is + * limited to 8MB transfers. 
+ * + * Testing has shown that changing MTU to greater than 64KB has no effect + * on TCP as the two sides negotiate a Max Segment Size that is limited + * to 64K. Other protocols May use packets greater than this, but for + * now, the default is 64KB. + */ +#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES) +/* 32KB has been determined to be the ideal */ +#define XPNET_DEF_MTU (0x8000UL) + + +/* + * The partition id is encapsulated in the MAC address. The following + * define locates the octet the partid is in. + */ +#define XPNET_PARTID_OCTET 1 +#define XPNET_LICENSE_OCTET 2 + + +/* + * Define the XPNET debug device structure that is to be used with dev_dbg(), + * dev_err(), dev_warn(), and dev_info(). + */ +struct device_driver xpnet_dbg_name = { + .name = "xpnet" +}; + +struct device xpnet_dbg_subname = { + .bus_id = {0}, /* set to "" */ + .driver = &xpnet_dbg_name +}; + +struct device *xpnet = &xpnet_dbg_subname; + +/* + * Packet was recevied by XPC and forwarded to us. + */ +static void +xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) +{ + struct sk_buff *skb; + bte_result_t bret; + struct xpnet_dev_private *priv = + (struct xpnet_dev_private *) xpnet_device->priv; + + + if (!XPNET_VALID_MSG(msg)) { + /* + * Packet with a different XPC version. Ignore. + */ + xpc_received(partid, channel, (void *) msg); + + priv->stats.rx_errors++; + + return; + } + dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); + + + /* reserve an extra cache line */ + skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); + if (!skb) { + dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", + msg->size + L1_CACHE_BYTES); + + xpc_received(partid, channel, (void *) msg); + + priv->stats.rx_errors++; + + return; + } + + /* + * The allocated skb has some reserved space. + * In order to use bte_copy, we need to get the + * skb->data pointer moved forward. + */ + skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & + (L1_CACHE_BYTES - 1)) + + msg->leadin_ignore)); + + /* + * Update the tail pointer to indicate data actually + * transferred. + */ + skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); + + /* + * Move the data over from the the other side. + */ + if ((XPNET_VERSION_MINOR(msg->version) == 1) && + (msg->embedded_bytes != 0)) { + dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " + "%lu)\n", skb->data, &msg->data, + (size_t) msg->embedded_bytes); + + memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes); + } else { + dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" + "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, + (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + msg->size); + + bret = bte_copy(msg->buf_pa, + __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); + + if (bret != BTE_SUCCESS) { + // >>> Need better way of cleaning skb. Currently skb + // >>> appears in_use and we can't just call + // >>> dev_kfree_skb. 
+ dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " + "error=0x%x\n", (void *)msg->buf_pa, + (void *)__pa((u64)skb->data & + ~(L1_CACHE_BYTES - 1)), + msg->size, bret); + + xpc_received(partid, channel, (void *) msg); + + priv->stats.rx_errors++; + + return; + } + } + + dev_dbg(xpnet, "head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *) skb->head, + (void *) skb->data, (void *) skb->tail, (void *) skb->end, + skb->len); + + skb->dev = xpnet_device; + skb->protocol = eth_type_trans(skb, xpnet_device); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " + "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", + (void *) skb->head, (void *) skb->data, (void *) skb->tail, + (void *) skb->end, skb->len); + + + xpnet_device->last_rx = jiffies; + priv->stats.rx_packets++; + priv->stats.rx_bytes += skb->len + ETH_HLEN; + + netif_rx_ni(skb); + xpc_received(partid, channel, (void *) msg); +} + + +/* + * This is the handler which XPC calls during any sort of change in + * state or message reception on a connection. + */ +static void +xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, + void *data, void *key) +{ + long bp; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(channel != XPC_NET_CHANNEL); + + switch(reason) { + case xpcMsgReceived: /* message received */ + DBUG_ON(data == NULL); + + xpnet_receive(partid, channel, (struct xpnet_message *) data); + break; + + case xpcConnected: /* connection completed to a partition */ + spin_lock_bh(&xpnet_broadcast_lock); + xpnet_broadcast_partitions |= 1UL << (partid -1 ); + bp = xpnet_broadcast_partitions; + spin_unlock_bh(&xpnet_broadcast_lock); + + netif_carrier_on(xpnet_device); + + dev_dbg(xpnet, "%s connection created to partition %d; " + "xpnet_broadcast_partitions=0x%lx\n", + xpnet_device->name, partid, bp); + break; + + default: + spin_lock_bh(&xpnet_broadcast_lock); + xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); + bp = xpnet_broadcast_partitions; + spin_unlock_bh(&xpnet_broadcast_lock); + + if (bp == 0) { + netif_carrier_off(xpnet_device); + } + + dev_dbg(xpnet, "%s disconnected from partition %d; " + "xpnet_broadcast_partitions=0x%lx\n", + xpnet_device->name, partid, bp); + break; + + } +} + + +static int +xpnet_dev_open(struct net_device *dev) +{ + enum xpc_retval ret; + + + dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, " + "%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, + XPNET_MAX_IDLE_KTHREADS); + + ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, + XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); + if (ret != xpcSuccess) { + dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " + "ret=%d\n", dev->name, ret); + + return -ENOMEM; + } + + dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name); + + return 0; +} + + +static int +xpnet_dev_stop(struct net_device *dev) +{ + xpc_disconnect(XPC_NET_CHANNEL); + + dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name); + + return 0; +} + + +static int +xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) +{ + /* 68 comes from min TCP+IP+MAC header */ + if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) { + dev_err(xpnet, "ifconfig %s mtu %d failed; value must be " + "between 68 and %ld\n", dev->name, new_mtu, + XPNET_MAX_MTU); + return -EINVAL; + } + + dev->mtu = new_mtu; + 
dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu); + return 0; +} + + +/* + * Required for the net_device structure. + */ +static int +xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map) +{ + return 0; +} + + +/* + * Return statistics to the caller. + */ +static struct net_device_stats * +xpnet_dev_get_stats(struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + + priv = (struct xpnet_dev_private *) dev->priv; + + return &priv->stats; +} + + +/* + * Notification that the other end has received the message and + * DMA'd the skb information. At this point, they are done with + * our side. When all recipients are done processing, we + * release the skb and then release our pending message structure. + */ +static void +xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, + void *__qm) +{ + struct xpnet_pending_msg *queued_msg = + (struct xpnet_pending_msg *) __qm; + + + DBUG_ON(queued_msg == NULL); + + dev_dbg(xpnet, "message to %d notified with reason %d\n", + partid, reason); + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_dbg(xpnet, "all acks for skb->head=-x%p\n", + (void *) queued_msg->skb->head); + + dev_kfree_skb_any(queued_msg->skb); + kfree(queued_msg); + } +} + + +/* + * Network layer has formatted a packet (skb) and is ready to place it + * "on the wire". Prepare and send an xpnet_message to all partitions + * which have connected with us and are targets of this packet. + * + * MAC-NOTE: For the XPNET driver, the MAC address contains the + * destination partition_id. If the destination partition id word + * is 0xff, this packet is to broadcast to all partitions. + */ +static int +xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xpnet_pending_msg *queued_msg; + enum xpc_retval ret; + struct xpnet_message *msg; + u64 start_addr, end_addr; + long dp; + u8 second_mac_octet; + partid_t dest_partid; + struct xpnet_dev_private *priv; + u16 embedded_bytes; + + + priv = (struct xpnet_dev_private *) dev->priv; + + + dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *) skb->head, + (void *) skb->data, (void *) skb->tail, (void *) skb->end, + skb->len); + + + /* + * The xpnet_pending_msg tracks how many outstanding + * xpc_send_notifies are relying on this skb. When none + * remain, release the skb. + */ + queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); + if (queued_msg == NULL) { + dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " + "packet\n", sizeof(struct xpnet_pending_msg)); + + priv->stats.tx_errors++; + + return -ENOMEM; + } + + + /* get the beginning of the first cacheline and end of last */ + start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); + end_addr = L1_CACHE_ALIGN((u64) skb->tail); + + /* calculate how many bytes to embed in the XPC message */ + embedded_bytes = 0; + if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { + /* skb->data does fit so embed */ + embedded_bytes = skb->len; + } + + + /* + * Since the send occurs asynchronously, we set the count to one + * and begin sending. Any sends that happen to complete before + * we are done sending will not free the skb. We will be left + * with that task during exit. This also handles the case of + * a packet destined for a partition which is no longer up. 
+ */ + atomic_set(&queued_msg->use_count, 1); + queued_msg->skb = skb; + + + second_mac_octet = skb->data[XPNET_PARTID_OCTET]; + if (second_mac_octet == 0xff) { + /* we are being asked to broadcast to all partitions */ + dp = xpnet_broadcast_partitions; + } else if (second_mac_octet != 0) { + dp = xpnet_broadcast_partitions & + (1UL << (second_mac_octet - 1)); + } else { + /* 0 is an invalid partid. Ignore */ + dp = 0; + } + dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp); + + /* + * If we wanted to allow promiscous mode to work like an + * unswitched network, this would be a good point to OR in a + * mask of partitions which should be receiving all packets. + */ + + /* + * Main send loop. + */ + for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; + dest_partid++) { + + + if (!(dp & (1UL << (dest_partid - 1)))) { + /* not destined for this partition */ + continue; + } + + /* remove this partition from the destinations mask */ + dp &= ~(1UL << (dest_partid - 1)); + + + /* found a partition to send to */ + + ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, + XPC_NOWAIT, (void **)&msg); + if (unlikely(ret != xpcSuccess)) { + continue; + } + + msg->embedded_bytes = embedded_bytes; + if (unlikely(embedded_bytes != 0)) { + msg->version = XPNET_VERSION_EMBED; + dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", + &msg->data, skb->data, (size_t) embedded_bytes); + memcpy(&msg->data, skb->data, (size_t) embedded_bytes); + } else { + msg->version = XPNET_VERSION; + } + msg->magic = XPNET_MAGIC; + msg->size = end_addr - start_addr; + msg->leadin_ignore = (u64) skb->data - start_addr; + msg->tailout_ignore = end_addr - (u64) skb->tail; + msg->buf_pa = __pa(start_addr); + + dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa=" + "0x%lx, msg->size=%u, msg->leadin_ignore=%u, " + "msg->tailout_ignore=%u\n", dest_partid, + XPC_NET_CHANNEL, msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); + + + atomic_inc(&queued_msg->use_count); + + ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, + xpnet_send_completed, queued_msg); + if (unlikely(ret != xpcSuccess)) { + atomic_dec(&queued_msg->use_count); + continue; + } + + } + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_dbg(xpnet, "no partitions to receive packet destined for " + "%d\n", dest_partid); + + + dev_kfree_skb(skb); + kfree(queued_msg); + } + + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + + return 0; +} + + +/* + * Deal with transmit timeouts coming from the network layer. + */ +static void +xpnet_dev_tx_timeout (struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + + priv = (struct xpnet_dev_private *) dev->priv; + + priv->stats.tx_errors++; + return; +} + + +static int __init +xpnet_init(void) +{ + int i; + u32 license_num; + int result = -ENOMEM; + + + dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); + + /* + * use ether_setup() to init the majority of our device + * structure and then override the necessary pieces. 
+ */ + xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), + XPNET_DEVICE_NAME, ether_setup); + if (xpnet_device == NULL) { + return -ENOMEM; + } + + netif_carrier_off(xpnet_device); + + xpnet_device->mtu = XPNET_DEF_MTU; + xpnet_device->change_mtu = xpnet_dev_change_mtu; + xpnet_device->open = xpnet_dev_open; + xpnet_device->get_stats = xpnet_dev_get_stats; + xpnet_device->stop = xpnet_dev_stop; + xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit; + xpnet_device->tx_timeout = xpnet_dev_tx_timeout; + xpnet_device->set_config = xpnet_dev_set_config; + + /* + * Multicast assumes the LSB of the first octet is set for multicast + * MAC addresses. We chose the first octet of the MAC to be unlikely + * to collide with any vendor's officially issued MAC. + */ + xpnet_device->dev_addr[0] = 0xfe; + xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id; + license_num = sn_partition_serial_number_val(); + for (i = 3; i >= 0; i--) { + xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = + license_num & 0xff; + license_num = license_num >> 8; + } + + /* + * ether_setup() sets this to a multicast device. We are + * really not supporting multicast at this time. + */ + xpnet_device->flags &= ~IFF_MULTICAST; + + /* + * No need to checksum as it is a DMA transfer. The BTE will + * report an error if the data is not retrievable and the + * packet will be dropped. + */ + xpnet_device->features = NETIF_F_NO_CSUM; + + result = register_netdev(xpnet_device); + if (result != 0) { + free_netdev(xpnet_device); + } + + return result; +} +module_init(xpnet_init); + + +static void __exit +xpnet_exit(void) +{ + dev_info(xpnet, "unregistering network device %s\n", + xpnet_device[0].name); + + unregister_netdev(xpnet_device); + + free_netdev(xpnet_device); +} +module_exit(xpnet_exit); + + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); +MODULE_LICENSE("GPL"); + -- cgit v0.10.2 From 3a7d555bfc4d4631d9118fb4d0ed7ab62cc2ca1c Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Mon, 4 Apr 2005 13:14:00 -0700 Subject: [IA64-SGI] convert AMO address found in XPC's reserved page This patch detects the existence of an uncached physical AMO address setup by EFI's XPBOOT (SGI) and converts it to an uncached virtual AMO address. Depends on a patch submitted on 23 March 2005 with the subject of: [PATCH 2/3] SGI Altix cross partition functionality (2nd revision) Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c index b31d9988..2c3c4a8 100644 --- a/arch/ia64/sn/kernel/xpc_partition.c +++ b/arch/ia64/sn/kernel/xpc_partition.c @@ -204,6 +204,19 @@ xpc_rsvd_page_init(void) return NULL; } } + } else if (!IS_AMO_ADDRESS((u64) amos_page)) { + /* + * EFI's XPBOOT can also set amos_page in the reserved page, + * but it happens to leave it as an uncached physical address + * and we need it to be an uncached virtual, so we'll have to + * convert it. + */ + if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { + dev_err(xpc_part, "previously used amos_page address " + "is bad = 0x%p\n", (void *) amos_page); + return NULL; + } + amos_page = (AMO_t *) TO_AMO((u64) amos_page); } memset(xpc_vars, 0, sizeof(struct xpc_vars)); @@ -944,7 +957,7 @@ xpc_discovery(void) /* * Given a partid, get the nasids owned by that partition from the - * remote partitions reserved page. + * remote partition's reserved page. 
*/ enum xpc_retval xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h index 960d626..1bfdfb4 100644 --- a/include/asm-ia64/sn/addrs.h +++ b/include/asm-ia64/sn/addrs.h @@ -136,6 +136,7 @@ */ #define CAC_BASE (CACHED | AS_CAC_SPACE) #define AMO_BASE (UNCACHED | AS_AMO_SPACE) +#define AMO_PHYS_BASE (UNCACHED_PHYS | AS_AMO_SPACE) #define GET_BASE (CACHED | AS_GET_SPACE) /* @@ -161,6 +162,13 @@ /* + * Macros to test for address type. + */ +#define IS_AMO_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE) +#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE) + + +/* * The following definitions pertain to the IO special address * space. They define the location of the big and little windows * of any given node. -- cgit v0.10.2 From c0b12422e5e1d041026dd27074de17d2d7e32c4e Mon Sep 17 00:00:00 2001 From: Colin Ngam Date: Fri, 18 Mar 2005 16:38:00 -0700 Subject: [IA64-SGI] Altix only: Register Error Interrupt The following patch ensures that the correct error interrupt handling routine is initialized. This patch is based on the 2.6.12 ia64 release tree. Signed-off-by: Colin Ngam Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 18160a0..9e07f54 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void) if (status) continue; + /* Attach the error interrupt handlers */ + if (nasid & 1) + ice_error_init(hubdev); + else + hub_error_init(hubdev); + for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; @@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void) sn_flush_device_list; } - if (!(i & 1)) - hub_error_init(hubdev); - else - ice_error_init(hubdev); } } -- cgit v0.10.2 From bb0fc085457cf6a865b8232b0cefab3a7819df44 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 24 Mar 2005 22:58:00 -0700 Subject: [IA64] use common pxm function This patch simplifies a couple places where we search for _PXM values in ACPI namespace. Thanks, Signed-off-by: Alex Williamson Signed-off-by: Tony Luck diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 6a8fcba..b8db6e3 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus) static void __init sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) { - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - union acpi_object *obj; - acpi_handle phandle; unsigned int node; + int pxm; ioc->node = MAX_NUMNODES; - /* - * Check for a _PXM on this node first. We don't typically see - * one here, so we'll end up getting it from the parent. 
- */ - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) { - if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) - return; - - /* Reset the acpi buffer */ - buffer.length = ACPI_ALLOCATE_BUFFER; - buffer.pointer = NULL; - - if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL, - &buffer))) - return; - } + pxm = acpi_get_pxm(handle); - if (!buffer.length || !buffer.pointer) + if (pxm < 0) return; - obj = buffer.pointer; - - if (obj->type != ACPI_TYPE_INTEGER || - obj->integer.value >= MAX_PXM_DOMAINS) { - acpi_os_free(buffer.pointer); - return; - } - - node = pxm_to_nid_map[obj->integer.value]; - acpi_os_free(buffer.pointer); + node = pxm_to_nid_map[pxm]; if (node >= MAX_NUMNODES || !node_online(node)) return; diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a8e99c5..72dfd9e 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) union acpi_object *obj; struct acpi_table_iosapic *iosapic; unsigned int gsi_base; - int node; + int pxm, node; /* Only care about objects w/ a method that returns the MADT */ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) @@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) gsi_base = iosapic->global_irq_base; acpi_os_free(buffer.pointer); - buffer.length = ACPI_ALLOCATE_BUFFER; - buffer.pointer = NULL; /* - * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell + * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell * us which node to associate this with. */ - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) - return AE_OK; - - if (!buffer.length || !buffer.pointer) - return AE_OK; - - obj = buffer.pointer; - - if (obj->type != ACPI_TYPE_INTEGER || - obj->integer.value >= MAX_PXM_DOMAINS) { - acpi_os_free(buffer.pointer); + pxm = acpi_get_pxm(handle); + if (pxm < 0) return AE_OK; - } - node = pxm_to_nid_map[obj->integer.value]; - acpi_os_free(buffer.pointer); + node = pxm_to_nid_map[pxm]; if (node >= MAX_NUMNODES || !node_online(node) || cpus_empty(node_to_cpumask(node))) -- cgit v0.10.2 From de7548d0e202263bb6bfd7574a7889e85a691937 Mon Sep 17 00:00:00 2001 From: Mike Habeck Date: Fri, 25 Mar 2005 19:34:00 -0700 Subject: [IA64-SGI] Altix only: Fix for sn_dma_flush The following patch fixes a bug in the SGI Altix sn_dma_flush code. sn_dma_flush is broken in 2.6. The code isn't waiting for the DMA data to be flushed out of the PIC ASIC. This patch is based off the linux-ia64-test-2.6.12 tree Signed-off-by: Mike Habeck Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index c906859..64af2b2 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c @@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr) spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> sfdl_flush_lock, flags); - p->sfdl_flush_value = 0; + *p->sfdl_flush_addr = 0; /* force an interrupt. */ *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; -- cgit v0.10.2 From 7d5f9c0f10255000ca007fb03773c6b825c2b9ce Mon Sep 17 00:00:00 2001 From: Zwane Mwaikambo Date: Wed, 30 Mar 2005 21:40:00 -0700 Subject: [IA64] reduce cacheline bouncing in cpu_idle_wait Andi noted that during normal runtime cpu_idle_map is bounced around a lot, and occassionally at a higher frequency than the timer interrupt wakeup which we normally exit pm_idle from. 
So switch to a percpu variable. I didn't move things to the slow path because it would involve adding scheduler code to wakeup the idle thread on the cpus we're waiting for. Signed-off-by: Zwane Mwaikambo Signed-off-by: Andrew Morton Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 7c43aea..c0140f4 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -50,7 +50,7 @@ #include "sigframe.h" void (*ia64_mark_idle)(int); -static cpumask_t cpu_idle_map; +static DEFINE_PER_CPU(unsigned int, cpu_idle_state); unsigned long boot_option_idle_override = 0; EXPORT_SYMBOL(boot_option_idle_override); @@ -223,20 +223,31 @@ static inline void play_dead(void) } #endif /* CONFIG_HOTPLUG_CPU */ - void cpu_idle_wait(void) { - int cpu; - cpumask_t map; + unsigned int cpu, this_cpu = get_cpu(); + cpumask_t map; + + set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); + put_cpu(); - for_each_online_cpu(cpu) - cpu_set(cpu, cpu_idle_map); + cpus_clear(map); + for_each_online_cpu(cpu) { + per_cpu(cpu_idle_state, cpu) = 1; + cpu_set(cpu, map); + } - wmb(); - do { - ssleep(1); - cpus_and(map, cpu_idle_map, cpu_online_map); - } while (!cpus_empty(map)); + __get_cpu_var(cpu_idle_state) = 0; + + wmb(); + do { + ssleep(1); + for_each_online_cpu(cpu) { + if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) + cpu_clear(cpu, map); + } + cpus_and(map, map, cpu_online_map); + } while (!cpus_empty(map)); } EXPORT_SYMBOL_GPL(cpu_idle_wait); @@ -244,7 +255,6 @@ void __attribute__((noreturn)) cpu_idle (void) { void (*mark_idle)(int) = ia64_mark_idle; - int cpu = smp_processor_id(); /* endless idle loop with no priority at all */ while (1) { @@ -255,12 +265,13 @@ cpu_idle (void) while (!need_resched()) { void (*idle)(void); + if (__get_cpu_var(cpu_idle_state)) + __get_cpu_var(cpu_idle_state) = 0; + + rmb(); if (mark_idle) (*mark_idle)(1); - if (cpu_isset(cpu, cpu_idle_map)) - cpu_clear(cpu, cpu_idle_map); - rmb(); idle = pm_idle; if (!idle) idle = default_idle; -- cgit v0.10.2 From 446b8831f5acf2076fa58a66286789eb84f3df2c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 5 Apr 2005 17:47:00 -0700 Subject: [IA64] fix ia64 syscall auditing Attached is a patch against David's audit.17 kernel that adds checks for the TIF_SYSCALL_AUDIT thread flag to the ia64 system call and signal handling code paths. The patch enables auditing of system calls set up via fsys_bubble_down, as well as ensuring that audit_syscall_exit() is called on return from sigreturn. Neglecting to check for TIF_SYSCALL_AUDIT at these points results in incorrect information in audit_context, causing frequent system panics when system call auditing is enabled on an ia64 system. I have tested this patch and have seen no problems with it. 
[Original patch from Amy Griffis ported to current kernel by David Woodhouse] From: Amy Griffis From: David Woodhouse Signed-off-by: Chris Wright Signed-off-by: Greg Kroah-Hartman Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 0d8650f..4f3cdef 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down) movl r2=ia64_ret_from_syscall ;; mov rp=r2 // set the real return addr - tbit.z p8,p0=r3,TIF_SYSCALL_TRACE + and r3=_TIF_SYSCALL_TRACEAUDIT,r3 ;; + cmp.eq p8,p0=r3,r0 + (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 (p8) br.call.sptk.many b6=b6 // ignore this return addr br.cond.sptk ia64_trace_syscall diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 6891d86..499b7e5 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr) * could be corrupted. */ retval = (long) &ia64_leave_kernel; - if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (test_thread_flag(TIF_SYSCALL_TRACE) + || test_thread_flag(TIF_SYSCALL_AUDIT)) /* * strace expects to be notified after sigreturn returns even though the * context to which we return may not be in the middle of a syscall. -- cgit v0.10.2 From b1b901c2029aa0fd8aa4ac5f04f31648ae2358b4 Mon Sep 17 00:00:00 2001 From: Russ Anderson Date: Wed, 6 Apr 2005 00:07:00 -0700 Subject: [IA64] MCA recovery improvements Jack Steiner uncovered some opportunities for improvement in the MCA recovery code. 1) Set bsp to save registers on the kernel stack. 2) Disable interrupts while in the MCA recovery code. 3) Change the way the user process is killed, to avoid a panic in schedule. Testing shows that these changes make the recovery code much more reliable with the 2.6.12 kernel. Signed-off-by: Russ Anderson Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index ab47817..abc0113 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr) spin_unlock(&mca_bh_lock); /* This process is about to be killed itself */ - force_sig(SIGKILL, current); - schedule(); + do_exit(SIGKILL); } /** @@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; psr2->cpl = 0; psr2->ri = 0; + psr2->i = 0; return 1; } diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S index bcfa05a..2d7e021 100644 --- a/arch/ia64/kernel/mca_drv_asm.S +++ b/arch/ia64/kernel/mca_drv_asm.S @@ -10,6 +10,7 @@ #include #include +#include GLOBAL_ENTRY(mca_handler_bhhook) invala // clear RSE ? @@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook) ;; alloc r16=ar.pfs,0,2,1,0 // make a new frame ;; + mov ar.rsc=0 + ;; mov r13=IA64_KR(CURRENT) // current task pointer ;; - adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13 + mov r2=r13 + ;; + addl r22=IA64_RBS_OFFSET,r2 + ;; + mov ar.bspstore=r22 ;; - ld8 r12=[r12] // stack pointer + addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 ;; + adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 + ;; + st1 [r2]=r0 // clear current->thread.on_ustack flag mov loc0=r16 movl loc1=mca_handler_bh // recovery C function ;; @@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook) ;; mov loc1=rp ;; - br.call.sptk.many rp=b6 // not return ... + ssm psr.i + ;; + br.call.sptk.many rp=b6 // does not return ... 
;; mov ar.pfs=loc0 mov rp=loc1 -- cgit v0.10.2 From 32709d8ae67356559839a9a9e4b26f79134e45a6 Mon Sep 17 00:00:00 2001 From: Keith Owens Date: Fri, 8 Apr 2005 14:23:00 -0700 Subject: [IA64] SAL to OS callbacks cannot call sleeping When SAL calls back into the OS, the OS code is running with preempt disabled so it cannot call sleeping functions. Signed-off-by: Keith Owens Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c index 857774b..6546db6 100644 --- a/arch/ia64/sn/kernel/mca.c +++ b/arch/ia64/sn/kernel/mca.c @@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize; * This function is the callback routine that SAL calls to log error * info for platform errors. buf is appended to sn_oemdata, resizing as * required. + * Note: this is a SAL to OS callback, running under the same rules as the SAL + * code. SAL calls are run with preempt disabled so this routine must not + * sleep. vmalloc can sleep so print_hook cannot resize the output buffer + * itself, instead it must set the required size and return to let the caller + * resize the buffer then redrive the SAL call. */ static int print_hook(const char *fmt, ...) { @@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...) vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); len = strlen(buf); - while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) { - u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000); - if (!newbuf) { - printk(KERN_ERR "%s: unable to extend sn_oemdata\n", - __FUNCTION__); - return 0; - } - memcpy(newbuf, *sn_oemdata, *sn_oemdata_size); - vfree(*sn_oemdata); - *sn_oemdata = newbuf; - } - memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1); + if (*sn_oemdata_size + len <= sn_oemdata_bufsize) + memcpy(*sn_oemdata + *sn_oemdata_size, buf, len); *sn_oemdata_size += len; return 0; } @@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, sn_oemdata = oemdata; sn_oemdata_size = oemdata_size; sn_oemdata_bufsize = 0; - ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); + *sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */ + while (*sn_oemdata_size > sn_oemdata_bufsize) { + u8 *newbuf = vmalloc(*sn_oemdata_size); + if (!newbuf) { + printk(KERN_ERR "%s: unable to extend sn_oemdata\n", + __FUNCTION__); + return 1; + } + vfree(*sn_oemdata); + *sn_oemdata = newbuf; + sn_oemdata_bufsize = *sn_oemdata_size; + *sn_oemdata_size = 0; + ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); + } up(&sn_oemdata_mutex); return 0; } -- cgit v0.10.2 From 3ea8b477b4b9d3e75b5e9b8aea41259f45031823 Mon Sep 17 00:00:00 2001 From: Mark Maule Date: Mon, 11 Apr 2005 21:20:00 -0700 Subject: [IA64] altix: fix TIOCA dmamap list_add Correct a bug where tioca_dma_mapped() is putting tioca dma map structs on the wrong list. 
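The print_hook rework in the SAL callback patch above is an instance of a general pattern for callbacks that run with preemption disabled: the atomic-context side never allocates, it only copies what fits and keeps an honest count of the bytes a complete pass would need, while the sleeping-context caller grows the buffer and redrives the call until one pass fits. A minimal sketch of that pattern with illustrative names (this is not the SN platform code):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    struct outbuf {
        char   *buf;
        size_t  cap;
        size_t  used;
    };

    /* Atomic context: copy only what fits, but account for everything. */
    static void out_append(struct outbuf *out, const char *msg, size_t len)
    {
        if (out->used + len <= out->cap)
            memcpy(out->buf + out->used, msg, len);
        out->used += len;           /* used > cap tells the caller to retry */
    }

    /* Sleeping context: grow the buffer and redrive until one pass fits. */
    static int out_collect(struct outbuf *out, void (*redrive)(struct outbuf *))
    {
        out->used = PAGE_SIZE;      /* first guess at the required size */
        while (out->used > out->cap) {
            vfree(out->buf);
            out->buf = vmalloc(out->used);  /* may sleep, which is fine here */
            if (!out->buf)
                return -ENOMEM;
            out->cap  = out->used;
            out->used = 0;
            redrive(out);           /* callback fills via out_append() */
        }
        return 0;
    }
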
Signed-off-by: Mark Maule Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 54a0dd4..8dae9eb 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) ca_dmamap->cad_dma_addr = bus_addr; ca_dmamap->cad_gart_size = entries; ca_dmamap->cad_gart_entry = entry; - list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list); + list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); if (xio_addr % ps) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); -- cgit v0.10.2 From 012914dad25bd5cacf88af4429eecda62a06020d Mon Sep 17 00:00:00 2001 From: Russ Anderson <(rja@sgi.com)> Date: Sat, 23 Apr 2005 00:08:00 -0700 Subject: [patch] MCA recovery module undefined symbol fix The patch "MCA recovery improvements" added do_exit to mca_drv.c. That's fine when the mca recovery code is built in the kernel (CONFIG_IA64_MCA_RECOVERY=y) but breaks building the mca recovery code as a module (CONFIG_IA64_MCA_RECOVERY=m). Most users are currently building this as a module, as loading and unloading the module provides a very convenient way to turn on/off error recovery. This patch exports do_exit, so mca_drv.c can build as a module. Signed-off-by: Russ Anderson (rja@sgi.com) Signed-off-by: Tony Luck diff --git a/kernel/exit.c b/kernel/exit.c index 7be283d..edaa50b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -846,6 +846,8 @@ fastcall NORET_TYPE void do_exit(long code) for (;;) ; } +EXPORT_SYMBOL_GPL(do_exit); + NORET_TYPE void complete_and_exit(struct completion *comp, long code) { if (comp) -- cgit v0.10.2 From b43309578477ff3271945d4efb920f558a309b4a Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 3 May 2005 14:23:13 -0700 Subject: [NETFILTER]: Missing owner-field initialization in iptable_raw Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 01b4a3c..47449ba 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c @@ -103,13 +103,15 @@ static struct nf_hook_ops ipt_ops[] = { .hook = ipt_hook, .pf = PF_INET, .hooknum = NF_IP_PRE_ROUTING, - .priority = NF_IP_PRI_RAW + .priority = NF_IP_PRI_RAW, + .owner = THIS_MODULE, }, { .hook = ipt_hook, .pf = PF_INET, .hooknum = NF_IP_LOCAL_OUT, - .priority = NF_IP_PRI_RAW + .priority = NF_IP_PRI_RAW, + .owner = THIS_MODULE, }, }; -- cgit v0.10.2 From 31da185d8162ae0f30a13ed945f1f4d28d158133 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 3 May 2005 14:23:50 -0700 Subject: [NETFILTER]: Don't checksum CHECKSUM_UNNECESSARY skbs in TCP connection tracking Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index 2b87c19..721ddbf 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c @@ -819,6 +819,7 @@ static int tcp_error(struct sk_buff *skb, */ /* FIXME: Source route IP option packets --RR */ if (hooknum == NF_IP_PRE_ROUTING + && skb->ip_summed != CHECKSUM_UNNECESSARY && csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP, skb->ip_summed == CHECKSUM_HW ? 
skb->csum : skb_checksum(skb, iph->ihl*4, tcplen, 0))) { -- cgit v0.10.2 From 679a87382433cf12a28f07a7d5c240f30f0daa08 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 May 2005 14:24:36 -0700 Subject: [IPV6]: Fix raw socket checksums with IPsec I made a mistake in my last patch to the raw socket checksum code. I used the value of inet->cork.length as the length of the payload. While this works with normal packets, it breaks down when IPsec is present since the cork length includes the extension header length. So here is a patch to fix the length calculations. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 1352c1d..617645b 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -455,11 +455,11 @@ csum_copy_err: static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct raw6_sock *rp) { - struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; int err = 0; int offset; int len; + int total_len; u32 tmp_csum; u16 csum; @@ -470,7 +470,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, goto out; offset = rp->offset; - if (offset >= inet->cork.length - 1) { + total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data); + if (offset >= total_len - 1) { err = -EINVAL; ip6_flush_pending_frames(sk); goto out; @@ -514,7 +515,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, tmp_csum = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst, - inet->cork.length, fl->proto, tmp_csum); + total_len, fl->proto, tmp_csum); if (tmp_csum == 0) tmp_csum = -1; -- cgit v0.10.2 From e4553eddae592b948c9695c9a0002169b0cab6fc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 May 2005 14:25:13 -0700 Subject: [IPV6]: Include ipv6.h for ipv6_addr_set This patch includes net/ipv6.h from addrconf.h since it needs ipv6_addr_set. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 7af9a13..f1e5af4 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -46,6 +46,7 @@ struct prefix_info { #include #include #include +#include #define IN6_ADDR_HSIZE 16 -- cgit v0.10.2 From 526bdb80a23b2e10ed4ccc3fcf309c9118d892d6 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Tue, 3 May 2005 14:26:01 -0700 Subject: [XFRM]: Prevent off-by-one access to xfrm_dispatch Makes the type > XFRM_MSG_MAX check behave correctly to protect access to xfrm_dispatch. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index f0df02a..4d19b9e 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -140,8 +140,9 @@ enum { XFRM_MSG_FLUSHPOLICY, #define XFRM_MSG_FLUSHPOLICY XFRM_MSG_FLUSHPOLICY - XFRM_MSG_MAX + __XFRM_MSG_MAX }; +#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) struct xfrm_user_tmpl { struct xfrm_id id; -- cgit v0.10.2 From 492b558b3191319cbc859a9e025bc354d336c261 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Tue, 3 May 2005 14:26:40 -0700 Subject: [XFRM]: Cleanup xfrm_msg_min and xfrm_dispatch Converts xfrm_msg_min and xfrm_dispatch to use c99 designated initializers to make greping a little bit easier. Also replaces two hardcoded message type with meaningful names. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index 4d19b9e..fd2ef74 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -144,6 +144,8 @@ enum { }; #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) +#define XFRM_NR_MSGTYPES (XFRM_MSG_MAX + 1 - XFRM_MSG_BASE) + struct xfrm_user_tmpl { struct xfrm_id id; __u16 family; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 63661b0..52b5843 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -855,47 +855,44 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x return 0; } -static const int xfrm_msg_min[(XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)] = { - NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)), /* NEW SA */ - NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)), /* DEL SA */ - NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)), /* GET SA */ - NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* NEW POLICY */ - NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)), /* DEL POLICY */ - NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)), /* GET POLICY */ - NLMSG_LENGTH(sizeof(struct xfrm_userspi_info)), /* ALLOC SPI */ - NLMSG_LENGTH(sizeof(struct xfrm_user_acquire)), /* ACQUIRE */ - NLMSG_LENGTH(sizeof(struct xfrm_user_expire)), /* EXPIRE */ - NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* UPD POLICY */ - NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)), /* UPD SA */ - NLMSG_LENGTH(sizeof(struct xfrm_user_polexpire)), /* POLEXPIRE */ - NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)), /* FLUSH SA */ - NLMSG_LENGTH(0), /* FLUSH POLICY */ +#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type)) + +static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { + [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), + [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), + [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), + [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), + [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), + [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), + [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info), + [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), + [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), + [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), + [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), + [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), + [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), + [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0), }; +#undef XMSGSIZE + static struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, void **); int (*dump)(struct sk_buff *, struct netlink_callback *); -} xfrm_dispatch[] = { - { .doit = xfrm_add_sa, }, - { .doit = xfrm_del_sa, }, - { - .doit = xfrm_get_sa, - .dump = xfrm_dump_sa, - }, - { .doit = xfrm_add_policy }, - { .doit = xfrm_get_policy }, - { - .doit = xfrm_get_policy, - .dump = xfrm_dump_policy, - }, - { .doit = xfrm_alloc_userspi }, - {}, - {}, - { .doit = xfrm_add_policy }, - { .doit = xfrm_add_sa, }, - {}, - { .doit = xfrm_flush_sa }, - { .doit = xfrm_flush_policy }, +} xfrm_dispatch[XFRM_NR_MSGTYPES] = { + [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, + [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, + [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, + .dump = xfrm_dump_sa }, + [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, + [XFRM_MSG_DELPOLICY - 
XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, + [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, + .dump = xfrm_dump_policy }, + [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, + [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, + [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, + [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa }, + [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, }; static int xfrm_done(struct netlink_callback *cb) @@ -931,7 +928,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err return -1; } - if ((type == 2 || type == 5) && (nlh->nlmsg_flags & NLM_F_DUMP)) { + if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || + type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && + (nlh->nlmsg_flags & NLM_F_DUMP)) { u32 rlen; if (link->dump == NULL) -- cgit v0.10.2 From d775fc09f16f4b88cd0373006b112c4772589778 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Tue, 3 May 2005 14:27:35 -0700 Subject: [RTNETLINK] Fix RTM_MAX to represent the maximum valid message type RTM_MAX is currently set to the maximum reserverd message type plus one thus being the cause of two bugs for new types being assigned a) given the new family registers only the NEW command in its reserved block the array size for per family entries is calculated one entry short and b) given the new family registers all commands RTM_MAX would point to the first entry of the block following this one and the rtnetlink receive path would accept a message type for a nonexisting family. This patch changes RTM_MAX to point to the maximum valid message type by aligning it to the start of the next block and subtracting one. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 32e5276..d607219 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -89,8 +89,8 @@ enum { RTM_GETANYCAST = 62, #define RTM_GETANYCAST RTM_GETANYCAST - RTM_MAX, -#define RTM_MAX RTM_MAX + __RTM_MAX, +#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; /* -- cgit v0.10.2 From f90a0a74b864fdc46737614f03b8868f4f31e3bf Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Tue, 3 May 2005 14:29:00 -0700 Subject: [RTNETLINK] Fix & cleanup rtm_min/rtm_max Converts rtm_min and rtm_max arrays to use c99 designated initializers for easier insertion of new message families. RTM_GETMULTICAST and RTM_GETANYCAST did not have the minimal message size specified which means that the netlink message was parsed for routing attributes starting from the header. Adds the proper minimal message sizes for these messages (netlink header + common rtnetlink header) to fix this issue. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index d607219..1ecaea7 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -93,6 +93,8 @@ enum { #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; +#define RTM_FAM(cmd) (((cmd) - RTM_BASE) >> 2) + /* Generic structure for encapsulation of optional route information. 
It is reminiscent of sockaddr, but with sa_family replaced diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d8c198e..58a9818 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -88,28 +88,31 @@ struct rtnetlink_link * rtnetlink_links[NPROTO]; static const int rtm_min[(RTM_MAX+1-RTM_BASE)/4] = { - NLMSG_LENGTH(sizeof(struct ifinfomsg)), - NLMSG_LENGTH(sizeof(struct ifaddrmsg)), - NLMSG_LENGTH(sizeof(struct rtmsg)), - NLMSG_LENGTH(sizeof(struct ndmsg)), - NLMSG_LENGTH(sizeof(struct rtmsg)), - NLMSG_LENGTH(sizeof(struct tcmsg)), - NLMSG_LENGTH(sizeof(struct tcmsg)), - NLMSG_LENGTH(sizeof(struct tcmsg)), - NLMSG_LENGTH(sizeof(struct tcamsg)) + [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + [RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)), + [RTM_FAM(RTM_NEWROUTE)] = NLMSG_LENGTH(sizeof(struct rtmsg)), + [RTM_FAM(RTM_NEWNEIGH)] = NLMSG_LENGTH(sizeof(struct ndmsg)), + [RTM_FAM(RTM_NEWRULE)] = NLMSG_LENGTH(sizeof(struct rtmsg)), + [RTM_FAM(RTM_NEWQDISC)] = NLMSG_LENGTH(sizeof(struct tcmsg)), + [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)), + [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)), + [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)), + [RTM_FAM(RTM_NEWPREFIX)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), + [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), + [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), }; static const int rta_max[(RTM_MAX+1-RTM_BASE)/4] = { - IFLA_MAX, - IFA_MAX, - RTA_MAX, - NDA_MAX, - RTA_MAX, - TCA_MAX, - TCA_MAX, - TCA_MAX, - TCAA_MAX + [RTM_FAM(RTM_NEWLINK)] = IFLA_MAX, + [RTM_FAM(RTM_NEWADDR)] = IFA_MAX, + [RTM_FAM(RTM_NEWROUTE)] = RTA_MAX, + [RTM_FAM(RTM_NEWNEIGH)] = NDA_MAX, + [RTM_FAM(RTM_NEWRULE)] = RTA_MAX, + [RTM_FAM(RTM_NEWQDISC)] = TCA_MAX, + [RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX, + [RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX, + [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX, }; void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data) -- cgit v0.10.2 From db46edc6d3b66bf708a8f23a9aa89f63a49ebe33 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Tue, 3 May 2005 14:29:39 -0700 Subject: [RTNETLINK] Cleanup rtnetlink_link tables Converts remaining rtnetlink_link tables to use c99 designated initializers to make greping a little bit easier. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 1ecaea7..91ac97c 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -93,6 +93,8 @@ enum { #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; +#define RTM_NR_MSGTYPES (RTM_MAX + 1 - RTM_BASE) +#define RTM_NR_FAMILIES (RTM_NR_MSGTYPES >> 2) #define RTM_FAM(cmd) (((cmd) - RTM_BASE) >> 2) /* diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 58a9818..5fb70cf 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -86,7 +86,7 @@ struct sock *rtnl; struct rtnetlink_link * rtnetlink_links[NPROTO]; -static const int rtm_min[(RTM_MAX+1-RTM_BASE)/4] = +static const int rtm_min[RTM_NR_FAMILIES] = { [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)), [RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)), @@ -102,7 +102,7 @@ static const int rtm_min[(RTM_MAX+1-RTM_BASE)/4] = [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), }; -static const int rta_max[(RTM_MAX+1-RTM_BASE)/4] = +static const int rta_max[RTM_NR_FAMILIES] = { [RTM_FAM(RTM_NEWLINK)] = IFLA_MAX, [RTM_FAM(RTM_NEWADDR)] = IFA_MAX, @@ -641,7 +641,7 @@ static void rtnetlink_rcv(struct sock *sk, int len) } while (rtnl && rtnl->sk_receive_queue.qlen); } -static struct rtnetlink_link link_rtnetlink_table[RTM_MAX-RTM_BASE+1] = +static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] = { [RTM_GETLINK - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo }, [RTM_SETLINK - RTM_BASE] = { .doit = do_setlink }, diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index c2a0346..e6e23eb 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1411,21 +1411,22 @@ static struct file_operations dn_dev_seq_fops = { #endif /* CONFIG_PROC_FS */ -static struct rtnetlink_link dnet_rtnetlink_table[RTM_MAX-RTM_BASE+1] = +static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] = { - [4] = { .doit = dn_dev_rtm_newaddr, }, - [5] = { .doit = dn_dev_rtm_deladdr, }, - [6] = { .dumpit = dn_dev_dump_ifaddr, }, - + [RTM_NEWADDR - RTM_BASE] = { .doit = dn_dev_rtm_newaddr, }, + [RTM_DELADDR - RTM_BASE] = { .doit = dn_dev_rtm_deladdr, }, + [RTM_GETADDR - RTM_BASE] = { .dumpit = dn_dev_dump_ifaddr, }, #ifdef CONFIG_DECNET_ROUTER - [8] = { .doit = dn_fib_rtm_newroute, }, - [9] = { .doit = dn_fib_rtm_delroute, }, - [10] = { .doit = dn_cache_getroute, .dumpit = dn_fib_dump, }, - [16] = { .doit = dn_fib_rtm_newrule, }, - [17] = { .doit = dn_fib_rtm_delrule, }, - [18] = { .dumpit = dn_fib_dump_rules, }, + [RTM_NEWROUTE - RTM_BASE] = { .doit = dn_fib_rtm_newroute, }, + [RTM_DELROUTE - RTM_BASE] = { .doit = dn_fib_rtm_delroute, }, + [RTM_GETROUTE - RTM_BASE] = { .doit = dn_cache_getroute, + .dumpit = dn_fib_dump, }, + [RTM_NEWRULE - RTM_BASE] = { .doit = dn_fib_rtm_newrule, }, + [RTM_DELRULE - RTM_BASE] = { .doit = dn_fib_rtm_delrule, }, + [RTM_GETRULE - RTM_BASE] = { .dumpit = dn_fib_dump_rules, }, #else - [10] = { .doit = dn_cache_getroute, .dumpit = dn_cache_dump, }, + [RTM_GETROUTE - RTM_BASE] = { .doit = dn_cache_getroute, + .dumpit = dn_cache_dump, #endif }; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index eea7ef0..abbc6d5 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1107,17 +1107,18 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa) } } -static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = { - [4] = { .doit = inet_rtm_newaddr, }, - [5] = { .doit = inet_rtm_deladdr, }, - [6] = { .dumpit = inet_dump_ifaddr, }, - [8] = { .doit = 
inet_rtm_newroute, }, - [9] = { .doit = inet_rtm_delroute, }, - [10] = { .doit = inet_rtm_getroute, .dumpit = inet_dump_fib, }, +static struct rtnetlink_link inet_rtnetlink_table[RTM_NR_MSGTYPES] = { + [RTM_NEWADDR - RTM_BASE] = { .doit = inet_rtm_newaddr, }, + [RTM_DELADDR - RTM_BASE] = { .doit = inet_rtm_deladdr, }, + [RTM_GETADDR - RTM_BASE] = { .dumpit = inet_dump_ifaddr, }, + [RTM_NEWROUTE - RTM_BASE] = { .doit = inet_rtm_newroute, }, + [RTM_DELROUTE - RTM_BASE] = { .doit = inet_rtm_delroute, }, + [RTM_GETROUTE - RTM_BASE] = { .doit = inet_rtm_getroute, + .dumpit = inet_dump_fib, }, #ifdef CONFIG_IP_MULTIPLE_TABLES - [16] = { .doit = inet_rtm_newrule, }, - [17] = { .doit = inet_rtm_delrule, }, - [18] = { .dumpit = inet_dump_rules, }, + [RTM_NEWRULE - RTM_BASE] = { .doit = inet_rtm_newrule, }, + [RTM_DELRULE - RTM_BASE] = { .doit = inet_rtm_delrule, }, + [RTM_GETRULE - RTM_BASE] = { .dumpit = inet_dump_rules, }, #endif }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 7196ac2..7744a25 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3076,7 +3076,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev, netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_PREFIX, GFP_ATOMIC); } -static struct rtnetlink_link inet6_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = { +static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = { [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, }, [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, -- cgit v0.10.2 From 6a5d362120a61a719095443194cc2d5e9a7027dd Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Tue, 3 May 2005 14:33:27 -0700 Subject: [WAN]: kfree of NULL pointer is valid kfree(0) is perfectly valid, checking pointers for NULL before calling kfree() on them is redundant. The patch below cleans away a few such redundant checks (and while I was around some of those bits I couldn't stop myself from making a few tiny whitespace changes as well). Signed-off-by: Jesper Juhl Acked-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index 5b48cd8..02d57c0 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c @@ -436,9 +436,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev, } if (err) { - if (chan->local_addr) - kfree(chan->local_addr); - + kfree(chan->local_addr); kfree(chan); return err; } @@ -458,9 +456,7 @@ static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev) struct cycx_x25_channel *chan = dev->priv; if (chan->svc) { - if (chan->local_addr) - kfree(chan->local_addr); - + kfree(chan->local_addr); if (chan->state == WAN_CONNECTED) del_timer(&chan->timer); } diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 29f84ad..8454bf6 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c @@ -400,10 +400,8 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip) cpc_tty->buf_rx.last = NULL; } - if (cpc_tty->buf_tx) { - kfree(cpc_tty->buf_tx); - cpc_tty->buf_tx = NULL; - } + kfree(cpc_tty->buf_tx); + cpc_tty->buf_tx = NULL; CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name); @@ -666,7 +664,7 @@ static void cpc_tty_rx_work(void * data) unsigned long port; int i, j; st_cpc_tty_area *cpc_tty; - volatile st_cpc_rx_buf * buf; + volatile st_cpc_rx_buf *buf; char flags=0,flg_rx=1; struct tty_ldisc *ld; @@ -680,9 +678,9 @@ static void cpc_tty_rx_work(void * data) cpc_tty = &cpc_tty_area[port]; if ((buf=cpc_tty->buf_rx.first) != 0) { - if(cpc_tty->tty) { + if (cpc_tty->tty) { ld = tty_ldisc_ref(cpc_tty->tty); - if(ld) { + if (ld) { if (ld->receive_buf) { CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name); ld->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size); @@ -691,7 +689,7 @@ static void cpc_tty_rx_work(void * data) } } cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next; - kfree((unsigned char *)buf); + kfree(buf); buf = cpc_tty->buf_rx.first; flg_rx = 1; } @@ -733,7 +731,7 @@ static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan) void cpc_tty_receive(pc300dev_t *pc300dev) { - st_cpc_tty_area *cpc_tty; + st_cpc_tty_area *cpc_tty; pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan; pc300_t *card = (pc300_t *)pc300chan->card; int ch = pc300chan->channel; @@ -742,7 +740,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev) int rx_len, rx_aux; volatile unsigned char status; unsigned short first_bd = pc300chan->rx_first_bd; - st_cpc_rx_buf *new=NULL; + st_cpc_rx_buf *new = NULL; unsigned char dsr_rx; if (pc300dev->cpc_tty == NULL) { @@ -762,7 +760,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev) if (status & DST_EOM) { break; } - ptdescr=(pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next)); + ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next)); } if (!rx_len) { @@ -771,10 +769,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev) cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), RX_BD_ADDR(ch, pc300chan->rx_last_bd)); } - if (new) { - kfree(new); - new = NULL; - } + kfree(new); return; } @@ -787,7 +782,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev) continue; } - new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC); + new = (st_cpc_rx_buf *)kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC); if (new == 0) { cpc_tty_rx_disc_frame(pc300chan); continue; diff --git a/drivers/net/wan/sdla_chdlc.c b/drivers/net/wan/sdla_chdlc.c index afbe002..496d292 100644 --- a/drivers/net/wan/sdla_chdlc.c +++ b/drivers/net/wan/sdla_chdlc.c @@ -3664,15 +3664,10 @@ static void 
wanpipe_tty_close(struct tty_struct *tty, struct file * filp) chdlc_disable_comm_shutdown(card); unlock_adapter_irq(&card->wandev.lock,&smp_flags); - if (card->tty_buf){ - kfree(card->tty_buf); - card->tty_buf=NULL; - } - - if (card->tty_rx){ - kfree(card->tty_rx); - card->tty_rx=NULL; - } + kfree(card->tty_buf); + card->tty_buf = NULL; + kfree(card->tty_rx); + card->tty_rx = NULL; } return; } diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 8c5cfcb..1c540d8 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -107,13 +107,9 @@ static struct x25_asy *x25_asy_alloc(void) static void x25_asy_free(struct x25_asy *sl) { /* Free all X.25 frame buffers. */ - if (sl->rbuff) { - kfree(sl->rbuff); - } + kfree(sl->rbuff); sl->rbuff = NULL; - if (sl->xbuff) { - kfree(sl->xbuff); - } + kfree(sl->xbuff); sl->xbuff = NULL; if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) { @@ -134,10 +130,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu) { printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n", dev->name); - if (xbuff != NULL) - kfree(xbuff); - if (rbuff != NULL) - kfree(rbuff); + kfree(xbuff); + kfree(rbuff); return -ENOMEM; } @@ -169,10 +163,8 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu) spin_unlock_bh(&sl->lock); - if (xbuff != NULL) - kfree(xbuff); - if (rbuff != NULL) - kfree(rbuff); + kfree(xbuff); + kfree(rbuff); return 0; } -- cgit v0.10.2 From 20cc6befa23bb993cf4a4c58becb1dd99e7fc927 Mon Sep 17 00:00:00 2001 From: Lucas Correia Villa Real Date: Tue, 3 May 2005 14:34:20 -0700 Subject: [PKT_SCHED]: fix typo on Kconfig This is a trivial fix for a typo on Kconfig, where the Generic Random Early Detection algorithm is abbreviated as RED instead of GRED. Signed-off-by: Lucas Correia Villa Real Signed-off-by: David S. Miller diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 9c118ba..b094118 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -185,7 +185,7 @@ config NET_SCH_GRED depends on NET_SCHED help Say Y here if you want to use the Generic Random Early Detection - (RED) packet scheduling algorithm for some of your network devices + (GRED) packet scheduling algorithm for some of your network devices (see the top of for details and references about the algorithm). -- cgit v0.10.2 From 0b2531bdc54e19717de5cb161d57e5ee0a7725ff Mon Sep 17 00:00:00 2001 From: Folkert van Heusden Date: Tue, 3 May 2005 14:36:08 -0700 Subject: [TCP]: Optimize check in port-allocation code. Signed-off-by: Folkert van Heusden Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3ac6659..dad98e4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -222,10 +222,13 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) int rover; spin_lock(&tcp_portalloc_lock); - rover = tcp_port_rover; + if (tcp_port_rover < low) + rover = low; + else + rover = tcp_port_rover; do { rover++; - if (rover < low || rover > high) + if (rover > high) rover = low; head = &tcp_bhash[tcp_bhashfn(rover)]; spin_lock(&head->lock); -- cgit v0.10.2 From c3924c70dd3bddc28b99ccd1688bd281bad1a9be Mon Sep 17 00:00:00 2001 From: Folkert van Heusden Date: Tue, 3 May 2005 14:36:45 -0700 Subject: [TCP]: Optimize check in port-allocation code, v6 version. Signed-off-by: Folkert van Heusden Signed-off-by: David S. 
Miller diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4760c85..0f69e80 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -139,9 +139,12 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) int rover; spin_lock(&tcp_portalloc_lock); - rover = tcp_port_rover; + if (tcp_port_rover < low) + rover = low; + else + rover = tcp_port_rover; do { rover++; - if ((rover < low) || (rover > high)) + if (rover > high) rover = low; head = &tcp_bhash[tcp_bhashfn(rover)]; spin_lock(&head->lock); -- cgit v0.10.2 From 96edf83c4e284c08584f97623f7c7f029759459e Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Tue, 3 May 2005 14:38:09 -0700 Subject: [PPP]: remove redundant NULL pointer checks before kfree & vfree kfree() and vfree() can both deal with NULL pointers. This patch removes redundant NULL pointer checks from the ppp code in drivers/net/ Signed-off-by: Jesper Juhl Signed-off-by: David S. Miller diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c index 507d632..3872088 100644 --- a/drivers/net/ppp_deflate.c +++ b/drivers/net/ppp_deflate.c @@ -87,8 +87,7 @@ static void z_comp_free(void *arg) if (state) { zlib_deflateEnd(&state->strm); - if (state->strm.workspace) - vfree(state->strm.workspace); + vfree(state->strm.workspace); kfree(state); } } @@ -308,8 +307,7 @@ static void z_decomp_free(void *arg) if (state) { zlib_inflateEnd(&state->strm); - if (state->strm.workspace) - kfree(state->strm.workspace); + kfree(state->strm.workspace); kfree(state); } } diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index c456dc8..3b377f6 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -2467,14 +2467,10 @@ static void ppp_destroy_interface(struct ppp *ppp) skb_queue_purge(&ppp->mrq); #endif /* CONFIG_PPP_MULTILINK */ #ifdef CONFIG_PPP_FILTER - if (ppp->pass_filter) { - kfree(ppp->pass_filter); - ppp->pass_filter = NULL; - } - if (ppp->active_filter) { - kfree(ppp->active_filter); - ppp->active_filter = NULL; - } + kfree(ppp->pass_filter); + ppp->pass_filter = NULL; + kfree(ppp->active_filter); + ppp->active_filter = NULL; #endif /* CONFIG_PPP_FILTER */ kfree(ppp); -- cgit v0.10.2 From 033d899904792d3501b7dd469495ca9138424ec3 Mon Sep 17 00:00:00 2001 From: Asim Shankar Date: Tue, 3 May 2005 14:39:33 -0700 Subject: [PKT_SCHED]: HTB: Drop packet when direct queue is full htb_enqueue(): Free skb and return NET_XMIT_DROP if a packet is destined for the direct_queue but the direct_queue is full. (Before this: erroneously returned NET_XMIT_SUCCESS even though the packet was not enqueued) Signed-off-by: Asim Shankar Signed-off-by: David S. Miller diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index a85935e..558cc08 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -717,6 +717,10 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (q->direct_queue.qlen < q->direct_qlen) { __skb_queue_tail(&q->direct_queue, skb); q->direct_pkts++; + } else { + kfree_skb(skb); + sch->qstats.drops++; + return NET_XMIT_DROP; } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { -- cgit v0.10.2 From 9dfa277f88388a94993b121db46b80df66f48d9e Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 3 May 2005 14:41:18 -0700 Subject: [PKT_SCHED]: Fix range in PSCHED_TDIFF_SAFE to 0..bound Signed-off-by: Patrick McHardy Signed-off-by: David S. 
Miller diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 7352e45..fcb05a3 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -157,7 +157,8 @@ psched_tod_diff(int delta_sec, int bound) case 1: \ __delta += 1000000; \ case 0: \ - __delta = abs(__delta); \ + if (__delta > bound || __delta < 0) \ + __delta = bound; \ } \ __delta; \ }) -- cgit v0.10.2 From 96c36023434b7b6824b1da72a6b7b1ca61d7310c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 May 2005 14:43:27 -0700 Subject: [NETLINK]: cb_lock does not needs ref count on sk Here is a little optimisation for the cb_lock used by netlink_dump. While fixing that race earlier, I noticed that the reference count held by cb_lock is completely useless. The reason is that in order to obtain the protection of the reference count, you have to take the cb_lock. But the only way to take the cb_lock is through dereferencing the socket. That is, you must already possess a reference count on the socket before you can take advantage of the reference count held by cb_lock. As a corollary, we can remve the reference count held by the cb_lock. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 29a5fd2..4ee39206 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -373,7 +373,6 @@ static int netlink_release(struct socket *sock) nlk->cb->done(nlk->cb); netlink_destroy_callback(nlk->cb); nlk->cb = NULL; - __sock_put(sk); } spin_unlock(&nlk->cb_lock); @@ -1099,7 +1098,6 @@ static int netlink_dump(struct sock *sk) spin_unlock(&nlk->cb_lock); netlink_destroy_callback(cb); - __sock_put(sk); return 0; } @@ -1138,7 +1136,6 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, return -EBUSY; } nlk->cb = cb; - sock_hold(sk); spin_unlock(&nlk->cb_lock); netlink_dump(sk); -- cgit v0.10.2 From 2a0a6ebee1d68552152ae8d4aeda91d806995dec Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 May 2005 14:55:09 -0700 Subject: [NETLINK]: Synchronous message processing. Let's recap the problem. The current asynchronous netlink kernel message processing is vulnerable to these attacks: 1) Hit and run: Attacker sends one or more messages and then exits before they're processed. This may confuse/disable the next netlink user that gets the netlink address of the attacker since it may receive the responses to the attacker's messages. Proposed solutions: a) Synchronous processing. b) Stream mode socket. c) Restrict/prohibit binding. 2) Starvation: Because various netlink rcv functions were written to not return until all messages have been processed on a socket, it is possible for these functions to execute for an arbitrarily long period of time. If this is successfully exploited it could also be used to hold rtnl forever. Proposed solutions: a) Synchronous processing. b) Stream mode socket. Firstly let's cross off solution c). It only solves the first problem and it has user-visible impacts. In particular, it'll break user space applications that expect to bind or communicate with specific netlink addresses (pid's). So we're left with a choice of synchronous processing versus SOCK_STREAM for netlink. For the moment I'm sticking with the synchronous approach as suggested by Alexey since it's simpler and I'd rather spend my time working on other things. However, it does have a number of deficiencies compared to the stream mode solution: 1) User-space to user-space netlink communication is still vulnerable. 2) Inefficient use of resources. 
This is especially true for rtnetlink since the lock is shared with other users such as networking drivers. The latter could hold the rtnl while communicating with hardware which causes the rtnetlink user to wait when it could be doing other things. 3) It is still possible to DoS all netlink users by flooding the kernel netlink receive queue. The attacker simply fills the receive socket with a single netlink message that fills up the entire queue. The attacker then continues to call sendmsg with the same message in a loop. Point 3) can be countered by retransmissions in user-space code, however it is pretty messy. In light of these problems (in particular, point 3), we should implement stream mode netlink at some point. In the mean time, here is a patch that implements synchronous processing. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller diff --git a/kernel/audit.c b/kernel/audit.c index 0f84dd7..ac26d4d 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -427,7 +427,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) /* Get message from skb (based on rtnetlink_rcv_skb). Each message is * processed by audit_receive_msg. Malformed skbs with wrong length are * discarded silently. */ -static int audit_receive_skb(struct sk_buff *skb) +static void audit_receive_skb(struct sk_buff *skb) { int err; struct nlmsghdr *nlh; @@ -436,7 +436,7 @@ static int audit_receive_skb(struct sk_buff *skb) while (skb->len >= NLMSG_SPACE(0)) { nlh = (struct nlmsghdr *)skb->data; if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) - return 0; + return; rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; @@ -446,23 +446,20 @@ static int audit_receive_skb(struct sk_buff *skb) netlink_ack(skb, nlh, 0); skb_pull(skb, rlen); } - return 0; } /* Receive messages from netlink socket. */ static void audit_receive(struct sock *sk, int length) { struct sk_buff *skb; + unsigned int qlen; - if (down_trylock(&audit_netlink_sem)) - return; + down(&audit_netlink_sem); - /* FIXME: this must not cause starvation */ - while ((skb = skb_dequeue(&sk->sk_receive_queue))) { - if (audit_receive_skb(skb) && skb->len) - skb_queue_head(&sk->sk_receive_queue, skb); - else - kfree_skb(skb); + for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { + skb = skb_dequeue(&sk->sk_receive_queue); + audit_receive_skb(skb); + kfree_skb(skb); } up(&audit_netlink_sem); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5fb70cf..6e1ab1e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -609,26 +609,31 @@ static inline int rtnetlink_rcv_skb(struct sk_buff *skb) /* * rtnetlink input queue processing routine: - * - try to acquire shared lock. If it is failed, defer processing. + * - process as much as there was in the queue upon entry. * - feed skbs to rtnetlink_rcv_skb, until it refuse a message, - * that will occur, when a dump started and/or acquisition of - * exclusive lock failed. + * that will occur, when a dump started. 
*/ static void rtnetlink_rcv(struct sock *sk, int len) { + unsigned int qlen = skb_queue_len(&sk->sk_receive_queue); + do { struct sk_buff *skb; - if (rtnl_shlock_nowait()) - return; + rtnl_lock(); + + if (qlen > skb_queue_len(&sk->sk_receive_queue)) + qlen = skb_queue_len(&sk->sk_receive_queue); - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + while (qlen--) { + skb = skb_dequeue(&sk->sk_receive_queue); if (rtnetlink_rcv_skb(skb)) { - if (skb->len) + if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); - else + qlen++; + } else kfree_skb(skb); break; } @@ -638,7 +643,7 @@ static void rtnetlink_rcv(struct sock *sk, int len) up(&rtnl_sem); netdev_run_todo(); - } while (rtnl && rtnl->sk_receive_queue.qlen); + } while (qlen); } static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] = diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index f86a625..101ddef 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -119,8 +119,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) static void dnrmg_receive_user_sk(struct sock *sk, int len) { struct sk_buff *skb; + unsigned int qlen = skb_queue_len(&sk->sk_receive_queue); - while((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) { dnrmg_receive_user_skb(skb); kfree_skb(skb); } diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 9e40dff..e5746b6 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -546,20 +546,18 @@ ipq_rcv_skb(struct sk_buff *skb) static void ipq_rcv_sk(struct sock *sk, int len) { - do { - struct sk_buff *skb; + struct sk_buff *skb; + unsigned int qlen; - if (down_trylock(&ipqnl_sem)) - return; + down(&ipqnl_sem); - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { - ipq_rcv_skb(skb); - kfree_skb(skb); - } + for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { + skb = skb_dequeue(&sk->sk_receive_queue); + ipq_rcv_skb(skb); + kfree_skb(skb); + } - up(&ipqnl_sem); - - } while (ipqnl && ipqnl->sk_receive_queue.qlen); + up(&ipqnl_sem); } static int diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 313c140..8faa894 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -777,8 +777,9 @@ static inline void tcpdiag_rcv_skb(struct sk_buff *skb) static void tcpdiag_rcv(struct sock *sk, int len) { struct sk_buff *skb; + unsigned int qlen = skb_queue_len(&sk->sk_receive_queue); - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) { tcpdiag_rcv_skb(skb); kfree_skb(skb); } diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index c54830b..750943e 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -549,20 +549,18 @@ ipq_rcv_skb(struct sk_buff *skb) static void ipq_rcv_sk(struct sock *sk, int len) { - do { - struct sk_buff *skb; + struct sk_buff *skb; + unsigned int qlen; - if (down_trylock(&ipqnl_sem)) - return; + down(&ipqnl_sem); - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { - ipq_rcv_skb(skb); - kfree_skb(skb); - } + for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { + skb = skb_dequeue(&sk->sk_receive_queue); + ipq_rcv_skb(skb); + kfree_skb(skb); + } - up(&ipqnl_sem); - - } while (ipqnl && ipqnl->sk_receive_queue.qlen); + up(&ipqnl_sem); } static int diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 
52b5843..dab112f 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1008,17 +1008,24 @@ static int xfrm_user_rcv_skb(struct sk_buff *skb) static void xfrm_netlink_rcv(struct sock *sk, int len) { + unsigned int qlen = skb_queue_len(&sk->sk_receive_queue); + do { struct sk_buff *skb; down(&xfrm_cfg_sem); - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { + if (qlen > skb_queue_len(&sk->sk_receive_queue)) + qlen = skb_queue_len(&sk->sk_receive_queue); + + while (qlen--) { + skb = skb_dequeue(&sk->sk_receive_queue); if (xfrm_user_rcv_skb(skb)) { - if (skb->len) + if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); - else + qlen++; + } else kfree_skb(skb); break; } @@ -1027,7 +1034,7 @@ static void xfrm_netlink_rcv(struct sock *sk, int len) up(&xfrm_cfg_sem); - } while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen); + } while (qlen); } static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard) -- cgit v0.10.2 From a493820df65909d344824499937c939150559ace Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 3 May 2005 22:57:56 +0100 Subject: [PATCH] ARM: 2661/1: imxfb include Patch from Sascha Hauer This patch adds the missing include files for the i.MX framebuffer driver. Signed-off-by: Sascha Hauer Signed-off-by: Russell King diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c index 54377d0..41e5849 100644 --- a/arch/arm/mach-imx/generic.c +++ b/arch/arm/mach-imx/generic.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -228,6 +229,14 @@ static struct platform_device imx_uart2_device = { .resource = imx_uart2_resources, }; +static struct imxfb_mach_info imx_fb_info; + +void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info) +{ + memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imxfb_mach_info)); +} +EXPORT_SYMBOL(set_imx_fb_info); + static struct resource imxfb_resources[] = { [0] = { .start = 0x00205000, @@ -241,9 +250,16 @@ static struct resource imxfb_resources[] = { }, }; +static u64 fb_dma_mask = ~(u64)0; + static struct platform_device imxfb_device = { .name = "imx-fb", .id = 0, + .dev = { + .platform_data = &imx_fb_info, + .dma_mask = &fb_dma_mask, + .coherent_dma_mask = 0xffffffff, + }, .num_resources = ARRAY_SIZE(imxfb_resources), .resource = imxfb_resources, }; -- cgit v0.10.2 From 48af7215405215e81e72aba1ae8031ca2fea840c Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 3 May 2005 22:57:56 +0100 Subject: [PATCH] ARM: 2662/1: missing "default y" for CONFIG_HAS_TLS_REG Patch from Nicolas Pitre Signed-off-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 007766a..27892e3 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -413,6 +413,7 @@ config CPU_BPREDICT_DISABLE config HAS_TLS_REG bool depends on CPU_32v6 && !CPU_32v5 && !CPU_32v4 && !CPU_32v3 + default y help This selects support for the CP15 thread register. It is defined to be available on ARMv6 or later. However -- cgit v0.10.2 From 09e14305982efc2f3b509d3c50ef5dcbff64a998 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 3 May 2005 15:30:05 -0700 Subject: [NETLINK]: Fix infinite loops in synchronous netlink changes. The qlen should continue to decrement, even if we pop partially processed SKBs back onto the receive queue. Signed-off-by: David S. 
Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 6e1ab1e..75b6d33 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -626,14 +626,13 @@ static void rtnetlink_rcv(struct sock *sk, int len) if (qlen > skb_queue_len(&sk->sk_receive_queue)) qlen = skb_queue_len(&sk->sk_receive_queue); - while (qlen--) { + for (; qlen; qlen--) { skb = skb_dequeue(&sk->sk_receive_queue); if (rtnetlink_rcv_skb(skb)) { - if (skb->len) { + if (skb->len) skb_queue_head(&sk->sk_receive_queue, skb); - qlen++; - } else + else kfree_skb(skb); break; } diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 101ddef..284a999 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -121,7 +121,7 @@ static void dnrmg_receive_user_sk(struct sock *sk, int len) struct sk_buff *skb; unsigned int qlen = skb_queue_len(&sk->sk_receive_queue); - while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) { + for (; qlen && (skb = skb_dequeue(&sk->sk_receive_queue)); qlen--) { dnrmg_receive_user_skb(skb); kfree_skb(skb); } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index dab112f..e8740a4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1018,14 +1018,13 @@ static void xfrm_netlink_rcv(struct sock *sk, int len) if (qlen > skb_queue_len(&sk->sk_receive_queue)) qlen = skb_queue_len(&sk->sk_receive_queue); - while (qlen--) { + for (; qlen; qlen--) { skb = skb_dequeue(&sk->sk_receive_queue); if (xfrm_user_rcv_skb(skb)) { - if (skb->len) { + if (skb->len) skb_queue_head(&sk->sk_receive_queue, skb); - qlen++; - } else + else kfree_skb(skb); break; } -- cgit v0.10.2 From 8df5a500a3e97f7811cdce0f553ca1917ccd4220 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 11 Apr 2005 13:45:00 -0700 Subject: [IA64] perfmon & PAL_HALT again The pmu_active test is based on the values of PSR.up. THIS IS THE PROBLEM as it does not take into account the lazy restore logic which is as follow (simplified): context switch out: save PMDs clear psr.up release ownership context switch in: if (ctx->last_cpu == smp_processor_id() && ctx->cpu_activation == cpu_activation) { set psr.up return } restore PMD restore PMC ctx->last_cpu = smp_processor_id(); ctx->activation = ++cpu_activation; set psr.up The key here is that on context switch out, we clear psr.up and on context switch in we check if nobody else used the PMU on that processor since last time we came. In that case, we assume the PMD/PMC are ours and we simply reactivate. The Caliper problem is that between the moment we context switch out and the moment we come back, nobody effectively used the PMU BUT the processor went idle. Normally this would have no incidence but PAL_HALT does alter the PMU registers. In default_idle(), the test on psr.up is not strong enough to cover this case and we go into PAL which trashed the PMU resgisters. When we come back we falsely assume that this is our state yet it is corrupted. Very nasty indeed. To avoid the problem it is necessary to forbid going to PAL_HALT as soon as perfmon installs some valid state in the PMU registers. This happens with an application attaches a context to a thread or CPU. It is not enough to check the psr/dcr bits. Hence I propose the attached patch. It adds a callback in process.c to modify the condition to enter PAL on idle. Basically, now it is conditional to pal_halt=1 AND perfmon saying it is okay. 
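Pulling the two halves of the fix together, the gate has roughly the following shape in C, simplified from the process.c hunk that follows (locking, the boot-option parsing, and the perfmon bookkeeping are omitted):

    /* arch/ia64/kernel/process.c, simplified */
    static int pal_halt        = 1;   /* cleared by the "nohalt" boot option */
    static int can_do_pal_halt = 1;   /* additionally gated by perfmon */

    /* perfmon calls this with 0 when the first session installs PMU state
     * and with 1 once the last session is released, because PAL_HALT can
     * clobber the PMU registers. */
    void update_pal_halt_status(int status)
    {
        can_do_pal_halt = pal_halt && status;
    }

    void default_idle(void)
    {
        while (!need_resched())
            if (can_do_pal_halt)
                safe_halt();          /* may enter PAL_HALT */
            else
                cpu_relax();          /* leaves the PMU state alone */
    }

The perfmon.c hunks below then call update_pal_halt_status(0) from pfm_reserve_session() and update_pal_halt_status(1) from pfm_unreserve_session() once no task or system-wide sessions remain.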
Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 376fcbc..fd4f3be 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -1265,6 +1265,8 @@ out: } EXPORT_SYMBOL(pfm_unregister_buffer_fmt); +extern void update_pal_halt_status(int); + static int pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) { @@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) is_syswide, cpu)); + /* + * disable default_idle() to go to PAL_HALT + */ + update_pal_halt_status(0); + UNLOCK_PFS(flags); return 0; @@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) is_syswide, cpu)); + /* + * if possible, enable default_idle() to go into PAL_HALT + */ + if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) + update_pal_halt_status(1); + UNLOCK_PFS(flags); return 0; diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index c0140f4..474d75f 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall ia64_do_signal(oldset, scr, in_syscall); } -static int pal_halt = 1; +static int pal_halt = 1; +static int can_do_pal_halt = 1; + static int __init nohalt_setup(char * str) { pal_halt = 0; @@ -181,16 +183,22 @@ static int __init nohalt_setup(char * str) } __setup("nohalt", nohalt_setup); +int +update_pal_halt_status(int status) +{ + can_do_pal_halt = pal_halt && status; +} + /* * We use this if we don't have any better idle routine.. */ void default_idle (void) { - unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP); + int can_do_pal; while (!need_resched()) - if (pal_halt && !pmu_active) + if (can_do_pal_halt) safe_halt(); else cpu_relax(); -- cgit v0.10.2 From a5a70b75d93b26e14c0c5e759099d602a480b9e2 Mon Sep 17 00:00:00 2001 From: stephane eranian Date: Mon, 18 Apr 2005 11:42:00 -0700 Subject: [IA64] another perfmon fix (take2) - pfm_context_load(): change return value from EINVAL to EBUSY when context is already loaded. - pfm_check_task_state(): pass test if context state is MASKED. It is safe to give access on PFM_CTX_MASKED because the PMU state (PMD) is stable and saved in software state. This helps multiplexing programs such as the example given in libpfm-3.1. 
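In C terms, the state check that pfm_check_task_state() ends up with looks roughly like the function below, condensed from the hunk that follows. The PFM_CTX_* constants are the real ones; the function name is illustrative and the surrounding stop-and-wait logic is elided:

    /* Decide whether a command may act on a context owned by another task. */
    static int pfm_state_allows(int state, int cmd)
    {
        switch (state) {
        case PFM_CTX_UNLOADED:
            return 0;                 /* no PMU state installed: always safe */
        case PFM_CTX_ZOMBIE:
            return -EINVAL;           /* nothing may operate on a zombie */
        case PFM_CTX_MASKED:
            /* PMU state is already saved to software, so everything but
             * an unload can proceed without stopping the monitored task. */
            if (cmd != PFM_UNLOAD_CONTEXT)
                return 0;
            break;
        }
        return 1;   /* caller must still run the existing slow-path checks */
    }
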
Signed-off-by: stephane eranian Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index fd4f3be..71c1016 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -4215,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", req->load_pid, ctx->ctx_state)); - return -EINVAL; + return -EBUSY; } DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); @@ -4717,16 +4717,26 @@ recheck: if (task == current || ctx->ctx_fl_system) return 0; /* - * if context is UNLOADED we are safe to go - */ - if (state == PFM_CTX_UNLOADED) return 0; - - /* - * no command can operate on a zombie context + * we are monitoring another thread */ - if (state == PFM_CTX_ZOMBIE) { - DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); - return -EINVAL; + switch(state) { + case PFM_CTX_UNLOADED: + /* + * if context is UNLOADED we are safe to go + */ + return 0; + case PFM_CTX_ZOMBIE: + /* + * no command can operate on a zombie context + */ + DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); + return -EINVAL; + case PFM_CTX_MASKED: + /* + * PMU state has been saved to software even though + * the thread may still be running. + */ + if (cmd != PFM_UNLOAD_CONTEXT) return 0; } /* -- cgit v0.10.2 From 0f4821e7b93fe72e89b8ff393bd8e705bd178aa5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 3 May 2005 16:15:59 -0700 Subject: [XFRM/RTNETLINK]: Decrement qlen properly in {xfrm_,rt}netlink_rcv(). If we free up a partially processed packet because it's skb->len dropped to zero, we need to decrement qlen because we are dropping out of the top-level loop so it will do the decrement for us. Spotted by Herbert Xu. Signed-off-by: David S. Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 75b6d33..00caf4b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -632,8 +632,10 @@ static void rtnetlink_rcv(struct sock *sk, int len) if (skb->len) skb_queue_head(&sk->sk_receive_queue, skb); - else + else { kfree_skb(skb); + qlen--; + } break; } kfree_skb(skb); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e8740a4..5ddda2c 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1024,8 +1024,10 @@ static void xfrm_netlink_rcv(struct sock *sk, int len) if (skb->len) skb_queue_head(&sk->sk_receive_queue, skb); - else + else { kfree_skb(skb); + qlen--; + } break; } kfree_skb(skb); -- cgit v0.10.2 From cacaddf57ed4d5ca994e9a7e2bd5558061f5d89d Mon Sep 17 00:00:00 2001 From: "Tommy S. Christensen" Date: Tue, 3 May 2005 16:18:52 -0700 Subject: [NET]: Disable queueing when carrier is lost. Some network drivers call netif_stop_queue() when detecting loss of carrier. This leads to packets being queued up at the qdisc level for an unbound period of time. In order to prevent this effect, the core networking stack will now cease to queue packets for any device, that is operationally down (i.e. the queue is flushed and disabled). Signed-off-by: Tommy S. Christensen Acked-by: Herbert Xu Signed-off-by: David S. 
Miller diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 4859b74..d43d120 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -74,6 +75,12 @@ void linkwatch_run_queue(void) clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); if (dev->flags & IFF_UP) { + if (netif_carrier_ok(dev)) { + WARN_ON(dev->qdisc_sleeping == &noop_qdisc); + dev_activate(dev); + } else + dev_deactivate(dev); + netdev_state_change(dev); } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 8c01e02..9a2f8e4 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -539,6 +539,10 @@ void dev_activate(struct net_device *dev) write_unlock_bh(&qdisc_tree_lock); } + if (!netif_carrier_ok(dev)) + /* Delay activation until next carrier-on event */ + return; + spin_lock_bh(&dev->queue_lock); rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping); if (dev->qdisc != &noqueue_qdisc) { -- cgit v0.10.2 From e4f8ab00cf3599ecb8110c0a838cd15d013b79e5 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 3 May 2005 16:20:39 -0700 Subject: [NETFILTER]: Fix nf_debug_ip_local_deliver() Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller diff --git a/net/core/netfilter.c b/net/core/netfilter.c index e51cfa4..92c5182 100644 --- a/net/core/netfilter.c +++ b/net/core/netfilter.c @@ -217,21 +217,10 @@ void nf_debug_ip_local_deliver(struct sk_buff *skb) * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING. */ if (!skb->dev) { printk("ip_local_deliver: skb->dev is NULL.\n"); - } - else if (strcmp(skb->dev->name, "lo") == 0) { - if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT) - | (1 << NF_IP_POST_ROUTING) - | (1 << NF_IP_PRE_ROUTING) - | (1 << NF_IP_LOCAL_IN))) { - printk("ip_local_deliver: bad loopback skb: "); - debug_print_hooks_ip(skb->nf_debug); - nf_dump_skb(PF_INET, skb); - } - } - else { + } else { if (skb->nf_debug != ((1<nf_debug); nf_dump_skb(PF_INET, skb); } -- cgit v0.10.2 From bd96535b81ad09d7593cc75093534acb984d3dc9 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 3 May 2005 16:21:37 -0700 Subject: [NETFILTER]: Drop conntrack reference in ip_dev_loopback_xmit() Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller diff --git a/net/core/netfilter.c b/net/core/netfilter.c index 92c5182..22a8f12 100644 --- a/net/core/netfilter.c +++ b/net/core/netfilter.c @@ -236,8 +236,6 @@ void nf_debug_ip_loopback_xmit(struct sk_buff *newskb) debug_print_hooks_ip(newskb->nf_debug); nf_dump_skb(PF_INET, newskb); } - /* Clear to avoid confusing input check */ - newskb->nf_debug = 0; } void nf_debug_ip_finish_output2(struct sk_buff *skb) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 38f6953..24fe3e0 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -111,6 +111,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) #ifdef CONFIG_NETFILTER_DEBUG nf_debug_ip_loopback_xmit(newskb); #endif + nf_reset(newskb); netif_rx(newskb); return 0; } -- cgit v0.10.2 From a71f62edc935fbdc346c5b16bea19f1480d29635 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 3 May 2005 16:21:45 -0700 Subject: [IA64] Fix two warnings introduced by perfmon patches. 
Signed-off-by: Tony Luck diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 474d75f..ebb71f3 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -183,7 +183,7 @@ static int __init nohalt_setup(char * str) } __setup("nohalt", nohalt_setup); -int +void update_pal_halt_status(int status) { can_do_pal_halt = pal_halt && status; @@ -195,8 +195,6 @@ update_pal_halt_status(int status) void default_idle (void) { - int can_do_pal; - while (!need_resched()) if (can_do_pal_halt) safe_halt(); -- cgit v0.10.2 From 8cbe1d46d69f9e2c49f284fe0e9aee3387bd2c71 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 3 May 2005 16:24:03 -0700 Subject: [PKT_SCHED]: netem: trap infinite loop hang on qlen underflow Due to bugs in netem (fixed by later patches), it is possible to get the qdisc qlen to go negative. If this happens the CPU ends up spinning forever in qdisc_run(). So add a BUG_ON() to trap it. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 9a2f8e4..87e48a4 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -179,6 +179,7 @@ requeue: netif_schedule(dev); return 1; } + BUG_ON((int) q->q.qlen < 0); return q->q.qlen; } -- cgit v0.10.2 From 771018e76aaa6474be20a53c20458bcae8b00485 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 3 May 2005 16:24:32 -0700 Subject: [PKT_SCHED]: netem: make qdisc friendly to outer disciplines Netem currently dumps packets into the queue when the timer expires. This patch makes it work by self-clocking (more like TBF). It fixes a bug when 0 delay is requested (only doing loss or duplication). Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 31c29de..864b8d3 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -138,38 +138,78 @@ static long tabledist(unsigned long mu, long sigma, } /* Put skb in the private delayed queue. */ -static int delay_skb(struct Qdisc *sch, struct sk_buff *skb) +static int netem_delay(struct Qdisc *sch, struct sk_buff *skb) { struct netem_sched_data *q = qdisc_priv(sch); - struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; psched_tdiff_t td; psched_time_t now; PSCHED_GET_TIME(now); td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist); - PSCHED_TADD2(now, td, cb->time_to_send); /* Always queue at tail to keep packets in order */ if (likely(q->delayed.qlen < q->limit)) { + struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; + + PSCHED_TADD2(now, td, cb->time_to_send); + + pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb, + now, cb->time_to_send); + + __skb_queue_tail(&q->delayed, skb); - if (!timer_pending(&q->timer)) { - q->timer.expires = jiffies + PSCHED_US2JIFFIE(td); - add_timer(&q->timer); - } return NET_XMIT_SUCCESS; } + pr_debug("netem_delay: queue over limit %d\n", q->limit); + sch->qstats.overlimits++; kfree_skb(skb); return NET_XMIT_DROP; } +/* + * Move a packet that is ready to send from the delay holding + * list to the underlying qdisc.
+ */ +static int netem_run(struct Qdisc *sch) +{ + struct netem_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + psched_time_t now; + + PSCHED_GET_TIME(now); + + skb = skb_peek(&q->delayed); + if (skb) { + const struct netem_skb_cb *cb + = (const struct netem_skb_cb *)skb->cb; + long delay + = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now)); + pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay); + + /* if more time remaining? */ + if (delay > 0) { + mod_timer(&q->timer, jiffies + delay); + return 1; + } + + __skb_unlink(skb, &q->delayed); + + if (q->qdisc->enqueue(skb, q->qdisc)) { + sch->q.qlen--; + sch->qstats.drops++; + } + } + + return 0; +} + static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); struct sk_buff *skb2; int ret; - pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies); + pr_debug("netem_enqueue skb=%p\n", skb); /* Random packet drop 0 => none, ~0 => all */ if (q->loss && q->loss >= get_crandom(&q->loss_cor)) { @@ -184,7 +224,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { pr_debug("netem_enqueue: dup %p\n", skb2); - if (delay_skb(sch, skb2)) { + if (netem_delay(sch, skb2)) { sch->q.qlen++; sch->bstats.bytes += skb2->len; sch->bstats.packets++; @@ -202,7 +242,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = q->qdisc->enqueue(skb, q->qdisc); } else { q->counter = 0; - ret = delay_skb(sch, skb); + ret = netem_delay(sch, skb); + netem_run(sch); } if (likely(ret == NET_XMIT_SUCCESS)) { @@ -241,56 +282,35 @@ static unsigned int netem_drop(struct Qdisc* sch) return len; } -/* Dequeue packet. - * Move all packets that are ready to send from the delay holding - * list to the underlying qdisc, then just call dequeue - */ static struct sk_buff *netem_dequeue(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; + int pending; + + pending = netem_run(sch); skb = q->qdisc->dequeue(q->qdisc); - if (skb) + if (skb) { + pr_debug("netem_dequeue: return skb=%p\n", skb); sch->q.qlen--; + sch->flags &= ~TCQ_F_THROTTLED; + } + else if (pending) { + pr_debug("netem_dequeue: throttling\n"); + sch->flags |= TCQ_F_THROTTLED; + } + return skb; } static void netem_watchdog(unsigned long arg) { struct Qdisc *sch = (struct Qdisc *)arg; - struct netem_sched_data *q = qdisc_priv(sch); - struct net_device *dev = sch->dev; - struct sk_buff *skb; - psched_time_t now; - pr_debug("netem_watchdog: fired @%lu\n", jiffies); - - spin_lock_bh(&dev->queue_lock); - PSCHED_GET_TIME(now); - - while ((skb = skb_peek(&q->delayed)) != NULL) { - const struct netem_skb_cb *cb - = (const struct netem_skb_cb *)skb->cb; - long delay - = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now)); - pr_debug("netem_watchdog: skb %p@%lu %ld\n", - skb, jiffies, delay); - - /* if more time remaining? 
*/ - if (delay > 0) { - mod_timer(&q->timer, jiffies + delay); - break; - } - __skb_unlink(skb, &q->delayed); - - if (q->qdisc->enqueue(skb, q->qdisc)) { - sch->q.qlen--; - sch->qstats.drops++; - } - } - qdisc_run(dev); - spin_unlock_bh(&dev->queue_lock); + pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen); + sch->flags &= ~TCQ_F_THROTTLED; + netif_schedule(sch->dev); } static void netem_reset(struct Qdisc *sch) @@ -301,6 +321,7 @@ static void netem_reset(struct Qdisc *sch) skb_queue_purge(&q->delayed); sch->q.qlen = 0; + sch->flags &= ~TCQ_F_THROTTLED; del_timer_sync(&q->timer); } -- cgit v0.10.2 From d5d75cd6b10ddad2f375b61092754474ad78aec7 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 3 May 2005 16:24:57 -0700 Subject: [PKT_SCHED]: netem: adjust parent qlen when duplicating Fix qlen underrun when doing duplication with netem. If netem is used as a leaf discipline, then the parent needs to be tweaked when packets are duplicated. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 4323a74..07977f8 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1289,6 +1289,7 @@ static int __init pktsched_init(void) subsys_initcall(pktsched_init); +EXPORT_SYMBOL(qdisc_lookup); EXPORT_SYMBOL(qdisc_get_rtab); EXPORT_SYMBOL(qdisc_put_rtab); EXPORT_SYMBOL(register_qdisc); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 864b8d3..e0c9fbe 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -206,7 +206,6 @@ static int netem_run(struct Qdisc *sch) static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); - struct sk_buff *skb2; int ret; pr_debug("netem_enqueue skb=%p\n", skb); @@ -220,11 +219,21 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } /* Random duplication */ - if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor) - && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { - pr_debug("netem_enqueue: dup %p\n", skb2); + if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) { + struct sk_buff *skb2; + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) { + struct Qdisc *qp; + + /* Since one packet can generate two packets in the + * queue, the parent's qlen accounting gets confused, + * so fix it. + */ + qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent)); + if (qp) + qp->q.qlen++; - if (netem_delay(sch, skb2)) { sch->q.qlen++; sch->bstats.bytes += skb2->len; sch->bstats.packets++; @@ -253,6 +262,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } else sch->qstats.drops++; + pr_debug("netem: enqueue ret %d\n", ret); return ret; } -- cgit v0.10.2 From aabc9761b69f1bfa30a78f7005be95cc9cc06175 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 May 2005 16:27:10 -0700 Subject: [IPSEC]: Store idev entries I found a bug that stopped IPsec/IPv6 from working. About a month ago IPv6 started using rt6i_idev->dev on the cached socket dst entries. If the cached socket dst entry is IPsec, then rt6i_idev will be NULL. Since we want to look at the rt6i_idev of the original route in this case, the easiest fix is to store rt6i_idev in the IPsec dst entry just as we do for a number of other IPv6 route attributes. Unfortunately this means that we need some new code to handle the references to rt6i_idev. That's why this patch is bigger than it would otherwise be.
I've also done the same thing for IPv4 since it is conceivable that once these idev attributes start getting used for accounting, we probably need to dereference them for IPv4 IPsec entries too. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 73e9a8c..e142a25 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1,6 +1,7 @@ #ifndef _NET_XFRM_H #define _NET_XFRM_H +#include #include #include #include @@ -516,6 +517,15 @@ struct xfrm_dst u32 child_mtu_cached; }; +static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) +{ + dst_release(xdst->route); + if (likely(xdst->u.dst.xfrm)) + xfrm_state_put(xdst->u.dst.xfrm); +} + +extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); + /* Decapsulation state, used by the input to store data during * decapsulation procedure, to be used later (during the policy * check diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 7fe2afd..b2b60f3 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -8,7 +8,10 @@ * */ +#include +#include #include +#include #include #include @@ -152,6 +155,8 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int x->u.rt.rt_dst = rt0->rt_dst; x->u.rt.rt_gateway = rt->rt_gateway; x->u.rt.rt_spec_dst = rt0->rt_spec_dst; + x->u.rt.idev = rt0->idev; + in_dev_hold(rt0->idev); header_len -= x->u.dst.xfrm->props.header_len; trailer_len -= x->u.dst.xfrm->props.trailer_len; } @@ -243,11 +248,48 @@ static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) path->ops->update_pmtu(path, mtu); } +static void xfrm4_dst_destroy(struct dst_entry *dst) +{ + struct xfrm_dst *xdst = (struct xfrm_dst *)dst; + + if (likely(xdst->u.rt.idev)) + in_dev_put(xdst->u.rt.idev); + xfrm_dst_destroy(xdst); +} + +static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, + int unregister) +{ + struct xfrm_dst *xdst; + + if (!unregister) + return; + + xdst = (struct xfrm_dst *)dst; + if (xdst->u.rt.idev->dev == dev) { + struct in_device *loopback_idev = in_dev_get(&loopback_dev); + BUG_ON(!loopback_idev); + + do { + in_dev_put(xdst->u.rt.idev); + xdst->u.rt.idev = loopback_idev; + in_dev_hold(loopback_idev); + xdst = (struct xfrm_dst *)xdst->u.dst.child; + } while (xdst->u.dst.xfrm); + + __in_dev_put(loopback_idev); + } + + xfrm_dst_ifdown(dst, dev); +} + static struct dst_ops xfrm4_dst_ops = { .family = AF_INET, .protocol = __constant_htons(ETH_P_IP), .gc = xfrm4_garbage_collect, .update_pmtu = xfrm4_update_pmtu, + .destroy = xfrm4_dst_destroy, + .ifdown = xfrm4_dst_ifdown, .gc_thresh = 1024, .entry_size = sizeof(struct xfrm_dst), }; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 8a4f37d..4429b1a 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -11,7 +11,11 @@ * */ +#include +#include #include +#include +#include #include #include #include @@ -166,6 +170,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int memcpy(&x->u.rt6.rt6i_gateway, &rt0->rt6i_gateway, sizeof(x->u.rt6.rt6i_gateway)); x->u.rt6.rt6i_dst = rt0->rt6i_dst; x->u.rt6.rt6i_src = rt0->rt6i_src; + x->u.rt6.rt6i_idev = rt0->rt6i_idev; + in6_dev_hold(rt0->rt6i_idev); header_len -= x->u.dst.xfrm->props.header_len; trailer_len -= x->u.dst.xfrm->props.trailer_len; } @@ -251,11 +257,48 @@ static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) path->ops->update_pmtu(path, mtu); } +static void xfrm6_dst_destroy(struct dst_entry *dst) +{ + struct 
xfrm_dst *xdst = (struct xfrm_dst *)dst; + + if (likely(xdst->u.rt6.rt6i_idev)) + in6_dev_put(xdst->u.rt6.rt6i_idev); + xfrm_dst_destroy(xdst); +} + +static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, + int unregister) +{ + struct xfrm_dst *xdst; + + if (!unregister) + return; + + xdst = (struct xfrm_dst *)dst; + if (xdst->u.rt6.rt6i_idev->dev == dev) { + struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev); + BUG_ON(!loopback_idev); + + do { + in6_dev_put(xdst->u.rt6.rt6i_idev); + xdst->u.rt6.rt6i_idev = loopback_idev; + in6_dev_hold(loopback_idev); + xdst = (struct xfrm_dst *)xdst->u.dst.child; + } while (xdst->u.dst.xfrm); + + __in6_dev_put(loopback_idev); + } + + xfrm_dst_ifdown(dst, dev); +} + static struct dst_ops xfrm6_dst_ops = { .family = AF_INET6, .protocol = __constant_htons(ETH_P_IPV6), .gc = xfrm6_garbage_collect, .update_pmtu = xfrm6_update_pmtu, + .destroy = xfrm6_dst_destroy, + .ifdown = xfrm6_dst_ifdown, .gc_thresh = 1024, .entry_size = sizeof(struct xfrm_dst), }; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8082807..55ed979 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1028,30 +1028,15 @@ static int stale_bundle(struct dst_entry *dst) return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC); } -static void xfrm_dst_destroy(struct dst_entry *dst) +void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) { - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - - dst_release(xdst->route); - - if (!dst->xfrm) - return; - xfrm_state_put(dst->xfrm); - dst->xfrm = NULL; -} - -static void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev, - int unregister) -{ - if (!unregister) - return; - while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { dst->dev = &loopback_dev; dev_hold(&loopback_dev); dev_put(dev); } } +EXPORT_SYMBOL(xfrm_dst_ifdown); static void xfrm_link_failure(struct sk_buff *skb) { @@ -1262,10 +1247,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->kmem_cachep = xfrm_dst_cache; if (likely(dst_ops->check == NULL)) dst_ops->check = xfrm_dst_check; - if (likely(dst_ops->destroy == NULL)) - dst_ops->destroy = xfrm_dst_destroy; - if (likely(dst_ops->ifdown == NULL)) - dst_ops->ifdown = xfrm_dst_ifdown; if (likely(dst_ops->negative_advice == NULL)) dst_ops->negative_advice = xfrm_negative_advice; if (likely(dst_ops->link_failure == NULL)) @@ -1297,8 +1278,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) xfrm_policy_afinfo[afinfo->family] = NULL; dst_ops->kmem_cachep = NULL; dst_ops->check = NULL; - dst_ops->destroy = NULL; - dst_ops->ifdown = NULL; dst_ops->negative_advice = NULL; dst_ops->link_failure = NULL; dst_ops->get_mss = NULL; -- cgit v0.10.2 From c4b07b7b3693228c7b848c00a9d4bbb49506a45f Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 3 May 2005 16:27:44 -0700 Subject: [IA64] Update arch/ia64/configs/tiger_defconfig Kristen did most of the checking, bring this up to -rc2. 
Signed-off-by: Tony Luck diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index 99830e8..9086b78 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.11-rc2 -# Sat Jan 22 11:17:02 2005 +# Linux kernel version: 2.6.12-rc3 +# Tue May 3 15:55:04 2005 # # @@ -10,6 +10,7 @@ CONFIG_EXPERIMENTAL=y CONFIG_CLEAN_COMPILE=y CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 # # General setup @@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_SYSCTL=y # CONFIG_AUDIT is not set -CONFIG_LOG_BUF_SHIFT=20 CONFIG_HOTPLUG=y CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +# CONFIG_CPUSETS is not set # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y # CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_BASE_FULL=y CONFIG_FUTEX=y CONFIG_EPOLL=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SHMEM=y CONFIG_CC_ALIGN_FUNCTIONS=0 CONFIG_CC_ALIGN_LABELS=0 CONFIG_CC_ALIGN_LOOPS=0 CONFIG_CC_ALIGN_JUMPS=0 # CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 # # Loadable module support @@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18 CONFIG_SMP=y CONFIG_NR_CPUS=4 CONFIG_HOTPLUG_CPU=y +# CONFIG_SCHED_SMT is not set # CONFIG_PREEMPT is not set CONFIG_HAVE_DEC_LOCK=y CONFIG_IA32_SUPPORT=y @@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y # CONFIG_PCI_MSI is not set CONFIG_PCI_LEGACY_PROC=y CONFIG_PCI_NAMES=y +# CONFIG_PCI_DEBUG is not set # # PCI Hotplug Support @@ -152,10 +158,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m # CONFIG_PCCARD is not set # -# PC-card bridges -# - -# # Device Drivers # @@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m # CONFIG_BLK_DEV_SX8 is not set # CONFIG_BLK_DEV_UB is not set -CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set @@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y # CONFIG_SCSI_BUSLOGIC is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_EATA is not set -# CONFIG_SCSI_EATA_PIO is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_GDTH is not set # CONFIG_SCSI_IPS is not set @@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set # CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_ISP is not set CONFIG_SCSI_QLOGIC_FC=y # CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set CONFIG_SCSI_QLOGIC_1280=y @@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m CONFIG_SCSI_QLA2300=m CONFIG_SCSI_QLA2322=m # CONFIG_SCSI_QLA6312 is not set +# CONFIG_SCSI_LPFC is not set # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_DEBUG is not set @@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m +# CONFIG_DM_MULTIPATH is not set # # Fusion MPT device support @@ -386,7 +389,6 @@ CONFIG_NET=y # CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set -CONFIG_NETLINK_DEV=y CONFIG_UNIX=y # CONFIG_NET_KEY is not set CONFIG_INET=y @@ -446,7 +448,6 @@ CONFIG_DUMMY=m # CONFIG_BONDING is not set # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set -# CONFIG_ETHERTAP is not set # # ARCnet devices @@ -484,7 +485,6 @@ CONFIG_NET_PCI=y # CONFIG_DGRS is not set CONFIG_EEPRO100=m CONFIG_E100=m -# CONFIG_E100_NAPI is not set # CONFIG_FEALNX is not set # CONFIG_NATSEMI is not set # CONFIG_NE2K_PCI is not 
set @@ -566,25 +566,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 # CONFIG_INPUT_EVBUG is not set # -# Input I/O drivers -# -CONFIG_GAMEPORT=m -CONFIG_SOUND_GAMEPORT=m -# CONFIG_GAMEPORT_NS558 is not set -# CONFIG_GAMEPORT_L4 is not set -# CONFIG_GAMEPORT_EMU10K1 is not set -# CONFIG_GAMEPORT_VORTEX is not set -# CONFIG_GAMEPORT_FM801 is not set -# CONFIG_GAMEPORT_CS461X is not set -CONFIG_SERIO=y -CONFIG_SERIO_I8042=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_SERIO_CT82C710 is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set - -# # Input Device Drivers # CONFIG_INPUT_KEYBOARD=y @@ -602,6 +583,24 @@ CONFIG_MOUSE_PS2=y # CONFIG_INPUT_MISC is not set # +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +CONFIG_GAMEPORT=m +# CONFIG_GAMEPORT_NS558 is not set +# CONFIG_GAMEPORT_L4 is not set +# CONFIG_GAMEPORT_EMU10K1 is not set +# CONFIG_GAMEPORT_VORTEX is not set +# CONFIG_GAMEPORT_FM801 is not set +# CONFIG_GAMEPORT_CS461X is not set +CONFIG_SOUND_GAMEPORT=m + +# # Character devices # CONFIG_VT=y @@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y # CONFIG_SYNCLINK is not set # CONFIG_SYNCLINKMP is not set # CONFIG_N_HDLC is not set +# CONFIG_SPECIALIX is not set +# CONFIG_SX is not set # CONFIG_STALDRV is not set # @@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 @@ -670,6 +672,12 @@ CONFIG_HPET=y # CONFIG_HPET_RTC_IRQ is not set CONFIG_HPET_MMAP=y CONFIG_MAX_RAW_DEVS=256 +# CONFIG_HANGCHECK_TIMER is not set + +# +# TPM devices +# +# CONFIG_TCG_TPM is not set # # I2C support @@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256 # CONFIG_VGA_CONSOLE=y CONFIG_DUMMY_CONSOLE=y -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set # # Sound @@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y # # USB support # +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y CONFIG_USB=y # CONFIG_USB_DEBUG is not set @@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y # CONFIG_USB_DYNAMIC_MINORS is not set # CONFIG_USB_SUSPEND is not set # CONFIG_USB_OTG is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB_ARCH_HAS_OHCI=y # # USB Host Controller Drivers @@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m # CONFIG_USB_EHCI_SPLIT_ISO is not set # CONFIG_USB_EHCI_ROOT_HUB_TT is not set CONFIG_USB_OHCI_HCD=m +# CONFIG_USB_OHCI_BIG_ENDIAN is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_USB_UHCI_HCD=y # CONFIG_USB_SL811_HCD is not set @@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y # CONFIG_USB_STORAGE=m # CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_RW_DETECT is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set # CONFIG_USB_STORAGE_DPCM is not set -# CONFIG_USB_STORAGE_HP8200e is not set +# CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set # CONFIG_USB_STORAGE_JUMPSHOT is not set @@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y # CONFIG_USB_PEGASUS is not set # CONFIG_USB_RTL8150 is not set # CONFIG_USB_USBNET is not set +# CONFIG_USB_MON is not set # # USB port drivers @@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y # CONFIG_USB_PHIDGETKIT is not set # CONFIG_USB_PHIDGETSERVO is not set # CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_SISUSBVGA is not set # CONFIG_USB_TEST is not set # @@ -867,7 +877,12 
@@ CONFIG_REISERFS_FS_POSIX_ACL=y CONFIG_REISERFS_FS_SECURITY=y # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y + +# +# XFS support +# CONFIG_XFS_FS=y +CONFIG_XFS_EXPORT=y # CONFIG_XFS_RT is not set # CONFIG_XFS_QUOTA is not set # CONFIG_XFS_SECURITY is not set @@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y CONFIG_NFSD_TCP=y CONFIG_LOCKD=m CONFIG_LOCKD_V4=y -CONFIG_EXPORTFS=m +CONFIG_EXPORTFS=y CONFIG_SUNRPC=m CONFIG_SUNRPC_GSS=m CONFIG_RPCSEC_GSS_KRB5=m @@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y # # Kernel hacking # +# CONFIG_PRINTK_TIME is not set CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y +CONFIG_LOG_BUF_SHIFT=20 # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m # CONFIG_CRYPTO_SHA256 is not set # CONFIG_CRYPTO_SHA512 is not set # CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_TGR192 is not set CONFIG_CRYPTO_DES=m # CONFIG_CRYPTO_BLOWFISH is not set # CONFIG_CRYPTO_TWOFISH is not set -- cgit v0.10.2 From 14d50e78f947d340066ee0465dd892ad1d9162c0 Mon Sep 17 00:00:00 2001 From: J Hadi Salim Date: Tue, 3 May 2005 16:29:13 -0700 Subject: [PKT_SCHED]: Action repeat Long standing bug. Policy to repeat an action never worked. Signed-off-by: J Hadi Salim Signed-off-by: David S. Miller diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 5e6cc37..cafcb08 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -171,10 +171,10 @@ repeat: skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd); } - if (ret != TC_ACT_PIPE) - goto exec_done; if (ret == TC_ACT_REPEAT) goto repeat; /* we need a ttl - JHS */ + if (ret != TC_ACT_PIPE) + goto exec_done; } act = a->next; } -- cgit v0.10.2 From cee2824f85414c98fff4004e335a6bc4072c8809 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 3 May 2005 22:04:36 -0700 Subject: [SPARC64]: Fix goal_cpu tracking in retarget_one_irq(). We would never advance the goal_cpu counter like we should, so all IRQs would go to a single processor. Signed-off-by: David S. Miller diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index a38cb50..ab2f368 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -1007,10 +1007,10 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu) } upa_writel(tid | IMAP_VALID, imap); - while (!cpu_online(goal_cpu)) { + do { if (++goal_cpu >= NR_CPUS) goal_cpu = 0; - } + } while (!cpu_online(goal_cpu)); return goal_cpu; } -- cgit v0.10.2 From 41832a08feca695158e15a6e58c26b224a7bfae2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 3 May 2005 22:05:43 -0700 Subject: [SPARC64]: Disable IRQ forwarding. There is some race whereby IRQs get stuck, the IRQ status is pending but no processor actually handles the IRQ vector and thus the interrupt. This is a temporary workaround. Signed-off-by: David S. Miller diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index ab2f368..4dcb8af 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -756,7 +756,7 @@ void handler_irq(int irq, struct pt_regs *regs) clear_softint(clr_mask); } #else - int should_forward = 1; + int should_forward = 0; clear_softint(1 << irq); #endif -- cgit v0.10.2 From 6a800d456a81a9046634bcd26d868fd537f0c9ae Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Tue, 3 May 2005 22:17:18 -0700 Subject: [IPV6]: net/addrconf.h needs to include linux/in6.h earlier Else the in6_addr layout is not known for struct prefix_info. 
Signed-off-by: Patrick McHardy diff --git a/include/net/addrconf.h b/include/net/addrconf.h index f1e5af4..a0ed936 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -17,6 +17,8 @@ #define IPV6_MAX_ADDRESSES 16 +#include + struct prefix_info { __u8 type; __u8 length; @@ -43,7 +45,6 @@ struct prefix_info { #ifdef __KERNEL__ -#include #include #include #include -- cgit v0.10.2 From e2e66446e08a7a365a59e25bbc8dd320ab3da0a6 Mon Sep 17 00:00:00 2001 From: Matthew Dharm Date: Mon, 25 Apr 2005 21:46:29 -0700 Subject: [PATCH] USB Storage: fix compile error This patch fixes a compiler error caused by a missing prototype. It should apply directly to Greg KH's usb-2.6.git tree. Signed-off-by: Matthew Dharm Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c index d764837..5a93217 100644 --- a/drivers/usb/storage/debug.c +++ b/drivers/usb/storage/debug.c @@ -47,6 +47,7 @@ #include #include #include +#include #include "debug.h" #include "scsi.h" -- cgit v0.10.2 From 56c1e26d75008d39f1067f453719857a81109d9f Mon Sep 17 00:00:00 2001 From: David Brownell Date: Sat, 9 Apr 2005 09:00:29 -0700 Subject: [PATCH] USB: ehci power fixes Miscellaneous updates for EHCI. - Mostly updates the power switching on EHCI controllers. One routine centralizes the "power on/off all ports" logic, and the capability to do that is reported more correctly. - Courtesy Colin Leroy, a patch to always power up ports after resumes which didn't keep a USB device suspended. The reset-everything logic powers down those ports (on some hardware) so something needs to turn them back on. - Minor tweaks/bugfixes for the debug port support. Signed-off-by: David Brownell Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 84d2b93..bc69bd7 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -346,6 +346,22 @@ ehci_reboot (struct notifier_block *self, unsigned long code, void *null) return 0; } +static void ehci_port_power (struct ehci_hcd *ehci, int is_on) +{ + unsigned port; + + if (!HCS_PPC (ehci->hcs_params)) + return; + + ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down"); + for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; ) + (void) ehci_hub_control(ehci_to_hcd(ehci), + is_on ? SetPortFeature : ClearPortFeature, + USB_PORT_FEAT_POWER, + port--, NULL, 0); + msleep(20); +} + /* called by khubd or root hub init threads */ @@ -362,8 +378,10 @@ static int ehci_hc_reset (struct usb_hcd *hcd) dbg_hcs_params (ehci, "reset"); dbg_hcc_params (ehci, "reset"); + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = readl (&ehci->caps->hcs_params); + #ifdef CONFIG_PCI - /* EHCI 0.96 and later may have "extended capabilities" */ if (hcd->self.controller->bus == &pci_bus_type) { struct pci_dev *pdev = to_pci_dev(hcd->self.controller); @@ -383,9 +401,30 @@ static int ehci_hc_reset (struct usb_hcd *hcd) break; } + /* optional debug port, normally in the first BAR */ + temp = pci_find_capability (pdev, 0x0a); + if (temp) { + pci_read_config_dword(pdev, temp, &temp); + temp >>= 16; + if ((temp & (3 << 13)) == (1 << 13)) { + temp &= 0x1fff; + ehci->debug = hcd->regs + temp; + temp = readl (&ehci->debug->control); + ehci_info (ehci, "debug port %d%s\n", + HCS_DEBUG_PORT(ehci->hcs_params), + (temp & DBGP_ENABLED) + ? 
" IN USE" + : ""); + if (!(temp & DBGP_ENABLED)) + ehci->debug = NULL; + } + } + temp = HCC_EXT_CAPS (readl (&ehci->caps->hcc_params)); } else temp = 0; + + /* EHCI 0.96 and later may have "extended capabilities" */ while (temp && count--) { u32 cap; @@ -414,8 +453,7 @@ static int ehci_hc_reset (struct usb_hcd *hcd) ehci_reset (ehci); #endif - /* cache this readonly data; minimize PCI reads */ - ehci->hcs_params = readl (&ehci->caps->hcs_params); + ehci_port_power (ehci, 0); /* at least the Genesys GL880S needs fixup here */ temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); @@ -657,16 +695,11 @@ done2: static void ehci_stop (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); - u8 rh_ports, port; ehci_dbg (ehci, "stop\n"); /* Turn off port power on all root hub ports. */ - rh_ports = HCS_N_PORTS (ehci->hcs_params); - for (port = 1; port <= rh_ports; port++) - (void) ehci_hub_control(hcd, - ClearPortFeature, USB_PORT_FEAT_POWER, - port, NULL, 0); + ehci_port_power (ehci, 0); /* no more interrupts ... */ del_timer_sync (&ehci->watchdog); @@ -748,7 +781,6 @@ static int ehci_resume (struct usb_hcd *hcd) unsigned port; struct usb_device *root = hcd->self.root_hub; int retval = -EINVAL; - int powerup = 0; // maybe restore (PCI) FLADJ @@ -766,8 +798,6 @@ static int ehci_resume (struct usb_hcd *hcd) up (&hcd->self.root_hub->serialize); break; } - if ((status & PORT_POWER) == 0) - powerup = 1; if (!root->children [port]) continue; dbg_port (ehci, __FUNCTION__, port + 1, status); @@ -794,16 +824,9 @@ static int ehci_resume (struct usb_hcd *hcd) retval = ehci_start (hcd); /* here we "know" root ports should always stay powered; - * but some controllers may lost all power. + * but some controllers may lose all power. */ - if (powerup) { - ehci_dbg (ehci, "...powerup ports...\n"); - for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; ) - (void) ehci_hub_control(hcd, - SetPortFeature, USB_PORT_FEAT_POWER, - port--, NULL, 0); - msleep(20); - } + ehci_port_power (ehci, 1); } return retval; diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 2373537..02fefab 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -281,6 +281,8 @@ ehci_hub_descriptor ( temp = 0x0008; /* per-port overcurrent reporting */ if (HCS_PPC (ehci->hcs_params)) temp |= 0x0001; /* per-port power control */ + else + temp |= 0x0002; /* no power switching */ #if 0 // re-enable when we support USB_PORT_FEAT_INDICATOR below. if (HCS_INDICATOR (ehci->hcs_params)) diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index e763a83..4df4982 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -47,6 +47,12 @@ struct ehci_stats { #define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */ struct ehci_hcd { /* one per controller */ + /* glue to PCI and HCD framework */ + struct ehci_caps __iomem *caps; + struct ehci_regs __iomem *regs; + struct ehci_dbg_port __iomem *debug; + + __u32 hcs_params; /* cached register copy */ spinlock_t lock; /* async schedule support */ @@ -84,11 +90,6 @@ struct ehci_hcd { /* one per controller */ unsigned is_tdi_rh_tt:1; /* TDI roothub with TT */ - /* glue to PCI and HCD framework */ - struct ehci_caps __iomem *caps; - struct ehci_regs __iomem *regs; - __u32 hcs_params; /* cached register copy */ - /* irq statistics */ #ifdef EHCI_STATS struct ehci_stats stats; @@ -165,7 +166,7 @@ struct ehci_caps { /* these fields are specified as 8 and 16 bit registers, * but some hosts can't perform 8 or 16 bit PCI accesses. 
*/ - u32 hc_capbase; + u32 hc_capbase; #define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */ #define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */ u32 hcs_params; /* HCSPARAMS - offset 0x4 */ @@ -273,7 +274,7 @@ struct ehci_dbg_port { #define DBGP_ENABLED (1<<28) #define DBGP_DONE (1<<16) #define DBGP_INUSE (1<<10) -#define DBGP_ERRCODE(x) (((x)>>7)&0x0f) +#define DBGP_ERRCODE(x) (((x)>>7)&0x07) # define DBGP_ERR_BAD 1 # define DBGP_ERR_SIGNAL 2 #define DBGP_ERROR (1<<6) @@ -282,11 +283,11 @@ struct ehci_dbg_port { #define DBGP_LEN(x) (((x)>>0)&0x0f) u32 pids; #define DBGP_PID_GET(x) (((x)>>16)&0xff) -#define DBGP_PID_SET(data,tok) (((data)<<8)|(tok)); +#define DBGP_PID_SET(data,tok) (((data)<<8)|(tok)) u32 data03; u32 data47; u32 address; -#define DBGP_EPADDR(dev,ep) (((dev)<<8)|(ep)); +#define DBGP_EPADDR(dev,ep) (((dev)<<8)|(ep)) } __attribute__ ((packed)); /*-------------------------------------------------------------------------*/ -- cgit v0.10.2 From 3b86b2028c4de998978baae780edfc36995bebc5 Mon Sep 17 00:00:00 2001 From: Greg KH Date: Mon, 25 Apr 2005 21:46:29 -0700 Subject: [PATCH] USB: add a driver for the AirPrime CDMA Wireless PC card. Easier than trying to use the generic usb-serial driver. Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 0c4aa00..bc798ed 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -53,6 +53,15 @@ config USB_SERIAL_GENERIC support" be compiled as a module for this driver to be used properly. +config USB_SERIAL_AIRPRIME + tristate "USB AirPrime CDMA Wireless Driver" + depends on USB_SERIAL + help + Say Y here if you want to use a AirPrime CDMA Wireless PC card. + + To compile this driver as a module, choose M here: the + module will be called airprime. + config USB_SERIAL_BELKIN tristate "USB Belkin and Peracom Single Port Serial Driver" depends on USB_SERIAL diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile index b0aac47d..d56ff6d 100644 --- a/drivers/usb/serial/Makefile +++ b/drivers/usb/serial/Makefile @@ -11,6 +11,7 @@ usbserial-obj-$(CONFIG_USB_EZUSB) += ezusb.o usbserial-objs := usb-serial.o generic.o bus.o $(usbserial-obj-y) +obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o obj-$(CONFIG_USB_SERIAL_CYBERJACK) += cyberjack.o diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c new file mode 100644 index 0000000..a4ce000 --- /dev/null +++ b/drivers/usb/serial/airprime.c @@ -0,0 +1,63 @@ +/* + * AirPrime CDMA Wireless Serial USB driver + * + * Copyright (C) 2005 Greg Kroah-Hartman + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include "usb-serial.h" + +static struct usb_device_id id_table [] = { + { USB_DEVICE(0xf3d, 0x0112) }, + { }, +}; +MODULE_DEVICE_TABLE(usb, id_table); + +static struct usb_driver airprime_driver = { + .owner = THIS_MODULE, + .name = "airprime", + .probe = usb_serial_probe, + .disconnect = usb_serial_disconnect, + .id_table = id_table, +}; + +static struct usb_serial_device_type airprime_device = { + .owner = THIS_MODULE, + .name = "airprime", + .id_table = id_table, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = NUM_DONT_CARE, + .num_bulk_out = NUM_DONT_CARE, + .num_ports = 1, +}; + +static int __init airprime_init(void) +{ + int retval; + + retval = usb_serial_register(&airprime_device); + if (retval) + return retval; + retval = usb_register(&airprime_driver); + if (retval) + usb_serial_deregister(&airprime_device); + return retval; +} + +static void __exit airprime_exit(void) +{ + usb_deregister(&airprime_driver); + usb_serial_deregister(&airprime_device); +} + +module_init(airprime_init); +module_exit(airprime_exit); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From b9ab0746a53b9cd5c64e179a1c13952f0ae80e64 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Mon, 2 May 2005 22:47:55 +0200 Subject: [PATCH] USB: new usbnet device id On Thu, Apr 14, 2005 at 11:06:21PM +0400, Sergey Vlasov wrote: > http://thread.gmane.org/gmane.linux.usb.devel/32977 > > (see "[PATCH] N/3 cdc acm errors"). > > You also need this driver core fix: > > http://thread.gmane.org/gmane.linux.usb.devel/33132 I reproduced the same oops while trying to execute at+mode=99, it would be nice to get these fix merged since I believe it's still needed to connect the laptop over gprs (something I didn't test yet). This further patch will allow you to connect via usbnet, Greg could you apply? Thanks! Signed-off-by: Andrea Arcangeli Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index a45ea7c..f6bc6b3 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c @@ -4071,6 +4071,9 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x8086, 0x07d3), // "blob" bootloader .driver_info = (unsigned long) &blob_info, }, { + USB_DEVICE (0x22b8, 0x600c), // USBNET Motorola E680 + .driver_info = (unsigned long) &linuxdev_info, +}, { // Linux Ethernet/RNDIS gadget on pxa210/25x/26x // e.g. Gumstix, current OpenZaurus, ... USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203), -- cgit v0.10.2 From 6f92872cc8085b9e65a8ec07aed106ce5d24f60b Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Fri, 29 Apr 2005 16:06:14 +0100 Subject: [PATCH] USB: VID/PID updates for ftdi_sio driver Some VID/PID updates for the ftdi_sio driver: * The "Gude Analog- und Digitalsysteme GmbH" entries were missing from the "combined" table. * Replaced FTDI_8U232AM_ALT_ALT_PID with 3 PIDs for devices from 4n-galaxy.de. * Removed redundant FTDI_RM_VID and renamed FTDI_RMCANVIEW_PID to FTDI_RM_CANVIEW_PID. * Added VID/PID for serial converter in Mobility Electronics EasiDock USB 200 (mentioned by Gregory Schmitt). * Added PID for Active Robots USB comms board (mentioned by John Koch). 
Signed-off-by: Ian Abbott Signed-off-by: Greg Kroah-Hartman diff -ur a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4c788c7..b31e09b 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -272,6 +272,7 @@ static int debug; static struct usb_device_id id_table_sio [] = { { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { } /* Terminating entry */ }; @@ -296,7 +297,6 @@ static struct usb_device_id id_table_8U232AM [] = { { USB_DEVICE_VER(FTDI_VID, FTDI_IRTRANS_PID, 0, 0x3ff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_8U232AM_PID, 0, 0x3ff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_8U232AM_ALT_PID, 0, 0x3ff) }, - { USB_DEVICE_VER(FTDI_VID, FTDI_8U232AM_ALT_ALT_PID, 0, 0x3ff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_RELAIS_PID, 0, 0x3ff) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, @@ -369,11 +369,14 @@ static struct usb_device_id id_table_8U232AM [] = { { USB_DEVICE_VER(INTREPID_VID, INTREPID_NEOVI_PID, 0, 0x3ff) }, { USB_DEVICE_VER(FALCOM_VID, FALCOM_TWIST_PID, 0, 0x3ff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_SUUNTO_SPORTS_PID, 0, 0x3ff) }, - { USB_DEVICE_VER(FTDI_RM_VID, FTDI_RMCANVIEW_PID, 0, 0x3ff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_RM_CANVIEW_PID, 0, 0x3ff) }, { USB_DEVICE_VER(BANDB_VID, BANDB_USOTL4_PID, 0, 0x3ff) }, { USB_DEVICE_VER(BANDB_VID, BANDB_USTL4_PID, 0, 0x3ff) }, { USB_DEVICE_VER(BANDB_VID, BANDB_USO9ML2_PID, 0, 0x3ff) }, { USB_DEVICE_VER(FTDI_VID, EVER_ECO_PRO_CDS, 0, 0x3ff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_4N_GALAXY_DE_0_PID, 0, 0x3ff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID, 0, 0x3ff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID, 0, 0x3ff) }, { } /* Terminating entry */ }; @@ -382,7 +385,6 @@ static struct usb_device_id id_table_FT232BM [] = { { USB_DEVICE_VER(FTDI_VID, FTDI_IRTRANS_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_8U232AM_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_8U232AM_ALT_PID, 0x400, 0xffff) }, - { USB_DEVICE_VER(FTDI_VID, FTDI_8U232AM_ALT_ALT_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_RELAIS_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_XF_632_PID, 0x400, 0xffff) }, @@ -485,11 +487,15 @@ static struct usb_device_id id_table_FT232BM [] = { { USB_DEVICE_VER(INTREPID_VID, INTREPID_NEOVI_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FALCOM_VID, FALCOM_TWIST_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_VID, FTDI_SUUNTO_SPORTS_PID, 0x400, 0xffff) }, - { USB_DEVICE_VER(FTDI_RM_VID, FTDI_RMCANVIEW_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_RM_CANVIEW_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(BANDB_VID, BANDB_USOTL4_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(BANDB_VID, BANDB_USTL4_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(BANDB_VID, BANDB_USO9ML2_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_VID, EVER_ECO_PRO_CDS, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_4N_GALAXY_DE_0_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID, 0x400, 0xffff) }, { } /* Terminating entry */ }; @@ -517,7 +523,6 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, { USB_DEVICE(FTDI_VID, 
FTDI_8U232AM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_ALT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, @@ -596,6 +601,22 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, PROTEGO_R2X0) }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_3) }, { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_4) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E808_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E809_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E80A_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E80B_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E80C_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E80D_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E80E_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E80F_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E888_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E889_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E88A_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E88B_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E88C_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E88D_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E88E_PID, 0x400, 0xffff) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_GUDEADS_E88F_PID, 0x400, 0xffff) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_UO100_PID) }, { USB_DEVICE_VER(FTDI_VID, LINX_SDMUSBQSS_PID, 0x400, 0xffff) }, { USB_DEVICE_VER(FTDI_VID, LINX_MASTERDEVEL2_PID, 0x400, 0xffff) }, @@ -609,11 +630,16 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) }, { USB_DEVICE(FALCOM_VID, FALCOM_TWIST_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SUUNTO_SPORTS_PID) }, - { USB_DEVICE(FTDI_RM_VID, FTDI_RMCANVIEW_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, + { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_0_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, + { USB_DEVICE_VER(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID, 0x400, 0xffff) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index be5d60b..3734b5e 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -26,7 +26,6 @@ #define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ #define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */ #define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ -#define FTDI_8U232AM_ALT_ALT_PID 0xf3c0 /* FTDI's second alternate PID for above */ #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ #define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */ #define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */ @@ -157,7 +156,8 @@ */ #define OCT_VID 0x0B39 /* OCT vendor ID */ /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ -/* Also rebadged as SIIG Inc. model US2308 */ +/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ +/* Also rebadged as SIIG Inc. 
model US2308 hardware version 1 */ #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ /* an infrared receiver for user access control with IR tags */ @@ -236,10 +236,10 @@ /* * RM Michaelides CANview USB (http://www.rmcan.com) - * CAN filedbus interface adapter, addad by port GmbH www.port.de) + * CAN fieldbus interface adapter, added by port GmbH www.port.de) + * Ian Abbott changed the macro names for consistency. */ -#define FTDI_RM_VID 0x0403 /* Vendor Id */ -#define FTDI_RMCANVIEW_PID 0xfd60 /* Product Id */ +#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */ /* * EVER Eco Pro UPS (http://www.ever.com.pl/) @@ -247,6 +247,26 @@ #define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */ +/* + * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485, + * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices + * and I'm not entirely sure which are used by which. + */ +#define FTDI_4N_GALAXY_DE_0_PID 0x8372 +#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 +#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1 + +/* + * Mobility Electronics products. + */ +#define MOBILITY_VID 0x1342 +#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */ + +/* + * Active Robots product ids. + */ +#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */ + /* Commands */ #define FTDI_SIO_RESET 0 /* Reset the port */ #define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ -- cgit v0.10.2 From 5e54f91d8d770c5a48da10a5c9c314c1c926536b Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Fri, 29 Apr 2005 16:09:44 +0100 Subject: [PATCH] USB: ftdi_sio redundant macro removal [ftdi_sio] Replaced redundant INTERFACE_A and INTERFACE_B macros with the equivalent PIT_SIOA and PIT_SIOB macros. Signed-off-by: Ian Abbott Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b31e09b..0c4b435 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1483,10 +1483,10 @@ static int ftdi_FT2232C_startup (struct usb_serial *serial) inter = serial->interface->altsetting->desc.bInterfaceNumber; if (inter) { - priv->interface = INTERFACE_B; + priv->interface = PIT_SIOB; } else { - priv->interface = INTERFACE_A; + priv->interface = PIT_SIOA; } priv->baud_base = 48000000 / 2; /* Would be / 16, but FT2232C supports multiple of 0.125 divisor fractions! */ diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 3734b5e..96be364 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -279,10 +279,6 @@ #define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */ #define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */ -/* Port interface code for FT2232C */ -#define INTERFACE_A 1 -#define INTERFACE_B 2 - /* * BmRequestType: 1100 0000b -- cgit v0.10.2 From 8af60be9a71d2c636cd800deab63a9fadf3aee96 Mon Sep 17 00:00:00 2001 From: Vivian Bregier Date: Tue, 3 May 2005 02:16:34 +0200 Subject: [PATCH] USB: unusual_devs.h: atmel snd1 storage Signed-off-by: Phil Dibowitz Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index bbda63c2..cb01214 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -48,6 +48,14 @@ * USB development list . 
*/ +/* patch submitted by Vivian Bregier + */ +UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100, + "ATMEL", + "SND1 Storage", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE), + UNUSUAL_DEV( 0x03ee, 0x6901, 0x0000, 0x0100, "Mitsumi", "USB FDD", -- cgit v0.10.2 From 7b1cbebaa7ced7a2029cc5b50eab60c79b24cc10 Mon Sep 17 00:00:00 2001 From: Phil Dibowitz Date: Mon, 2 May 2005 23:54:28 -0700 Subject: [PATCH] USB: unusual_devs entry for Minolta Dimage Z10 This patch adds an unusual_devs entry for the Minolta Dimage Z10. Originally reported by Vilisas Signed-off-by: Phil Dibowitz Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index cb01214..99e8249 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1002,6 +1002,13 @@ UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999, US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, 0 ), +/* Reported by Vilius Bilinkevicius */ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, "SWISSBIT", -- cgit v0.10.2 From 093cf723b2b06d774929ea07982f6a466ff22314 Mon Sep 17 00:00:00 2001 From: Steven Cole Date: Tue, 3 May 2005 19:07:24 -0600 Subject: [PATCH] USB: Spelling fixes for drivers/usb. Here are some spelling corrections for drivers/usb. cancelation -> cancellation succesful -> successful cancelation -> cancellation decriptor -> descriptor Initalize -> Initialize wierd -> weird Protocoll -> Protocol occured -> occurred successfull -> successful Procesing -> Processing devide -> divide Isochronuous -> Isochronous noticable -> noticeable Basicly -> Basically transfering -> transferring intialize -> initialize Incomming -> Incoming additionnal -> additional asume -> assume Unfortunatly -> Unfortunately retreive -> retrieve tranceiver -> transceiver Compatiblity -> Compatibility Incorprated -> Incorporated existance -> existence Ununsual -> Unusual Signed-off-by: Steven Cole Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index e12c5be..f50aaf2 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -431,7 +431,7 @@ nomem: * (2) error, where io->status is a negative errno value. The number * of io->bytes transferred before the error is usually less * than requested, and can be nonzero. - * (3) cancelation, a type of error with status -ECONNRESET that + * (3) cancellation, a type of error with status -ECONNRESET that * is initiated by usb_sg_cancel(). * * When this function returns, all memory allocated through usb_sg_init() or @@ -1282,7 +1282,7 @@ static void release_interface(struct device *dev) * bus rwsem; usb device driver probe() methods cannot use this routine. * * Returns zero on success, or else the status code returned by the - * underlying call that failed. On succesful completion, each interface + * underlying call that failed. On successful completion, each interface * in the original device configuration has been destroyed, and each one * in the new configuration has been probed by all relevant usb device * drivers currently known to the kernel. diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 1697215..0faf18d 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -121,7 +121,7 @@ struct urb * usb_get_urb(struct urb *urb) * describing that request to the USB subsystem. Request completion will * be indicated later, asynchronously, by calling the completion handler. 
* The three types of completion are success, error, and unlink - * (a software-induced fault, also called "request cancelation"). + * (a software-induced fault, also called "request cancellation"). * * URBs may be submitted in interrupt context. * @@ -170,7 +170,7 @@ struct urb * usb_get_urb(struct urb *urb) * As of Linux 2.6, all USB endpoint transfer queues support depths greater * than one. This was previously a HCD-specific behavior, except for ISO * transfers. Non-isochronous endpoint queues are inactive during cleanup - * after faults (transfer errors or cancelation). + * after faults (transfer errors or cancellation). * * Reserved Bandwidth Transfers: * @@ -395,7 +395,7 @@ int usb_submit_urb(struct urb *urb, int mem_flags) * * This routine cancels an in-progress request. URBs complete only * once per submission, and may be canceled only once per submission. - * Successful cancelation means the requests's completion handler will + * Successful cancellation means the requests's completion handler will * be called with a status code indicating that the request has been * canceled (rather than any other code) and will quickly be removed * from host controller data structures. diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 3993156..3f783cb 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c @@ -569,7 +569,7 @@ static const struct usb_cdc_ether_desc ether_desc = { /* include the status endpoint if we can, even where it's optional. * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one - * packet, to simplify cancelation; and a big transfer interval, to + * packet, to simplify cancellation; and a big transfer interval, to * waste less bandwidth. * * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index 2cff67c..1e5e6dd 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -275,7 +275,7 @@ static const char *CHIP; * * After opening, configure non-control endpoints. Then use normal * stream read() and write() requests; and maybe ioctl() to get more - * precise FIFO status when recovering from cancelation. + * precise FIFO status when recovering from cancellation. 
*/ static void epio_complete (struct usb_ep *ep, struct usb_request *req) diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c index 0def9f7..df75ab6 100644 --- a/drivers/usb/gadget/lh7a40x_udc.c +++ b/drivers/usb/gadget/lh7a40x_udc.c @@ -705,7 +705,7 @@ void nuke(struct lh7a40x_ep *ep, int status) done(ep, req, status); } - /* Disable IRQ if EP is enabled (has decriptor) */ + /* Disable IRQ if EP is enabled (has descriptor) */ if (ep->desc) pio_irq_disable(ep_index(ep)); } diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c index f1762ed..4d591c7 100644 --- a/drivers/usb/gadget/serial.c +++ b/drivers/usb/gadget/serial.c @@ -240,7 +240,7 @@ struct gs_dev { struct usb_ep *dev_notify_ep; /* address of notify endpoint */ struct usb_ep *dev_in_ep; /* address of in endpoint */ struct usb_ep *dev_out_ep; /* address of out endpoint */ - struct usb_endpoint_descriptor /* desciptor of notify ep */ + struct usb_endpoint_descriptor /* descriptor of notify ep */ *dev_notify_ep_desc; struct usb_endpoint_descriptor /* descriptor of in endpoint */ *dev_in_ep_desc; diff --git a/drivers/usb/host/hc_crisv10.c b/drivers/usb/host/hc_crisv10.c index 376f8a0..d9883d7 100644 --- a/drivers/usb/host/hc_crisv10.c +++ b/drivers/usb/host/hc_crisv10.c @@ -4329,7 +4329,7 @@ static int __init etrax_usb_hc_init(void) bus->bus_name="ETRAX 100LX"; bus->hcpriv = hc; - /* Initalize RH to the default address. + /* Initialize RH to the default address. And make sure that we have no status change indication */ hc->rh.numports = 2; /* The RH has two ports */ hc->rh.devnum = 1; diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index d309e29..a374b76 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -134,7 +134,7 @@ static void port_power(struct sl811 *sl811, int is_on) /* This is a PIO-only HCD. Queueing appends URBs to the endpoint's queue, * and may start I/O. Endpoint queues are scanned during completion irq - * handlers (one per packet: ACK, NAK, faults, etc) and urb cancelation. + * handlers (one per packet: ACK, NAK, faults, etc) and urb cancellation. * * Using an external DMA engine to copy a packet at a time could work, * though setup/teardown costs may be too big to make it worthwhile. @@ -738,7 +738,7 @@ retry: } #endif - /* port status seems wierd until after reset, so + /* port status seems weird until after reset, so * force the reset and make khubd clean up later. */ sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION) diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index 5791723..a330a4b 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c @@ -23,7 +23,7 @@ * * * The driver brings the USB functions of the MDC800 to Linux. - * To use the Camera you must support the USB Protocoll of the camera + * To use the Camera you must support the USB Protocol of the camera * to the Kernel Node. * The Driver uses a misc device Node. Create it with : * mknod /dev/mustek c 180 32 diff --git a/drivers/usb/input/aiptek.c b/drivers/usb/input/aiptek.c index 2d76be6..94ce2a9 100644 --- a/drivers/usb/input/aiptek.c +++ b/drivers/usb/input/aiptek.c @@ -386,7 +386,7 @@ static int aiptek_convert_from_2s_complement(unsigned char c) * convention above.) I therefore have taken over REL_MISC and ABS_MISC * (for relative and absolute reports, respectively) for communicating * Proximity. Why two events? 
I thought it interesting to know if the - * Proximity event occured while the tablet was in absolute or relative + * Proximity event occurred while the tablet was in absolute or relative * mode. * * Other tablets use the notion of a certain minimum stylus pressure diff --git a/drivers/usb/input/mtouchusb.c b/drivers/usb/input/mtouchusb.c index 6b45a66..ab1a2a3 100644 --- a/drivers/usb/input/mtouchusb.c +++ b/drivers/usb/input/mtouchusb.c @@ -32,7 +32,7 @@ * Changed reset from standard USB dev reset to vendor reset * Changed data sent to host from compensated to raw coordinates * Eliminated vendor/product module params - * Performed multiple successfull tests with an EXII-5010UC + * Performed multiple successful tests with an EXII-5010UC * * 1.5 02/27/2005 ddstreet@ieee.org * Added module parameter to select raw or hw-calibrated coordinate reporting diff --git a/drivers/usb/media/ov511.c b/drivers/usb/media/ov511.c index d605182..036c485 100644 --- a/drivers/usb/media/ov511.c +++ b/drivers/usb/media/ov511.c @@ -5041,7 +5041,7 @@ ov6xx0_configure(struct usb_ov511 *ov) { OV511_I2C_BUS, 0x2a, 0x04 }, /* Disable framerate adjust */ // { OV511_I2C_BUS, 0x2b, 0xac }, /* Framerate; Set 2a[7] first */ { OV511_I2C_BUS, 0x2d, 0x99 }, - { OV511_I2C_BUS, 0x33, 0xa0 }, /* Color Procesing Parameter */ + { OV511_I2C_BUS, 0x33, 0xa0 }, /* Color Processing Parameter */ { OV511_I2C_BUS, 0x34, 0xd2 }, /* Max A/D range */ { OV511_I2C_BUS, 0x38, 0x8b }, { OV511_I2C_BUS, 0x39, 0x40 }, diff --git a/drivers/usb/media/pwc/pwc-ctrl.c b/drivers/usb/media/pwc/pwc-ctrl.c index 42352f5..42ec468 100644 --- a/drivers/usb/media/pwc/pwc-ctrl.c +++ b/drivers/usb/media/pwc/pwc-ctrl.c @@ -1100,7 +1100,7 @@ static inline int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt) unsigned char buf[4]; /* set new relative angle; angles are expressed in degrees * 100, - but cam as .5 degree resolution, hence devide by 200. Also + but cam as .5 degree resolution, hence divide by 200. Also the angle must be multiplied by 64 before it's send to the cam (??) */ diff --git a/drivers/usb/media/pwc/pwc-if.c b/drivers/usb/media/pwc/pwc-if.c index c53e226..cca47f4 100644 --- a/drivers/usb/media/pwc/pwc-if.c +++ b/drivers/usb/media/pwc/pwc-if.c @@ -272,7 +272,7 @@ static int pwc_allocate_buffers(struct pwc_device *pdev) return -ENXIO; } #endif - /* Allocate Isochronuous pipe buffers */ + /* Allocate Isochronous pipe buffers */ for (i = 0; i < MAX_ISO_BUFS; i++) { if (pdev->sbuf[i].data == NULL) { kbuf = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL); @@ -850,7 +850,7 @@ static int pwc_isoc_init(struct pwc_device *pdev) if (pdev->vmax_packet_size < 0 || pdev->vmax_packet_size > ISO_MAX_FRAME_SIZE) { Err("Failed to find packet size for video endpoint in current alternate setting.\n"); - return -ENFILE; /* Odd error, that should be noticable */ + return -ENFILE; /* Odd error, that should be noticeable */ } /* Set alternate interface */ @@ -2128,7 +2128,7 @@ static int __init usb_pwc_init(void) if (leds[1] >= 0) led_off = leds[1]; - /* Big device node whoopla. Basicly, it allows you to assign a + /* Big device node whoopla. Basically, it allows you to assign a device node (/dev/videoX) to a camera, based on its type & serial number. The format is [type[.serialnumber]:]node. 
diff --git a/drivers/usb/media/pwc/pwc-ioctl.h b/drivers/usb/media/pwc/pwc-ioctl.h index 65805ea..5f9cb08 100644 --- a/drivers/usb/media/pwc/pwc-ioctl.h +++ b/drivers/usb/media/pwc/pwc-ioctl.h @@ -75,7 +75,7 @@ #define PWC_FPS_SNAPSHOT 0x00400000 -/* structure for transfering x & y coordinates */ +/* structure for transferring x & y coordinates */ struct pwc_coord { int x, y; /* guess what */ diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index dd4580c..7d06105 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -859,7 +859,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device info ("udev is NULL."); } - /* allocate memory for our device state and intialize it */ + /* allocate memory for our device state and initialize it */ dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL); diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c index f98cb2a..341ae5f 100644 --- a/drivers/usb/net/zd1201.c +++ b/drivers/usb/net/zd1201.c @@ -183,7 +183,7 @@ static void zd1201_usbtx(struct urb *urb, struct pt_regs *regs) return; } -/* Incomming data */ +/* Incoming data */ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs) { struct zd1201 *zd = urb->context; @@ -772,7 +772,7 @@ static int zd1201_net_stop(struct net_device *dev) /* RFC 1042 encapsulates Ethernet frames in 802.11 frames by prefixing them with 0xaa, 0xaa, 0x03) followed by a SNAP OID of 0 - (0x00, 0x00, 0x00). Zd requires an additionnal padding, copy + (0x00, 0x00, 0x00). Zd requires an additional padding, copy of ethernet addresses, length of the standard RFC 1042 packet and a command byte (which is nul for tx). @@ -1098,7 +1098,7 @@ static int zd1201_get_range(struct net_device *dev, /* Little bit of magic here: we only get the quality if we poll * for it, and we never get an actual request to trigger such - * a poll. Therefore we 'asume' that the user will soon ask for + * a poll. Therefore we 'assume' that the user will soon ask for * the stats after asking the bssid. */ static int zd1201_get_wap(struct net_device *dev, @@ -1108,7 +1108,7 @@ static int zd1201_get_wap(struct net_device *dev, unsigned char buffer[6]; if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) { - /* Unfortunatly the quality and noise reported is useless. + /* Unfortunately the quality and noise reported is useless. they seem to be accumulators that increase until you read them, unless we poll on a fixed interval we can't use them diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index d165f42..9db23de 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -234,7 +234,7 @@ static struct usb_serial_device_type cypress_hidcom_device = { *****************************************************************************/ -/* This function can either set or retreive the current serial line settings */ +/* This function can either set or retrieve the current serial line settings */ static int cypress_serial_control (struct usb_serial_port *port, unsigned baud_mask, int data_bits, int stop_bits, int parity_enable, int parity_type, int reset, int cypress_request_type) { diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 0c4b435..52394f0 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -76,7 +76,7 @@ * Defererence pointers after any paranoid checks, not before. 
* * (21/Jun/2003) Erik Nygren - * Added support for Home Electronics Tira-1 IR tranceiver using FT232BM chip. + * Added support for Home Electronics Tira-1 IR transceiver using FT232BM chip. * See . Only operates properly * at 100000 and RTS-CTS, so set custom divisor mode on startup. * Also force the Tira-1 and USB-UIRT to only use their custom baud rates. @@ -91,7 +91,7 @@ * Minor whitespace and comment changes. * * (12/Jun/2003) David Norwood - * Added support for USB-UIRT IR tranceiver using 8U232AM chip. + * Added support for USB-UIRT IR transceiver using 8U232AM chip. * See . Only * operates properly at 312500, so set custom divisor mode on startup. * diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 96be364..a52bb13 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -136,7 +136,7 @@ /* * Home Electronics (www.home-electro.com) USB gadgets */ -#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR tranceiver */ +#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */ /* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */ /* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */ diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h index 8c1fa5e..f1804fd 100644 --- a/drivers/usb/serial/io_usbvend.h +++ b/drivers/usb/serial/io_usbvend.h @@ -289,7 +289,7 @@ // // -// Edgeport Compatiblity Descriptor +// Edgeport Compatibility Descriptor // // This descriptor is only returned by Edgeport-compatible devices // supporting the EPiC spec. True ION devices do not return this diff --git a/drivers/usb/serial/keyspan_usa90msg.h b/drivers/usb/serial/keyspan_usa90msg.h index dd935b6..86708ec 100644 --- a/drivers/usb/serial/keyspan_usa90msg.h +++ b/drivers/usb/serial/keyspan_usa90msg.h @@ -19,7 +19,7 @@ This file is available under a BSD-style copyright - 2. The name of InnoSys Incorprated may not be used to endorse or promote + 2. The name of InnoSys Incorporated may not be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c index 7eff03d..f3b6028 100644 --- a/drivers/usb/storage/shuttle_usbat.c +++ b/drivers/usb/storage/shuttle_usbat.c @@ -786,7 +786,7 @@ static int usbat_flash_check_media(struct us_data *us, if (rc != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; - // Check for media existance + // Check for media existence rc = usbat_flash_check_media_present(uio); if (rc == USBAT_FLASH_MEDIA_NONE) { info->sense_key = 0x02; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 99e8249..d2891f4 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1,5 +1,5 @@ /* Driver for USB Mass Storage compliant devices - * Ununsual Devices File + * Unusual Devices File * * $Id: unusual_devs.h,v 1.32 2002/02/25 02:41:24 mdharm Exp $ * -- cgit v0.10.2 From 3cb4a4f739cccdf0642fa7e3a087e0b8425ff67b Mon Sep 17 00:00:00 2001 From: Lonnie Mendez Date: Tue, 3 May 2005 17:02:20 -0500 Subject: [PATCH] USB cypress_m8: update kernel driver with current source Fixed problem where setting or retreiving the serial config would fail with EPIPE. Removed CRTS toggling so the driver behaves more like other usbserial adapters. Issued new interval of 1ms instead of the default bInterval. As a result, transfer speed has been substantially increased. From avg. 850bps to avg. 3300bps. 
Also added new module parameter 'interval' to tweak the interval in case this change causes problems for someone. Cleaned up code and formatting issues so source is more readable. Replaced the C++ style comments. Various other code cleanups. Signed-off-by: Lonnie Mendez Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 9db23de..f34a9bb 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -16,6 +16,14 @@ * See http://geocities.com/i0xox0i for information on this driver and the * earthmate usb device. * + * Lonnie Mendez + * 4-29-2005 + * Fixed problem where setting or retreiving the serial config would fail with + * EPIPE. Removed CRTS toggling so the driver behaves more like other usbserial + * adapters. Issued new interval of 1ms instead of the default 10ms. As a + * result, transfer speed has been substantially increased. From avg. 850bps to + * avg. 3300bps. initial termios has also been modified. Cleaned up code and + * formatting issues so it is more readable. Replaced the C++ style comments. * * Lonnie Mendez * 12-15-2004 @@ -32,12 +40,6 @@ * 10-2003 * Driver first released. * - * - * Long Term TODO: - * Improve transfer speeds - both read/write are somewhat slow - * at this point. - * Improve debugging. Show modem line status with debug output and - * implement filtering for certain data as a module parameter. */ /* Thanks to Neil Whelchel for writing the first cypress m8 implementation for linux. */ @@ -72,11 +74,12 @@ static int debug; #endif static int stats; +static int interval; /* * Version Information */ -#define DRIVER_VERSION "v1.08" +#define DRIVER_VERSION "v1.09" #define DRIVER_AUTHOR "Lonnie Mendez , Neil Whelchel " #define DRIVER_DESC "Cypress USB to Serial Driver" @@ -130,7 +133,6 @@ struct cypress_private { char prev_status, diff_status; /* used for TIOCMIWAIT */ /* we pass a pointer to this as the arguement sent to cypress_set_termios old_termios */ struct termios tmp_termios; /* stores the old termios settings */ - char calledfromopen; /* used when issuing lines on open - fixes rts drop bug */ }; /* write buffer structure */ @@ -168,10 +170,8 @@ static void cypress_buf_free(struct cypress_buf *cb); static void cypress_buf_clear(struct cypress_buf *cb); static unsigned int cypress_buf_data_avail(struct cypress_buf *cb); static unsigned int cypress_buf_space_avail(struct cypress_buf *cb); -static unsigned int cypress_buf_put(struct cypress_buf *cb, const char *buf, - unsigned int count); -static unsigned int cypress_buf_get(struct cypress_buf *cb, char *buf, - unsigned int count); +static unsigned int cypress_buf_put(struct cypress_buf *cb, const char *buf, unsigned int count); +static unsigned int cypress_buf_get(struct cypress_buf *cb, char *buf, unsigned int count); static struct usb_serial_device_type cypress_earthmate_device = { @@ -238,10 +238,9 @@ static struct usb_serial_device_type cypress_hidcom_device = { static int cypress_serial_control (struct usb_serial_port *port, unsigned baud_mask, int data_bits, int stop_bits, int parity_enable, int parity_type, int reset, int cypress_request_type) { - int i, n_baud_rate = 0, retval = 0; + int new_baudrate = 0, retval = 0, tries = 0; struct cypress_private *priv; - __u8 feature_buffer[5]; - __u8 config; + __u8 feature_buffer[8]; unsigned long flags; dbg("%s", __FUNCTION__); @@ -256,7 +255,8 @@ static int cypress_serial_control (struct usb_serial_port *port, unsigned baud_m * of 57600bps (I have no idea whether 
DeLorme chose to use the general purpose * firmware or not), if you need to modify this speed setting for your own * project please add your own chiptype and modify the code likewise. The - * Cypress HID->COM device will work successfully up to 115200bps. + * Cypress HID->COM device will work successfully up to 115200bps (but the + * actual throughput is around 3kBps). */ if (baud_mask != priv->cbr_mask) { dbg("%s - baud rate is changing", __FUNCTION__); @@ -265,109 +265,114 @@ static int cypress_serial_control (struct usb_serial_port *port, unsigned baud_m * but are not used with NMEA and SiRF protocols */ if ( (baud_mask == B300) || (baud_mask == B600) ) { - err("%s - failed setting baud rate, unsupported speed (default to 4800)", + err("%s - failed setting baud rate, unsupported speed", __FUNCTION__); - n_baud_rate = 4800; - } else if ( (n_baud_rate = mask_to_rate(baud_mask)) == -1) { - err("%s - failed setting baud rate, unsupported speed (default to 4800)", + new_baudrate = priv->baud_rate; + } else if ( (new_baudrate = mask_to_rate(baud_mask)) == -1) { + err("%s - failed setting baud rate, unsupported speed", __FUNCTION__); - n_baud_rate = 4800; + new_baudrate = priv->baud_rate; } } else if (priv->chiptype == CT_CYPHIDCOM) { - if ( (n_baud_rate = mask_to_rate(baud_mask)) == -1) { - err("%s - failed setting baud rate, unsupported speed (default to 4800)", + if ( (new_baudrate = mask_to_rate(baud_mask)) == -1) { + err("%s - failed setting baud rate, unsupported speed", __FUNCTION__); - n_baud_rate = 4800; + new_baudrate = priv->baud_rate; } } else if (priv->chiptype == CT_GENERIC) { - if ( (n_baud_rate = mask_to_rate(baud_mask)) == -1) { - err("%s - failed setting baud rate, unsupported speed (default to 4800)", + if ( (new_baudrate = mask_to_rate(baud_mask)) == -1) { + err("%s - failed setting baud rate, unsupported speed", __FUNCTION__); - n_baud_rate = 4800; + new_baudrate = priv->baud_rate; } } else { - info("%s - please define your chiptype, using 4800bps default", __FUNCTION__); - n_baud_rate = 4800; + info("%s - please define your chiptype", __FUNCTION__); + new_baudrate = priv->baud_rate; } } else { /* baud rate not changing, keep the old */ - n_baud_rate = priv->baud_rate; + new_baudrate = priv->baud_rate; } - dbg("%s - baud rate is being sent as %d", __FUNCTION__, n_baud_rate); - + dbg("%s - baud rate is being sent as %d", __FUNCTION__, new_baudrate); - /* - * This algorithm accredited to Jiang Jay Zhang... thanks for all the help! 
- */ - for (i = 0; i < 4; ++i) { - feature_buffer[i] = ( n_baud_rate >> (i*8) & 0xFF ); - } + memset(feature_buffer, 0, 8); + /* fill the feature_buffer with new configuration */ + *((u_int32_t *)feature_buffer) = new_baudrate; - config = 0; // reset config byte - config |= data_bits; // assign data bits in 2 bit space ( max 3 ) + feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */ /* 1 bit gap */ - config |= (stop_bits << 3); // assign stop bits in 1 bit space - config |= (parity_enable << 4); // assign parity flag in 1 bit space - config |= (parity_type << 5); // assign parity type in 1 bit space + feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */ + feature_buffer[4] |= (parity_enable << 4); /* assign parity flag in 1 bit space */ + feature_buffer[4] |= (parity_type << 5); /* assign parity type in 1 bit space */ /* 1 bit gap */ - config |= (reset << 7); // assign reset at end of byte, 1 bit space - - feature_buffer[4] = config; + feature_buffer[4] |= (reset << 7); /* assign reset at end of byte, 1 bit space */ dbg("%s - device is being sent this feature report:", __FUNCTION__); dbg("%s - %02X - %02X - %02X - %02X - %02X", __FUNCTION__, feature_buffer[0], feature_buffer[1], feature_buffer[2], feature_buffer[3], feature_buffer[4]); + do { retval = usb_control_msg (port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), HID_REQ_SET_REPORT, USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, - 0x0300, 0, feature_buffer, 5, 500); + 0x0300, 0, feature_buffer, 8, 500); + + if (tries++ >= 3) + break; - if (retval != 5) + if (retval == EPIPE) + usb_clear_halt(port->serial->dev, 0x00); + } while (retval != 8 && retval != ENODEV); + + if (retval != 8) err("%s - failed sending serial line settings - %d", __FUNCTION__, retval); else { spin_lock_irqsave(&priv->lock, flags); - priv->baud_rate = n_baud_rate; + priv->baud_rate = new_baudrate; priv->cbr_mask = baud_mask; - priv->current_config = config; - ++priv->cmd_count; + priv->current_config = feature_buffer[4]; spin_unlock_irqrestore(&priv->lock, flags); } break; case CYPRESS_GET_CONFIG: dbg("%s - retreiving serial line settings", __FUNCTION__); - /* reset values in feature buffer */ - memset(feature_buffer, 0, 5); + /* set initial values in feature buffer */ + memset(feature_buffer, 0, 8); + do { retval = usb_control_msg (port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), HID_REQ_GET_REPORT, USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS, - 0x0300, 0, feature_buffer, 5, 500); + 0x0300, 0, feature_buffer, 8, 500); + + if (tries++ >= 3) + break; + + if (retval == EPIPE) + usb_clear_halt(port->serial->dev, 0x00); + } while (retval != 5 && retval != ENODEV); + if (retval != 5) { err("%s - failed to retreive serial line settings - %d", __FUNCTION__, retval); return retval; } else { spin_lock_irqsave(&priv->lock, flags); + /* store the config in one byte, and later use bit masks to check values */ priv->current_config = feature_buffer[4]; - /* reverse the process above to get the baud_mask value */ - n_baud_rate = 0; // reset bits - for (i = 0; i < 4; ++i) { - n_baud_rate |= ( feature_buffer[i] << (i*8) ); - } + priv->baud_rate = *((u_int32_t *)feature_buffer); - priv->baud_rate = n_baud_rate; - if ( (priv->cbr_mask = rate_to_mask(n_baud_rate)) == 0x40) + if ( (priv->cbr_mask = rate_to_mask(priv->baud_rate)) == 0x40) dbg("%s - failed setting the baud mask (not defined)", __FUNCTION__); - ++priv->cmd_count; spin_unlock_irqrestore(&priv->lock, flags); } - break; - default: - err("%s - 
unsupported serial control command issued", __FUNCTION__); } + spin_lock_irqsave(&priv->lock, flags); + ++priv->cmd_count; + spin_unlock_irqrestore(&priv->lock, flags); + return retval; } /* cypress_serial_control */ -/* given a baud mask, it will return speed on success */ +/* given a baud mask, it will return integer baud on success */ static int mask_to_rate (unsigned mask) { int rate; @@ -438,11 +443,12 @@ static int generic_startup (struct usb_serial *serial) usb_reset_configuration (serial->dev); + interval = 1; priv->cmd_ctrl = 0; priv->line_control = 0; priv->termios_initialized = 0; - priv->calledfromopen = 0; priv->rx_flags = 0; + priv->cbr_mask = B300; usb_set_serial_port_data(serial->port[0], priv); return (0); @@ -513,7 +519,6 @@ static int cypress_open (struct usb_serial_port *port, struct file *filp) dbg("%s - port %d", __FUNCTION__, port->number); /* clear halts before open */ - usb_clear_halt(serial->dev, 0x00); usb_clear_halt(serial->dev, 0x81); usb_clear_halt(serial->dev, 0x02); @@ -531,7 +536,6 @@ static int cypress_open (struct usb_serial_port *port, struct file *filp) /* raise both lines and set termios */ spin_lock_irqsave(&priv->lock, flags); priv->line_control = CONTROL_DTR | CONTROL_RTS; - priv->calledfromopen = 1; priv->cmd_ctrl = 1; spin_unlock_irqrestore(&priv->lock, flags); result = cypress_write(port, NULL, 0); @@ -553,7 +557,7 @@ static int cypress_open (struct usb_serial_port *port, struct file *filp) usb_fill_int_urb(port->interrupt_in_urb, serial->dev, usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), port->interrupt_in_urb->transfer_buffer, port->interrupt_in_urb->transfer_buffer_length, - cypress_read_int_callback, port, port->interrupt_in_urb->interval); + cypress_read_int_callback, port, interval); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result){ @@ -680,12 +684,12 @@ static void cypress_send(struct usb_serial_port *port) spin_lock_irqsave(&priv->lock, flags); switch (port->interrupt_out_size) { case 32: - // this is for the CY7C64013... + /* this is for the CY7C64013... */ offset = 2; port->interrupt_out_buffer[0] = priv->line_control; break; case 8: - // this is for the CY7C63743... + /* this is for the CY7C63743... */ offset = 1; port->interrupt_out_buffer[0] = priv->line_control; break; @@ -738,6 +742,7 @@ send: port->interrupt_out_urb->transfer_buffer_length = actual_size; port->interrupt_out_urb->dev = port->serial->dev; + port->interrupt_out_urb->interval = interval; result = usb_submit_urb (port->interrupt_out_urb, GFP_ATOMIC); if (result) { dev_err(&port->dev, "%s - failed submitting write urb, error %d\n", __FUNCTION__, @@ -910,7 +915,7 @@ static void cypress_set_termios (struct usb_serial_port *port, struct termios *o unsigned cflag, iflag, baud_mask; unsigned long flags; __u8 oldlines; - int linechange; + int linechange = 0; dbg("%s - port %d", __FUNCTION__, port->number); @@ -996,15 +1001,7 @@ static void cypress_set_termios (struct usb_serial_port *port, struct termios *o case B115200: dbg("%s - setting baud 115200bps", __FUNCTION__); break; default: dbg("%s - unknown masked baud rate", __FUNCTION__); } - priv->line_control |= CONTROL_DTR; - - /* toggle CRTSCTS? 
- don't do this if being called from cypress_open */ - if (!priv->calledfromopen) { - if (cflag & CRTSCTS) - priv->line_control |= CONTROL_RTS; - else - priv->line_control &= ~CONTROL_RTS; - } + priv->line_control = (CONTROL_DTR | CONTROL_RTS); } spin_unlock_irqrestore(&priv->lock, flags); @@ -1014,8 +1011,6 @@ static void cypress_set_termios (struct usb_serial_port *port, struct termios *o cypress_serial_control(port, baud_mask, data_bits, stop_bits, parity_enable, parity_type, 0, CYPRESS_SET_CONFIG); - msleep(50); /* give some time between change and read (50ms) */ - /* we perform a CYPRESS_GET_CONFIG so that the current settings are filled into the private structure * this should confirm that all is working if it returns what we just set */ cypress_serial_control(port, 0, 0, 0, 0, 0, 0, CYPRESS_GET_CONFIG); @@ -1031,7 +1026,6 @@ static void cypress_set_termios (struct usb_serial_port *port, struct termios *o dbg("Using custom termios settings for a baud rate of 4800bps."); /* define custom termios settings for NMEA protocol */ - tty->termios->c_iflag /* input modes - */ &= ~(IGNBRK /* disable ignore break */ | BRKINT /* disable break causes interrupt */ @@ -1052,23 +1046,16 @@ static void cypress_set_termios (struct usb_serial_port *port, struct termios *o | ISIG /* disable interrupt, quit, and suspend special characters */ | IEXTEN); /* disable non-POSIX special characters */ - } else if (priv->chiptype == CT_CYPHIDCOM) { - - // Software app handling it for device... + } /* CT_CYPHIDCOM: Application should handle this for device */ - } linechange = (priv->line_control != oldlines); spin_unlock_irqrestore(&priv->lock, flags); /* if necessary, set lines */ - if (!priv->calledfromopen && linechange) { + if (linechange) { priv->cmd_ctrl = 1; cypress_write(port, NULL, 0); } - - if (priv->calledfromopen) - priv->calledfromopen = 0; - } /* cypress_set_termios */ @@ -1164,7 +1151,7 @@ static void cypress_read_int_callback(struct urb *urb, struct pt_regs *regs) spin_lock_irqsave(&priv->lock, flags); switch(urb->actual_length) { case 32: - // This is for the CY7C64013... + /* This is for the CY7C64013... */ priv->current_status = data[0] & 0xF8; bytes = data[1]+2; i=2; @@ -1172,7 +1159,7 @@ static void cypress_read_int_callback(struct urb *urb, struct pt_regs *regs) havedata = 1; break; case 8: - // This is for the CY7C63743... + /* This is for the CY7C63743... 
*/ priv->current_status = data[0] & 0xF8; bytes = (data[0] & 0x07)+1; i=1; @@ -1245,7 +1232,7 @@ continue_read: port->interrupt_in_urb->transfer_buffer, port->interrupt_in_urb->transfer_buffer_length, cypress_read_int_callback, port, - port->interrupt_in_urb->interval); + interval); result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result); @@ -1274,6 +1261,8 @@ static void cypress_write_int_callback(struct urb *urb, struct pt_regs *regs) dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); priv->write_urb_in_use = 0; return; + case -EPIPE: /* no break needed */ + usb_clear_halt(port->serial->dev, 0x02); default: /* error in the urb, so we have to resubmit it */ dbg("%s - Overflow in write", __FUNCTION__); @@ -1535,3 +1524,5 @@ module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); module_param(stats, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(stats, "Enable statistics or not"); +module_param(interval, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(interval, "Overrides interrupt interval"); -- cgit v0.10.2 From 03c41c434775c52092d17a5031ad8ebaaf555bc4 Mon Sep 17 00:00:00 2001 From: Ed L Cashin Date: Fri, 29 Apr 2005 10:24:03 -0400 Subject: [PATCH] aoe: improve allowed interfaces configuration improve allowed interfaces configuration Signed-off-by: Ed L. Cashin Signed-off-by: Greg Kroah-Hartman diff -uprN a/Documentation/aoe/aoe.txt b/Documentation/aoe/aoe.txt diff --git a/Documentation/aoe/aoe.txt b/Documentation/aoe/aoe.txt index 43e5010..1212987 100644 --- a/Documentation/aoe/aoe.txt +++ b/Documentation/aoe/aoe.txt @@ -33,6 +33,9 @@ USING DEVICE NODES "cat /dev/etherd/err" blocks, waiting for error diagnostic output, like any retransmitted packets. + The /dev/etherd/interfaces special file is obsoleted by the + aoe_iflist boot option and module option (and its sysfs entry + described in the next section). "echo eth2 eth4 > /dev/etherd/interfaces" tells the aoe driver to limit ATA over Ethernet traffic to eth2 and eth4. AoE traffic from untrusted networks should be ignored as a matter of security. @@ -89,3 +92,23 @@ USING SYSFS e4.7 eth1 up e4.8 eth1 up e4.9 eth1 up + + Use /sys/module/aoe/parameters/aoe_iflist (or better, the driver + option discussed below) instead of /dev/etherd/interfaces to limit + AoE traffic to the network interfaces in the given + whitespace-separated list. Unlike the old character device, the + sysfs entry can be read from as well as written to. + + It's helpful to trigger discovery after setting the list of allowed + interfaces. If your distro provides an aoe-discover script, you can + use that. Otherwise, you can directly use the /dev/etherd/discover + file described above. + +DRIVER OPTIONS + + There is a boot option for the built-in aoe driver and a + corresponding module parameter, aoe_iflist. Without this option, + all network interfaces may be used for ATA over Ethernet. Here is a + usage example for the module parameter. 
+ + modprobe aoe_iflist="eth1 eth3" diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index bc92aac..9e6f51c 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "aoe.h" #define NECODES 5 @@ -26,6 +27,19 @@ enum { }; static char aoe_iflist[IFLISTSZ]; +module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600); +MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\"\n"); + +#ifndef MODULE +static int __init aoe_iflist_setup(char *str) +{ + strncpy(aoe_iflist, str, IFLISTSZ); + aoe_iflist[IFLISTSZ - 1] = '\0'; + return 1; +} + +__setup("aoe_iflist=", aoe_iflist_setup); +#endif int is_aoe_netif(struct net_device *ifp) @@ -36,7 +50,8 @@ is_aoe_netif(struct net_device *ifp) if (aoe_iflist[0] == '\0') return 1; - for (p = aoe_iflist; *p; p = q + strspn(q, WHITESPACE)) { + p = aoe_iflist + strspn(aoe_iflist, WHITESPACE); + for (; *p; p = q + strspn(q, WHITESPACE)) { q = p + strcspn(p, WHITESPACE); if (q != p) len = q - p; -- cgit v0.10.2 From c59a24dc512952cb434b34a66c3792555159fa36 Mon Sep 17 00:00:00 2001 From: Ed L Cashin Date: Fri, 29 Apr 2005 10:24:13 -0400 Subject: [PATCH] aoe: aoe-stat should work for built-in as well as module aoe-stat should work for built-in as well as module Signed-off-by: Ed L. Cashin Signed-off-by: Greg Kroah-Hartman diff -uprN a/Documentation/aoe/status.sh b/Documentation/aoe/status.sh diff --git a/Documentation/aoe/status.sh b/Documentation/aoe/status.sh index 6628116..751f3be 100644 --- a/Documentation/aoe/status.sh +++ b/Documentation/aoe/status.sh @@ -14,10 +14,6 @@ test ! -d "$sysd/block" && { echo "$me Error: sysfs is not mounted" 1>&2 exit 1 } -test -z "`lsmod | grep '^aoe'`" && { - echo "$me Error: aoe module is not loaded" 1>&2 - exit 1 -} for d in `ls -d $sysd/block/etherd* 2>/dev/null | grep -v p` end; do # maybe ls comes up empty, so we use "end" -- cgit v0.10.2 From 67d9f84786cc4fd42cb40c9474c248eadaff15c6 Mon Sep 17 00:00:00 2001 From: Ed L Cashin Date: Fri, 29 Apr 2005 10:24:18 -0400 Subject: [PATCH] aoe: update the documentation to mention aoetools update the documentation to mention aoetools Signed-off-by: Ed L. Cashin Signed-off-by: Greg Kroah-Hartman diff --git a/Documentation/aoe/aoe.txt b/Documentation/aoe/aoe.txt index 1212987..3a4dbe4 100644 --- a/Documentation/aoe/aoe.txt +++ b/Documentation/aoe/aoe.txt @@ -4,6 +4,16 @@ The EtherDrive (R) HOWTO for users of 2.6 kernels is found at ... It has many tips and hints! +The aoetools are userland programs that are designed to work with this +driver. The aoetools are on sourceforge. + + http://aoetools.sourceforge.net/ + +The scripts in this Documentation/aoe directory are intended to +document the use of the driver and are not necessary if you install +the aoetools. + + CREATING DEVICE NODES Users of udev should find the block device nodes created @@ -33,19 +43,17 @@ USING DEVICE NODES "cat /dev/etherd/err" blocks, waiting for error diagnostic output, like any retransmitted packets. - The /dev/etherd/interfaces special file is obsoleted by the - aoe_iflist boot option and module option (and its sysfs entry - described in the next section). "echo eth2 eth4 > /dev/etherd/interfaces" tells the aoe driver to limit ATA over Ethernet traffic to eth2 and eth4. AoE traffic from - untrusted networks should be ignored as a matter of security. + untrusted networks should be ignored as a matter of security. See + also the aoe_iflist driver option described below. 
"echo > /dev/etherd/discover" tells the driver to find out what AoE devices are available. These character devices may disappear and be replaced by sysfs - counterparts, so distribution maintainers are encouraged to create - scripts that use these devices. + counterparts. Using the commands in aoetools insulates users from + these implementation details. The block devices are named like this: @@ -69,7 +77,8 @@ USING SYSFS through which we are communicating with the remote AoE device. There is a script in this directory that formats this information - in a convenient way. + in a convenient way. Users with aoetools can use the aoe-stat + command. root@makki root# sh Documentation/aoe/status.sh e10.0 eth3 up @@ -100,9 +109,9 @@ USING SYSFS sysfs entry can be read from as well as written to. It's helpful to trigger discovery after setting the list of allowed - interfaces. If your distro provides an aoe-discover script, you can - use that. Otherwise, you can directly use the /dev/etherd/discover - file described above. + interfaces. The aoetools package provides an aoe-discover script + for this purpose. You can also directly use the + /dev/etherd/discover special file described above. DRIVER OPTIONS -- cgit v0.10.2 From 93d489fc56f819d8805d80ae83cbafc5e5719804 Mon Sep 17 00:00:00 2001 From: Ed L Cashin Date: Fri, 29 Apr 2005 10:24:22 -0400 Subject: [PATCH] aoe: allow multiple aoe devices to have the same mac allow multiple aoe devices to have the same mac Signed-off-by: Ed L. Cashin Signed-off-by: Greg Kroah-Hartman diff -u b/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index ec16c64..6e231c5 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -109,25 +109,22 @@ aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bu spin_lock_irqsave(&devlist_lock, flags); for (d=devlist; d; d=d->next) - if (d->sysminor == sysminor - || memcmp(d->addr, addr, sizeof d->addr) == 0) + if (d->sysminor == sysminor) break; if (d == NULL && (d = aoedev_newdev(bufcnt)) == NULL) { spin_unlock_irqrestore(&devlist_lock, flags); printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n"); return NULL; - } + } /* if newdev, (d->flags & DEVFL_UP) == 0 for below */ spin_unlock_irqrestore(&devlist_lock, flags); spin_lock_irqsave(&d->lock, flags); d->ifp = ifp; - - if (d->sysminor != sysminor - || (d->flags & DEVFL_UP) == 0) { + memcpy(d->addr, addr, sizeof d->addr); + if ((d->flags & DEVFL_UP) == 0) { aoedev_downdev(d); /* flushes outstanding frames */ - memcpy(d->addr, addr, sizeof d->addr); d->sysminor = sysminor; d->aoemajor = AOEMAJOR(sysminor); d->aoeminor = AOEMINOR(sysminor); -- cgit v0.10.2 From 4613ed277ab8a41640434181898ef4649cc7301e Mon Sep 17 00:00:00 2001 From: Ed L Cashin Date: Fri, 29 Apr 2005 10:24:25 -0400 Subject: [PATCH] aoe: add firmware version to info in sysfs add firmware version to info in sysfs Signed-off-by: Ed L. 
Cashin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 4780f79..0e97fcb 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -37,6 +37,13 @@ static ssize_t aoedisk_show_netif(struct gendisk * disk, char *page) return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name); } +/* firmware version */ +static ssize_t aoedisk_show_fwver(struct gendisk * disk, char *page) +{ + struct aoedev *d = disk->private_data; + + return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver); +} static struct disk_attribute disk_attr_state = { .attr = {.name = "state", .mode = S_IRUGO }, @@ -50,6 +57,10 @@ static struct disk_attribute disk_attr_netif = { .attr = {.name = "netif", .mode = S_IRUGO }, .show = aoedisk_show_netif }; +static struct disk_attribute disk_attr_fwver = { + .attr = {.name = "firmware-version", .mode = S_IRUGO }, + .show = aoedisk_show_fwver +}; static void aoedisk_add_sysfs(struct aoedev *d) @@ -57,6 +68,7 @@ aoedisk_add_sysfs(struct aoedev *d) sysfs_create_file(&d->gd->kobj, &disk_attr_state.attr); sysfs_create_file(&d->gd->kobj, &disk_attr_mac.attr); sysfs_create_file(&d->gd->kobj, &disk_attr_netif.attr); + sysfs_create_file(&d->gd->kobj, &disk_attr_fwver.attr); } void aoedisk_rm_sysfs(struct aoedev *d) @@ -64,6 +76,7 @@ aoedisk_rm_sysfs(struct aoedev *d) sysfs_remove_link(&d->gd->kobj, "state"); sysfs_remove_link(&d->gd->kobj, "mac"); sysfs_remove_link(&d->gd->kobj, "netif"); + sysfs_remove_link(&d->gd->kobj, "firmware-version"); } static int -- cgit v0.10.2 From 0e57c7166675a86293f150d5ef7779edd629fe2a Mon Sep 17 00:00:00 2001 From: Ed L Cashin Date: Fri, 29 Apr 2005 10:24:28 -0400 Subject: [PATCH] aoe: update version number to 10 update version number to 10 Signed-off-by: Ed L. Cashin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index aa8b547..721ba80 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -1,5 +1,5 @@ /* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */ -#define VERSION "6" +#define VERSION "10" #define AOE_MAJOR 152 #define DEVICE_NAME "aoe" -- cgit v0.10.2 From ceb43744cd48a20212e2179e0c7ff2f450a3c97e Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Fri, 8 Apr 2005 14:53:31 +0900 Subject: [PATCH] PCI: 'is_enabled' flag should be set/cleared when the device is actually enabled/disabled I think 'is_enabled' flag in pci_dev structure should be set/cleared when the device actually enabled/disabled. Especially about pci_enable_device(), it can be failed. By this change, we will also get the possibility of refering 'is_enabled' flag from the functions called through pci_enable_device()/pci_disable_device(). 
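For illustration only, a minimal probe sketch (hypothetical driver name, error paths trimmed) that relies on the pci_enable_device() return value; with this change dev->is_enabled is set only once that call has actually succeeded:

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;	/* enable failed, is_enabled stays clear */

		pci_set_master(pdev);	/* marks the device as bus master */
		/* ... map BARs, request the IRQ, etc. ... */
		return 0;
	}
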
Signed-off-by: Kenji Kaneshige Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index bfbff83..fc8cc6c 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -398,10 +398,10 @@ pci_enable_device(struct pci_dev *dev) { int err; - dev->is_enabled = 1; if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1))) return err; pci_fixup_device(pci_fixup_enable, dev); + dev->is_enabled = 1; return 0; } @@ -427,16 +427,15 @@ pci_disable_device(struct pci_dev *dev) { u16 pci_command; - dev->is_enabled = 0; - dev->is_busmaster = 0; - pci_read_config_word(dev, PCI_COMMAND, &pci_command); if (pci_command & PCI_COMMAND_MASTER) { pci_command &= ~PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, pci_command); } + dev->is_busmaster = 0; pcibios_disable_device(dev); + dev->is_enabled = 0; } /** -- cgit v0.10.2 From a3ea7fbac12fdb2d70c90bb36f81afa3c66e18f4 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 29 Mar 2005 19:08:48 +0100 Subject: [PATCH] PCI: update PCI documentation for pci_get_slot() depreciation pci_find_slot() doesn't work on multiple-domain boxes so pci_get_slot() should be used instead. Signed-off-by: Matthew Wilcox Signed-off-by: Greg Kroah-Hartman diff --git a/Documentation/pci.txt b/Documentation/pci.txt index 67514bf..62b1dc5 100644 --- a/Documentation/pci.txt +++ b/Documentation/pci.txt @@ -279,6 +279,7 @@ pci_for_each_dev_reverse() Superseded by pci_find_device_reverse() pci_for_each_bus() Superseded by pci_find_next_bus() pci_find_device() Superseded by pci_get_device() pci_find_subsys() Superseded by pci_get_subsys() +pci_find_slot() Superseded by pci_get_slot() pcibios_find_class() Superseded by pci_get_class() pci_find_class() Superseded by pci_get_class() pci_(read|write)_*_nodev() Superseded by pci_bus_(read|write)_*() -- cgit v0.10.2 From 92df516e6264f9caff4be49718926d6884fa50ed Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Tue, 5 Apr 2005 23:49:49 +0200 Subject: [PATCH] PCI: fix stale PCI pm docs This fixes u32 vs. pm_message_t confusion in documentation, and removes references to no-longer-existing (*save_state), too. With exception of USB (I hope David will fix/apply my patch), this should fix last piece of this confusion... famous last words. Signed-off-by: Pavel Machek Signed-off-by: Greg Kroah-Hartman diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt index c85428e..35b1a7d 100644 --- a/Documentation/power/pci.txt +++ b/Documentation/power/pci.txt @@ -165,40 +165,9 @@ Description: These functions are intended for use by individual drivers, and are defined in struct pci_driver: - int (*save_state) (struct pci_dev *dev, u32 state); - int (*suspend) (struct pci_dev *dev, u32 state); + int (*suspend) (struct pci_dev *dev, pm_message_t state); int (*resume) (struct pci_dev *dev); - int (*enable_wake) (struct pci_dev *dev, u32 state, int enable); - - -save_state ----------- - -Usage: - -if (dev->driver && dev->driver->save_state) - dev->driver->save_state(dev,state); - -The driver should use this callback to save device state. It should take into -account the current state of the device and the requested state in order to -avoid any unnecessary operations. - -For example, a video card that supports all 4 states (D0-D3), all controller -context is preserved when entering D1, but the screen is placed into a low power -state (blanked). - -The driver can also interpret this function as a notification that it may be -entering a sleep state in the near future. 
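As a rough sketch of the callback signatures the corrected documentation describes (hypothetical driver, bodies reduced to comments):

	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		/* quiesce the device and save any context needed on resume */
		return 0;
	}

	static int example_resume(struct pci_dev *pdev)
	{
		/* restore context and bring the device back to full power */
		return 0;
	}

	static struct pci_driver example_driver = {
		.name		= "example",
		.suspend	= example_suspend,
		.resume		= example_resume,
		/* .id_table, .probe and .remove omitted for brevity */
	};
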
If it knows that the device cannot -enter the requested state, either because of lack of support for it, or because -the device is middle of some critical operation, then it should fail. - -This function should not be used to set any state in the device or the driver -because the device may not actually enter the sleep state (e.g. another driver -later causes causes a global state transition to fail). - -Note that in intermediate low power states, a device's I/O and memory spaces may -be disabled and may not be available in subsequent transitions to lower power -states. + int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); suspend -- cgit v0.10.2 From bc56b9e01190b9f1ad6b7c5c694b61bfe34c7aa5 Mon Sep 17 00:00:00 2001 From: Greg KH Date: Fri, 8 Apr 2005 14:53:31 +0900 Subject: [PATCH] PCI: Clean up a lot of sparse "Should it be static?" warnings. Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 968eb32..bc01d34 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -19,7 +19,7 @@ static u32 ctrlset_buf[3] = {0, 0, 0}; static u32 global_ctrlsets = 0; -u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; +static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; static acpi_status acpi_query_osc ( diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index fc8cc6c..88cbe5b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -16,6 +16,7 @@ #include #include #include /* isa_dma_bridge_buggy */ +#include "pci.h" /** diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 6f0edad..b7ae878 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -9,6 +9,7 @@ #include #include #include +#include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ #define CARDBUS_RESERVE_BUSNR 3 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 84cc4f6..e68bbfb 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -15,6 +15,7 @@ #include #include +#include "pci.h" static int proc_initialized; /* = 0 */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 15a3980..00388a1 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -18,6 +18,7 @@ #include #include #include +#include "pci.h" /* Deal with broken BIOS'es that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ -- cgit v0.10.2 From 4c0619add8c3a8b28e7fae8b15cc7b62de2f8148 Mon Sep 17 00:00:00 2001 From: "ssant@in.ibm.com" Date: Fri, 8 Apr 2005 14:53:31 +0900 Subject: [PATCH] PCI: fix up word-aligned 16-bit PCI config access through sysfs This patch adds the possibility to do word-aligned 16-bit atomic PCI configuration space accesses via the sysfs PCI interface. As a result, problems with Emulex LFPC on IBM PowerPC64 are fixed. Patch is present in SLES 9 SP1. 
Signed-off-by: Vojtech Pavlik Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index d57ae71..8568b20 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -91,6 +91,7 @@ pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count) struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj)); unsigned int size = 64; loff_t init_off = off; + u8 *data = (u8*) buf; /* Several chips lock up trying to read undefined config space */ if (capable(CAP_SYS_ADMIN)) { @@ -108,30 +109,47 @@ pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count) size = count; } - while (off & 3) { - unsigned char val; + if ((off & 1) && size) { + u8 val; pci_read_config_byte(dev, off, &val); - buf[off - init_off] = val; + data[off - init_off] = val; off++; - if (--size == 0) - break; + size--; + } + + if ((off & 3) && size > 2) { + u16 val; + pci_read_config_word(dev, off, &val); + data[off - init_off] = val & 0xff; + data[off - init_off + 1] = (val >> 8) & 0xff; + off += 2; + size -= 2; } while (size > 3) { - unsigned int val; + u32 val; pci_read_config_dword(dev, off, &val); - buf[off - init_off] = val & 0xff; - buf[off - init_off + 1] = (val >> 8) & 0xff; - buf[off - init_off + 2] = (val >> 16) & 0xff; - buf[off - init_off + 3] = (val >> 24) & 0xff; + data[off - init_off] = val & 0xff; + data[off - init_off + 1] = (val >> 8) & 0xff; + data[off - init_off + 2] = (val >> 16) & 0xff; + data[off - init_off + 3] = (val >> 24) & 0xff; off += 4; size -= 4; } - while (size > 0) { - unsigned char val; + if (size >= 2) { + u16 val; + pci_read_config_word(dev, off, &val); + data[off - init_off] = val & 0xff; + data[off - init_off + 1] = (val >> 8) & 0xff; + off += 2; + size -= 2; + } + + if (size > 0) { + u8 val; pci_read_config_byte(dev, off, &val); - buf[off - init_off] = val; + data[off - init_off] = val; off++; --size; } @@ -145,6 +163,7 @@ pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count) struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj)); unsigned int size = count; loff_t init_off = off; + u8 *data = (u8*) buf; if (off > dev->cfg_size) return 0; @@ -152,26 +171,41 @@ pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count) size = dev->cfg_size - off; count = size; } - - while (off & 3) { - pci_write_config_byte(dev, off, buf[off - init_off]); + + if ((off & 1) && size) { + pci_write_config_byte(dev, off, data[off - init_off]); off++; - if (--size == 0) - break; + size--; } + + if ((off & 3) && size > 2) { + u16 val = data[off - init_off]; + val |= (u16) data[off - init_off + 1] << 8; + pci_write_config_word(dev, off, val); + off += 2; + size -= 2; + } while (size > 3) { - unsigned int val = buf[off - init_off]; - val |= (unsigned int) buf[off - init_off + 1] << 8; - val |= (unsigned int) buf[off - init_off + 2] << 16; - val |= (unsigned int) buf[off - init_off + 3] << 24; + u32 val = data[off - init_off]; + val |= (u32) data[off - init_off + 1] << 8; + val |= (u32) data[off - init_off + 2] << 16; + val |= (u32) data[off - init_off + 3] << 24; pci_write_config_dword(dev, off, val); off += 4; size -= 4; } + + if (size >= 2) { + u16 val = data[off - init_off]; + val |= (u16) data[off - init_off + 1] << 8; + pci_write_config_word(dev, off, val); + off += 2; + size -= 2; + } - while (size > 0) { - pci_write_config_byte(dev, off, buf[off - init_off]); + if (size) { + pci_write_config_byte(dev, off, data[off - init_off]); off++; --size; } -- cgit v0.10.2 
From c8958177224622411b9979eabb5610e30b06034b Mon Sep 17 00:00:00 2001 From: Greg KH Date: Fri, 8 Apr 2005 14:53:31 +0900 Subject: [PATCH] PCI: Add pci shutdown ability Now pci drivers can know when the system is going down without having to add a reboot notifier event. Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 37b7961..b42466c 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -318,6 +318,14 @@ static int pci_device_resume(struct device * dev) return 0; } +static void pci_device_shutdown(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct pci_driver *drv = pci_dev->driver; + + if (drv && drv->shutdown) + drv->shutdown(pci_dev); +} #define kobj_to_pci_driver(obj) container_of(obj, struct device_driver, kobj) #define attr_to_driver_attribute(obj) container_of(obj, struct driver_attribute, attr) @@ -385,6 +393,7 @@ int pci_register_driver(struct pci_driver *drv) drv->driver.bus = &pci_bus_type; drv->driver.probe = pci_device_probe; drv->driver.remove = pci_device_remove; + drv->driver.shutdown = pci_device_shutdown, drv->driver.owner = drv->owner; drv->driver.kobj.ktype = &pci_driver_kobj_type; pci_init_dynids(&drv->dynids); diff --git a/include/linux/pci.h b/include/linux/pci.h index 3c89148..cff5ba3 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -671,6 +671,7 @@ struct pci_driver { int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ int (*resume) (struct pci_dev *dev); /* Device woken up */ int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */ + void (*shutdown) (struct pci_dev *dev); struct device_driver driver; struct pci_dynids dynids; -- cgit v0.10.2 From 034ecc724cc6ba662d0b2b5a1e11e7e66a768596 Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Mon, 11 Apr 2005 15:01:54 +0200 Subject: [PATCH] PCI Hotplug ibmphp_pci.c: Fix masking out needed information too early here is the patch that fixes the bug introduced by my previous patch which already went into 2.6.12-rc2 and is likely to cause trouble is someone hits one the else case here by accident. Using the &= operation before the if statement destroys the information the if asks for so we always go into the else branch. Signed-off-by: Rolf Eike Beer Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c index 2335fac..8122fe7 100644 --- a/drivers/pci/hotplug/ibmphp_pci.c +++ b/drivers/pci/hotplug/ibmphp_pci.c @@ -1308,10 +1308,10 @@ static int unconfigure_boot_device (u8 busno, u8 device, u8 function) /* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? 
*/ } else { /* This is Memory */ - start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) { /* pfmem */ debug ("start address of pfmem is %x\n", start_address); + start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) { err ("cannot find corresponding PFMEM resource to remove\n"); @@ -1325,6 +1325,8 @@ static int unconfigure_boot_device (u8 busno, u8 device, u8 function) } else { /* regular memory */ debug ("start address of mem is %x\n", start_address); + start_address &= PCI_BASE_ADDRESS_MEM_MASK; + if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) { err ("cannot find corresponding MEM resource to remove\n"); return -EIO; @@ -1422,9 +1424,9 @@ static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function) /* ????????? DO WE NEED TO WRITE ANYTHING INTO THE PCI CONFIG SPACE BACK ?????????? */ } else { /* This is Memory */ - start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) { /* pfmem */ + start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) { err ("cannot find corresponding PFMEM resource to remove\n"); return -EINVAL; @@ -1436,6 +1438,7 @@ static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function) } } else { /* regular memory */ + start_address &= PCI_BASE_ADDRESS_MEM_MASK; if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) { err ("cannot find corresponding MEM resource to remove\n"); return -EINVAL; -- cgit v0.10.2 From 3aa8c4febf74b1f23bd9fc329321af6d531fe4dd Mon Sep 17 00:00:00 2001 From: "R.Marek@sh.cvut.cz" Date: Thu, 21 Apr 2005 10:49:06 +0000 Subject: [PATCH] PCI: Rapid Hance quirk This patch just adds Intel's Hance Rapid south bridge IDs to ICH4 region quirk. Patch was successfuly tested by Chunhao Huang from Winbond. Signed-Off-By: Rudolf Marek Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 00388a1..026aa04 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -329,6 +329,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi ); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi ); /* * VIA ACPI: One IO region pointed to by longword at -- cgit v0.10.2 From eaae4b3a84a3781543a32bcaf0a33306ae915574 Mon Sep 17 00:00:00 2001 From: Steven Cole Date: Tue, 3 May 2005 18:38:30 -0600 Subject: [PATCH] PCI: Spelling fixes for drivers/pci. Here are some spelling corrections for drivers/pci. 
CONTROLER -> CONTROLLER Regisetr -> Register harware -> hardware inital -> initial Initilize -> Initialize funtion -> function funciton -> function occured -> occurred Signed-off-by: Steven Cole Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h index 5bc039d..c22e028 100644 --- a/drivers/pci/hotplug/ibmphp.h +++ b/drivers/pci/hotplug/ibmphp.h @@ -196,7 +196,7 @@ struct ebda_hpc_bus { /******************************************************************** -* THREE TYPE OF HOT PLUG CONTROLER * +* THREE TYPE OF HOT PLUG CONTROLLER * ********************************************************************/ struct isa_ctlr_access { diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index 6894b54..1a3eb8d 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c @@ -64,7 +64,7 @@ static int to_debug = FALSE; #define WPG_I2C_OR 0x2000 // I2C OR operation //---------------------------------------------------------------------------- -// Command set for I2C Master Operation Setup Regisetr +// Command set for I2C Master Operation Setup Register //---------------------------------------------------------------------------- #define WPG_READATADDR_MASK 0x00010000 // read,bytes,I2C shifted,index #define WPG_WRITEATADDR_MASK 0x40010000 // write,bytes,I2C shifted,index @@ -835,7 +835,7 @@ static void poll_hpc (void) if (ibmphp_shutdown) break; - /* try to get the lock to do some kind of harware access */ + /* try to get the lock to do some kind of hardware access */ down (&semOperations); switch (poll_state) { @@ -906,7 +906,7 @@ static void poll_hpc (void) poll_state = POLL_LATCH_REGISTER; break; } - /* give up the harware semaphore */ + /* give up the hardware semaphore */ up (&semOperations); /* sleep for a short time just for good measure */ msleep(100); diff --git a/drivers/pci/hotplug/pci_hotplug.h b/drivers/pci/hotplug/pci_hotplug.h index 57ace32..88d44f7 100644 --- a/drivers/pci/hotplug/pci_hotplug.h +++ b/drivers/pci/hotplug/pci_hotplug.h @@ -150,7 +150,7 @@ struct hotplug_slot_info { * @name: the name of the slot being registered. This string must * be unique amoung slots registered on this system. * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot - * @info: pointer to the &struct hotplug_slot_info for the inital values for + * @info: pointer to the &struct hotplug_slot_info for the initial values for * this slot. * @release: called during pci_hp_deregister to free memory allocated in a * hotplug_slot structure. diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c index 6605d6b..3194d51c 100644 --- a/drivers/pci/hotplug/pcihp_skeleton.c +++ b/drivers/pci/hotplug/pcihp_skeleton.c @@ -297,7 +297,7 @@ static int __init init_slots(void) hotplug_slot->ops = &skel_hotplug_slot_ops; /* - * Initilize the slot info structure with some known + * Initialize the slot info structure with some known * good values. 
*/ info->power_status = get_power_status(slot); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 22ecd3b..30206ac 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -522,7 +522,7 @@ void pci_scan_msi_device(struct pci_dev *dev) * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function * - * Setup the MSI capability structure of device funtion with a single + * Setup the MSI capability structure of device function with a single * MSI vector, regardless of device function is capable of handling * multiple messages. A return of zero indicates the successful setup * of an entry zero with the new MSI vector or non-zero for otherwise. @@ -599,7 +599,7 @@ static int msi_capability_init(struct pci_dev *dev) * msix_capability_init - configure device's MSI-X capability * @dev: pointer to the pci_dev data structure of MSI-X device function * - * Setup the MSI-X capability structure of device funtion with a + * Setup the MSI-X capability structure of device function with a * single MSI-X vector. A return of zero indicates the successful setup of * requested MSI-X entries with allocated vectors or non-zero for otherwise. **/ @@ -1074,7 +1074,7 @@ void pci_disable_msix(struct pci_dev* dev) * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state * @dev: pointer to the pci_dev data structure of MSI(X) device function * - * Being called during hotplug remove, from which the device funciton + * Being called during hotplug remove, from which the device function * is hot-removed. All previous assigned MSI/MSI-X vectors, if * allocated for this device function, are reclaimed to unused state, * which may be used later on. diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index b42466c..fe98553 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -381,7 +381,7 @@ pci_populate_driver_dir(struct pci_driver *drv) * * Adds the driver structure to the list of registered drivers. * Returns a negative value on error, otherwise 0. - * If no error occured, the driver remains registered even if + * If no error occurred, the driver remains registered even if * no device was claimed during registration. */ int pci_register_driver(struct pci_driver *drv) -- cgit v0.10.2 From b308240b49ff5a1bddc6e10513c2c83f37a0bc78 Mon Sep 17 00:00:00 2001 From: Dely Sy Date: Thu, 28 Apr 2005 18:08:53 -0700 Subject: [PATCH] PCI Hotplug: fix pciehp regression I forgot to remove the code that freed the memory in cleanup_slots(). Here is the new patch, in which I have also taken care of Eike's comment to remove the cast from hotplug_slot->private. 
Signed-off-by: Dely Sy Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 72baf74..ed1fd8d 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -90,6 +90,22 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = { .get_cur_bus_speed = get_cur_bus_speed, }; +/** + * release_slot - free up the memory used by a slot + * @hotplug_slot: slot to free + */ +static void release_slot(struct hotplug_slot *hotplug_slot) +{ + struct slot *slot = hotplug_slot->private; + + dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); + + kfree(slot->hotplug_slot->info); + kfree(slot->hotplug_slot->name); + kfree(slot->hotplug_slot); + kfree(slot); +} + static int init_slots(struct controller *ctrl) { struct slot *new_slot; @@ -139,7 +155,8 @@ static int init_slots(struct controller *ctrl) /* register this slot with the hotplug pci core */ new_slot->hotplug_slot->private = new_slot; - make_slot_name (new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot); + new_slot->hotplug_slot->release = &release_slot; + make_slot_name(new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot); new_slot->hotplug_slot->ops = &pciehp_hotplug_slot_ops; new_slot->hpc_ops->get_power_status(new_slot, &(new_slot->hotplug_slot->info->power_status)); @@ -188,10 +205,6 @@ static int cleanup_slots (struct controller * ctrl) while (old_slot) { next_slot = old_slot->next; pci_hp_deregister (old_slot->hotplug_slot); - kfree(old_slot->hotplug_slot->info); - kfree(old_slot->hotplug_slot->name); - kfree(old_slot->hotplug_slot); - kfree(old_slot); old_slot = next_slot; } -- cgit v0.10.2 From 9171078ab5a0bbb516029cfc61378e0350a7b30d Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sun, 1 May 2005 15:53:08 +0200 Subject: [PATCH] PCI: drivers/pci/pci.c: remove pci_dac_set_dma_mask pci_dac_set_dma_mask is currently completely unused. 
Signed-off-by: Adrian Bunk Signed-off-by: Greg Kroah-Hartman diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 94bcdb9..aa92e37 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -502,15 +502,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask) } int -pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask) -{ - if (mask >= SZ_64M - 1 ) - return 0; - - return -EIO; -} - -int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { if (mask >= SZ_64M - 1 ) @@ -520,7 +511,6 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) } EXPORT_SYMBOL(pci_set_dma_mask); -EXPORT_SYMBOL(pci_dac_set_dma_mask); EXPORT_SYMBOL(pci_set_consistent_dma_mask); EXPORT_SYMBOL(ixp4xx_pci_read); EXPORT_SYMBOL(ixp4xx_pci_write); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 88cbe5b..f04b9ff 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -749,17 +749,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask) } int -pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask) -{ - if (!pci_dac_dma_supported(dev, mask)) - return -EIO; - - dev->dma_mask = mask; - - return 0; -} - -int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { if (!pci_dma_supported(dev, mask)) @@ -821,7 +810,6 @@ EXPORT_SYMBOL(pci_set_master); EXPORT_SYMBOL(pci_set_mwi); EXPORT_SYMBOL(pci_clear_mwi); EXPORT_SYMBOL(pci_set_dma_mask); -EXPORT_SYMBOL(pci_dac_set_dma_mask); EXPORT_SYMBOL(pci_set_consistent_dma_mask); EXPORT_SYMBOL(pci_assign_resource); EXPORT_SYMBOL(pci_find_parent_resource); diff --git a/include/linux/pci.h b/include/linux/pci.h index cff5ba3..b5238bd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -811,7 +811,6 @@ void pci_set_master(struct pci_dev *dev); int pci_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); int pci_set_dma_mask(struct pci_dev *dev, u64 mask); -int pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask); int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); int pci_assign_resource(struct pci_dev *dev, int i); @@ -942,7 +941,6 @@ static inline void pci_set_master(struct pci_dev *dev) { } static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } static inline void pci_disable_device(struct pci_dev *dev) { } static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } -static inline int pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;} static inline int pci_register_driver(struct pci_driver *drv) { return 0;} static inline void pci_unregister_driver(struct pci_driver *drv) { } -- cgit v0.10.2 From 6b6bf51081a27e80334e7ebe2993ae1d046a3222 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Wed, 4 May 2005 09:11:49 -0500 Subject: JFS: Endian errors Thanks sparse! 
Signed-off-by: Dave Kleikamp diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c index 2c1f311..31b34db 100644 --- a/fs/jfs/jfs_xtree.c +++ b/fs/jfs/jfs_xtree.c @@ -1,5 +1,5 @@ /* - * Copyright (C) International Business Machines Corp., 2000-2004 + * Copyright (C) International Business Machines Corp., 2000-2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -688,7 +688,7 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp, /* search hit - internal page: * descend/search its child page */ - if (index < p->header.nextindex - 1) + if (index < le16_to_cpu(p->header.nextindex)-1) next = offsetXAD(&p->xad[index + 1]); goto next; } @@ -705,7 +705,7 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp, * base is the smallest index with key (Kj) greater than * search key (K) and may be zero or maxentry index. */ - if (base < p->header.nextindex) + if (base < le16_to_cpu(p->header.nextindex)) next = offsetXAD(&p->xad[base]); /* * search miss - leaf page: -- cgit v0.10.2 From 3c51f196b658fa1920c84b0752a55ed251d22d52 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:38:51 +0100 Subject: [PATCH] sparc NULL noise removal Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/arch/sparc/prom/memory.c b/arch/sparc/prom/memory.c index 46aa51a..c20e530 100644 --- a/arch/sparc/prom/memory.c +++ b/arch/sparc/prom/memory.c @@ -47,9 +47,9 @@ prom_sortmemlist(struct linux_mlist_v0 *thislist) char *tmpaddr; char *lowest; - for(i=0; thislist[i].theres_more != 0; i++) { + for(i=0; thislist[i].theres_more; i++) { lowest = thislist[i].start_adr; - for(mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++) + for(mitr = i+1; thislist[mitr-1].theres_more; mitr++) if(thislist[mitr].start_adr < lowest) { lowest = thislist[mitr].start_adr; swapi = mitr; @@ -85,7 +85,7 @@ void __init prom_meminit(void) prom_phys_total[iter].num_bytes = mptr->num_bytes; prom_phys_total[iter].theres_more = &prom_phys_total[iter+1]; } - prom_phys_total[iter-1].theres_more = 0x0; + prom_phys_total[iter-1].theres_more = NULL; /* Second, the total prom taken descriptors. */ for(mptr = (*(romvec->pv_v0mem.v0_prommap)), iter=0; mptr; mptr=mptr->theres_more, iter++) { @@ -93,7 +93,7 @@ void __init prom_meminit(void) prom_prom_taken[iter].num_bytes = mptr->num_bytes; prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1]; } - prom_prom_taken[iter-1].theres_more = 0x0; + prom_prom_taken[iter-1].theres_more = NULL; /* Last, the available physical descriptors. */ for(mptr = (*(romvec->pv_v0mem.v0_available)), iter=0; mptr; mptr=mptr->theres_more, iter++) { @@ -101,7 +101,7 @@ void __init prom_meminit(void) prom_phys_avail[iter].num_bytes = mptr->num_bytes; prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1]; } - prom_phys_avail[iter-1].theres_more = 0x0; + prom_phys_avail[iter-1].theres_more = NULL; /* Sort all the lists. 
*/ prom_sortmemlist(prom_phys_total); prom_sortmemlist(prom_prom_taken); @@ -124,7 +124,7 @@ void __init prom_meminit(void) prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1]; } - prom_phys_avail[iter-1].theres_more = 0x0; + prom_phys_avail[iter-1].theres_more = NULL; num_regs = prom_getproperty(node, "reg", (char *) prom_reg_memlist, @@ -138,7 +138,7 @@ void __init prom_meminit(void) prom_phys_total[iter].theres_more = &prom_phys_total[iter+1]; } - prom_phys_total[iter-1].theres_more = 0x0; + prom_phys_total[iter-1].theres_more = NULL; node = prom_getchild(prom_root_node); node = prom_searchsiblings(node, "virtual-memory"); @@ -158,7 +158,7 @@ void __init prom_meminit(void) prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1]; } - prom_prom_taken[iter-1].theres_more = 0x0; + prom_prom_taken[iter-1].theres_more = NULL; prom_sortmemlist(prom_prom_taken); @@ -182,15 +182,15 @@ void __init prom_meminit(void) case PROM_SUN4: #ifdef CONFIG_SUN4 /* how simple :) */ - prom_phys_total[0].start_adr = 0x0; + prom_phys_total[0].start_adr = NULL; prom_phys_total[0].num_bytes = *(sun4_romvec->memorysize); - prom_phys_total[0].theres_more = 0x0; - prom_prom_taken[0].start_adr = 0x0; + prom_phys_total[0].theres_more = NULL; + prom_prom_taken[0].start_adr = NULL; prom_prom_taken[0].num_bytes = 0x0; - prom_prom_taken[0].theres_more = 0x0; - prom_phys_avail[0].start_adr = 0x0; + prom_prom_taken[0].theres_more = NULL; + prom_phys_avail[0].start_adr = NULL; prom_phys_avail[0].num_bytes = *(sun4_romvec->memoryavail); - prom_phys_avail[0].theres_more = 0x0; + prom_phys_avail[0].theres_more = NULL; #endif break; diff --git a/arch/sparc/prom/sun4prom.c b/arch/sparc/prom/sun4prom.c index 69ca735..00390a2 100644 --- a/arch/sparc/prom/sun4prom.c +++ b/arch/sparc/prom/sun4prom.c @@ -151,7 +151,7 @@ struct linux_romvec * __init sun4_prom_init(void) * have more time, we can teach the penguin to say "By your * command" or "Activating turbo boost, Michael". 
:-) */ - sun4_romvec->setLEDs(0x0); + sun4_romvec->setLEDs(NULL); printk("PROMLIB: Old Sun4 boot PROM monitor %s, romvec version %d\n", sun4_romvec->monid, diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h index 780ee7f..caf9261 100644 --- a/include/asm-sparc/floppy.h +++ b/include/asm-sparc/floppy.h @@ -227,7 +227,7 @@ static __inline__ void sun_fd_disable_dma(void) doing_pdma = 0; if (pdma_base) { mmu_unlockarea(pdma_base, pdma_areasize); - pdma_base = 0; + pdma_base = NULL; } } -- cgit v0.10.2 From 9b52523aff51e3b245e6ec8887e3fcf190da4711 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:39:01 +0100 Subject: [PATCH] mbcs trivial user annotations Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c index ec71005..ac9cfa9 100644 --- a/drivers/char/mbcs.c +++ b/drivers/char/mbcs.c @@ -394,7 +394,7 @@ int mbcs_open(struct inode *ip, struct file *fp) return -ENODEV; } -ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off) +ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off) { struct cx_dev *cx_dev = fp->private_data; struct mbcs_soft *soft = cx_dev->soft; @@ -419,7 +419,7 @@ ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off) } ssize_t -mbcs_sram_write(struct file * fp, const char *buf, size_t len, loff_t * off) +mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off) { struct cx_dev *cx_dev = fp->private_data; struct mbcs_soft *soft = cx_dev->soft; diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h index 844644d..e7fd47e 100644 --- a/drivers/char/mbcs.h +++ b/drivers/char/mbcs.h @@ -543,9 +543,9 @@ struct mbcs_soft { }; extern int mbcs_open(struct inode *ip, struct file *fp); -extern ssize_t mbcs_sram_read(struct file *fp, char *buf, size_t len, +extern ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len, loff_t * off); -extern ssize_t mbcs_sram_write(struct file *fp, const char *buf, size_t len, +extern ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len, loff_t * off); extern loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence); extern int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma); -- cgit v0.10.2 From 5cae841b13f23ccdf7e38b2400b5cf57deb57ccf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:39:22 +0100 Subject: [PATCH] ISA DMA Kconfig fixes - part 1 A bunch of drivers use ISA DMA helpers or their equivalents for platforms that have ISA with different DMA controller (a lot of ARM boxen). Currently there is no way to put such dependency in Kconfig - CONFIG_ISA is not it (e.g. it is not set on platforms that have no ISA slots, but have on-board devices that pretend to be ISA ones). New symbol added - ISA_DMA_API. Set when we have functional enable_dma()/set_dma_mode()/etc. set of helpers. Next patches in the series will add missing dependencies for drivers that need them. I'm very carefully staying the hell out of the recurring flamefest on what exactly CONFIG_ISA would mean in ideal world - added symbol has a well-defined meaning and for now I really want to treat it as completely independent from the mess around CONFIG_ISA. Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 0c79b9d..f7c9663 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -280,6 +280,10 @@ config ISA (MCA) or VESA. 
ISA is an older system, now being displaced by PCI; newer boards don't support it. If you have ISA, say Y, otherwise N. +config ISA_DMA_API + bool + default y + config PCI bool depends on !ALPHA_JENSEN diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4055115..8bfcb37 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -266,6 +266,10 @@ config ISA_DMA depends on FOOTBRIDGE_HOST || ARCH_SHARK default y +config ISA_DMA_API + bool + default y + config PCI bool "PCI support" if ARCH_INTEGRATOR_AP default y if ARCH_SHARK || FOOTBRIDGE_HOST || ARCH_IOP3XX || ARCH_IXP4XX || ARCH_IXP2000 diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig index 3955de5..6caed90 100644 --- a/arch/arm26/Kconfig +++ b/arch/arm26/Kconfig @@ -89,6 +89,10 @@ config PAGESIZE_16 machine with 4MB of memory. endmenu +config ISA_DMA_API + bool + default y + menu "General setup" # Compressed boot loader in ROM. Yes, we really want to ask about diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 99b4f29..fee5891 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -1173,6 +1173,10 @@ source "drivers/pci/pcie/Kconfig" source "drivers/pci/Kconfig" +config ISA_DMA_API + bool + default y + config ISA bool "ISA support" depends on !(X86_VOYAGER || X86_VISWS) diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index fc4615b..e729bd2 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig @@ -534,6 +534,11 @@ endchoice endmenu +config ISA_DMA_API + bool + depends on !M5272 + default y + menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" config PCI diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5e666aa..ab99446 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1656,3 +1656,7 @@ config GENERIC_HARDIRQS config GENERIC_IRQ_PROBE bool default y + +config ISA_DMA_API + bool + default y diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 5b5cd00..e7e7c56 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -45,6 +45,10 @@ config GENERIC_IRQ_PROBE config PM bool +config ISA_DMA_API + bool + default y + source "init/Kconfig" diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index c3d9413..ff04dcd 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig @@ -1079,6 +1079,10 @@ source kernel/power/Kconfig endmenu +config ISA_DMA_API + bool + default y + menu "Bus options" config ISA diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig index ef1f05e..f5508ab 100644 --- a/arch/ppc64/Kconfig +++ b/arch/ppc64/Kconfig @@ -293,6 +293,9 @@ config SECCOMP endmenu +config ISA_DMA_API + bool + default y menu "General setup" diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 722ea1d..3468d51 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -693,6 +693,10 @@ config RTC_9701JE endmenu +config ISA_DMA_API + bool + depends on MPC1211 + default y menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 80c38c5..44ee7f6 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -379,6 +379,11 @@ config GENERIC_IRQ_PROBE bool default y +# we have no ISA slots, but we do have ISA-style DMA. +config ISA_DMA_API + bool + default y + menu "Power management options" source kernel/power/Kconfig -- cgit v0.10.2 From 7fbacd5213a03b262bb17a826b166900e8b168ac Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:39:32 +0100 Subject: [PATCH] ISA_DMA Kconfig fixes - part 2 (parport_pc) Part of parport_pc that uses ISA DMA helpers made conditional on CONFIG_ISA_DMA_API. 
As the result, driver got usable for boxen that do not have ISA DMA stuff and have normal PCI parport card stuck into them - these never use DMA anyway. Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 731010e..16a2e6a 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -34,7 +34,7 @@ config PARPORT config PARPORT_PC tristate "PC-style hardware" - depends on PARPORT && (!SPARC64 || PCI) && (!SPARC32 || BROKEN) + depends on PARPORT && (!SPARC64 || PCI) && !SPARC32 ---help--- You should say Y here if you have a PC-style parallel port. All IBM PC compatible computers and some Alphas have PC-style diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index c5774e7..e7f3bcb 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -67,6 +67,10 @@ #define PARPORT_PC_MAX_PORTS PARPORT_MAX +#ifdef CONFIG_ISA_DMA_API +#define HAS_DMA +#endif + /* ECR modes */ #define ECR_SPP 00 #define ECR_PS2 01 @@ -610,6 +614,7 @@ dump_parport_state ("leave fifo_write_block_pio", port); return length - left; } +#ifdef HAS_DMA static size_t parport_pc_fifo_write_block_dma (struct parport *port, const void *buf, size_t length) { @@ -732,6 +737,17 @@ dump_parport_state ("enter fifo_write_block_dma", port); dump_parport_state ("leave fifo_write_block_dma", port); return length - left; } +#endif + +static inline size_t parport_pc_fifo_write_block(struct parport *port, + const void *buf, size_t length) +{ +#ifdef HAS_DMA + if (port->dma != PARPORT_DMA_NONE) + return parport_pc_fifo_write_block_dma (port, buf, length); +#endif + return parport_pc_fifo_write_block_pio (port, buf, length); +} /* Parallel Port FIFO mode (ECP chipsets) */ static size_t parport_pc_compat_write_block_pio (struct parport *port, @@ -758,10 +774,7 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port, port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA; /* Write the data to the FIFO. */ - if (port->dma != PARPORT_DMA_NONE) - written = parport_pc_fifo_write_block_dma (port, buf, length); - else - written = parport_pc_fifo_write_block_pio (port, buf, length); + written = parport_pc_fifo_write_block(port, buf, length); /* Finish up. */ /* For some hardware we don't want to touch the mode until @@ -856,10 +869,7 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port, port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA; /* Write the data to the FIFO. */ - if (port->dma != PARPORT_DMA_NONE) - written = parport_pc_fifo_write_block_dma (port, buf, length); - else - written = parport_pc_fifo_write_block_pio (port, buf, length); + written = parport_pc_fifo_write_block(port, buf, length); /* Finish up. */ /* For some hardware we don't want to touch the mode until @@ -2285,6 +2295,7 @@ struct parport *parport_pc_probe_port (unsigned long int base, } #ifdef CONFIG_PARPORT_PC_FIFO +#ifdef HAS_DMA if (p->dma != PARPORT_DMA_NONE) { if (request_dma (p->dma, p->name)) { printk (KERN_WARNING "%s: dma %d in use, " @@ -2306,7 +2317,8 @@ struct parport *parport_pc_probe_port (unsigned long int base, } } } -#endif /* CONFIG_PARPORT_PC_FIFO */ +#endif +#endif } /* Done probing. Now put the port into a sensible start-up state. 
*/ @@ -2367,11 +2379,13 @@ void parport_pc_unregister_port (struct parport *p) if (p->modes & PARPORT_MODE_ECP) release_region(p->base_hi, 3); #ifdef CONFIG_PARPORT_PC_FIFO +#ifdef HAS_DMA if (priv->dma_buf) pci_free_consistent(priv->dev, PAGE_SIZE, priv->dma_buf, priv->dma_handle); -#endif /* CONFIG_PARPORT_PC_FIFO */ +#endif +#endif kfree (p->private_data); parport_put_port(p); kfree (ops); /* hope no-one cached it */ diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h index ab88349..b7e6355 100644 --- a/include/asm-sparc64/parport.h +++ b/include/asm-sparc64/parport.h @@ -13,6 +13,12 @@ #define PARPORT_PC_MAX_PORTS PARPORT_MAX +/* + * While sparc64 doesn't have an ISA DMA API, we provide something that looks + * close enough to make parport_pc happy + */ +#define HAS_DMA + static struct sparc_ebus_info { struct ebus_dma_info info; unsigned int addr; -- cgit v0.10.2 From a553260618d88c4790daec7975c88f3db1080b5b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:39:42 +0100 Subject: [PATCH] ISA DMA Kconfig fixes - part 3 Drivers that expect ISA DMA API are marked as such in Kconfig. Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index e43e023..b594768 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -105,7 +105,7 @@ config ATARI_SLM config BLK_DEV_XD tristate "XT hard disk support" - depends on ISA + depends on ISA && ISA_DMA_API help Very old 8 bit hard disk controllers used in the IBM XT computer will be supported if you say Y here. diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index e162dab..2d5a19f 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -153,7 +153,7 @@ config DIGIEPCA config ESPSERIAL tristate "Hayes ESP serial port support" - depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP + depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP && ISA_DMA_API help This is a driver which supports Hayes ESP serial ports. Both single port cards and multiport cards are supported. Make sure to read @@ -195,7 +195,7 @@ config ISI config SYNCLINK tristate "Microgate SyncLink card support" - depends on SERIAL_NONSTANDARD && PCI + depends on SERIAL_NONSTANDARD && PCI && ISA_DMA_API help Provides support for the SyncLink ISA and PCI multiprotocol serial adapters. These adapters support asynchronous and HDLC bit diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig index 72f2b46..2e70d74 100644 --- a/drivers/mmc/Kconfig +++ b/drivers/mmc/Kconfig @@ -51,7 +51,7 @@ config MMC_PXA config MMC_WBSD tristate "Winbond W83L51xD SD/MMC Card Interface support" - depends on MMC && ISA + depends on MMC && ISA && ISA_DMA_API help This selects the Winbond(R) W83L51xD Secure digital and Multimedia card Interface. diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 68242bd..3a0a55b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -589,7 +589,7 @@ config EL2 config ELPLUS tristate "3c505 \"EtherLink Plus\" support" - depends on NET_VENDOR_3COM && ISA + depends on NET_VENDOR_3COM && ISA && ISA_DMA_API ---help--- Information about this network (Ethernet) card can be found in . 
If you have a card of @@ -630,7 +630,7 @@ config EL3 config 3C515 tristate "3c515 ISA \"Fast EtherLink\"" - depends on NET_VENDOR_3COM && (ISA || EISA) + depends on NET_VENDOR_3COM && (ISA || EISA) && ISA_DMA_API help If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet network card, say Y and read the Ethernet-HOWTO, available from @@ -708,7 +708,7 @@ config TYPHOON config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" - depends on NET_ETHERNET && ISA + depends on NET_ETHERNET && ISA && ISA_DMA_API help If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from @@ -864,7 +864,7 @@ config NI52 config NI65 tristate "NI6510 support" - depends on NET_VENDOR_RACAL && ISA + depends on NET_VENDOR_RACAL && ISA && ISA_DMA_API help If you have a network (Ethernet) card of this type, say Y and read the Ethernet-HOWTO, available from @@ -1072,7 +1072,7 @@ config NE2000 config ZNET tristate "Zenith Z-Note support (EXPERIMENTAL)" - depends on NET_ISA && EXPERIMENTAL + depends on NET_ISA && EXPERIMENTAL && ISA_DMA_API help The Zenith Z-Note notebook computer has a built-in network (Ethernet) card, and this is the Linux driver for it. Note that the diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 60b1967..69c488d 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -13,7 +13,7 @@ config DEV_APPLETALK config LTPC tristate "Apple/Farallon LocalTalk PC support" - depends on DEV_APPLETALK && (ISA || EISA) + depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API help This allows you to use the AppleTalk PC card to connect to LocalTalk networks. The card is also known as the Farallon PhoneNet PC card. diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index 34068f8..7cdebe1 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -45,7 +45,7 @@ config BPQETHER config DMASCC tristate "High-speed (DMA) SCC driver for AX.25" - depends on ISA && AX25 && BROKEN_ON_SMP + depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API ---help--- This is a driver for high-speed SCC boards, i.e. those supporting DMA on one port. You usually use those boards to connect your @@ -78,7 +78,7 @@ config DMASCC config SCC tristate "Z8530 SCC driver" - depends on ISA && AX25 + depends on ISA && AX25 && ISA_DMA_API ---help--- These cards are used to connect your Linux box to an amateur radio in order to communicate with other computers. If you want to use diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 3579193..66b9466 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -26,7 +26,7 @@ config WAN # There is no way to detect a comtrol sv11 - force it modular for now. config HOSTESS_SV11 tristate "Comtrol Hostess SV-11 support" - depends on WAN && ISA && m + depends on WAN && ISA && m && ISA_DMA_API help Driver for Comtrol Hostess SV-11 network card which operates on low speed synchronous serial links at up to @@ -38,7 +38,7 @@ config HOSTESS_SV11 # The COSA/SRP driver has not been tested as non-modular yet. config COSA tristate "COSA/SRP sync serial boards support" - depends on WAN && ISA && m + depends on WAN && ISA && m && ISA_DMA_API ---help--- Driver for COSA and SRP synchronous serial boards. @@ -127,7 +127,7 @@ config LANMEDIA # There is no way to detect a Sealevel board. 
Force it modular config SEALEVEL_4021 tristate "Sealevel Systems 4021 support" - depends on WAN && ISA && m + depends on WAN && ISA && m && ISA_DMA_API help This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 750b11c..1811cb2 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -260,7 +260,7 @@ config SCSI_3W_9XXX config SCSI_7000FASST tristate "7000FASST SCSI support" - depends on ISA && SCSI + depends on ISA && SCSI && ISA_DMA_API help This driver supports the Western Digital 7000 SCSI host adapter family. Some information is in the source: @@ -295,7 +295,7 @@ config SCSI_AHA152X config SCSI_AHA1542 tristate "Adaptec AHA1542 support" - depends on ISA && SCSI + depends on ISA && SCSI && ISA_DMA_API ---help--- This is support for a SCSI host adapter. It is explained in section 3.4 of the SCSI-HOWTO, available from @@ -515,7 +515,7 @@ config SCSI_SATA_VITESSE config SCSI_BUSLOGIC tristate "BusLogic SCSI support" - depends on (PCI || ISA || MCA) && SCSI && (BROKEN || !SPARC64) + depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API ---help--- This is support for BusLogic MultiMaster and FlashPoint SCSI Host Adapters. Consult the SCSI-HOWTO, available from @@ -571,7 +571,7 @@ config SCSI_DTC3280 config SCSI_EATA tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support" - depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64) + depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API ---help--- This driver supports all EATA/DMA-compliant SCSI host adapters. DPT ISA and all EISA I/O addresses are probed looking for the "EATA" @@ -665,7 +665,7 @@ config SCSI_FD_MCS config SCSI_GDTH tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" - depends on (ISA || EISA || PCI) && SCSI && (BROKEN || !SPARC64) + depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API ---help--- Formerly called GDT SCSI Disk Array Controller Support. @@ -1416,7 +1416,7 @@ config SCSI_T128 config SCSI_U14_34F tristate "UltraStor 14F/34F support" - depends on ISA && SCSI + depends on ISA && SCSI && ISA_DMA_API ---help--- This is support for the UltraStor 14F and 34F SCSI-2 host adapters. The source at contains some -- cgit v0.10.2 From 56c3b7d788c21eecf5641020fcf8e4e15d0c5eb0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:39:52 +0100 Subject: [PATCH] ISA DMA Kconfig fixes - part 4 (irda) * net/irda/irda_device.c::irda_setup_dma() made conditional on ISA_DMA_API (it uses helpers in question and irda is usable on platforms that don't have them at all - think of USB IRDA, for example). * irda drivers that depend on ISA DMA marked as dependent on ISA_DMA_API Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 6bf76a4..1c553d7 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -310,7 +310,7 @@ config SIGMATEL_FIR config NSC_FIR tristate "NSC PC87108/PC87338" - depends on IRDA + depends on IRDA && ISA_DMA_API help Say Y here if you want to build support for the NSC PC87108 and PC87338 IrDA chipsets. This driver supports SIR, @@ -321,7 +321,7 @@ config NSC_FIR config WINBOND_FIR tristate "Winbond W83977AF (IR)" - depends on IRDA + depends on IRDA && ISA_DMA_API help Say Y here if you want to build IrDA support for the Winbond W83977AF super-io chipset. 
This driver should be used for the IrDA @@ -347,7 +347,7 @@ config AU1000_FIR config SMC_IRCC_FIR tristate "SMSC IrCC (EXPERIMENTAL)" - depends on EXPERIMENTAL && IRDA + depends on EXPERIMENTAL && IRDA && ISA_DMA_API help Say Y here if you want to build support for the SMC Infrared Communications Controller. It is used in a wide variety of @@ -357,7 +357,7 @@ config SMC_IRCC_FIR config ALI_FIR tristate "ALi M5123 FIR (EXPERIMENTAL)" - depends on EXPERIMENTAL && IRDA + depends on EXPERIMENTAL && IRDA && ISA_DMA_API help Say Y here if you want to build support for the ALi M5123 FIR Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C, @@ -385,7 +385,7 @@ config SA1100_FIR config VIA_FIR tristate "VIA VT8231/VT1211 SIR/MIR/FIR" - depends on IRDA + depends on IRDA && ISA_DMA_API help Say Y here if you want to build support for the VIA VT8231 and VIA VT1211 IrDA controllers, found on the motherboards using diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index d6ccd32..70543d8 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c @@ -470,6 +470,7 @@ void irda_device_unregister_dongle(struct dongle_reg *dongle) } EXPORT_SYMBOL(irda_device_unregister_dongle); +#ifdef CONFIG_ISA_DMA_API /* * Function setup_dma (idev, buffer, count, mode) * @@ -492,3 +493,4 @@ void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode) release_dma_lock(flags); } EXPORT_SYMBOL(irda_setup_dma); +#endif -- cgit v0.10.2 From 0555985d046348b39e44ff1da2719d73409d7981 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:40:02 +0100 Subject: [PATCH] sonypi trivial user annotations Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index c812191..fd04206 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -1021,11 +1021,11 @@ static int sonypi_misc_ioctl(struct inode *ip, struct file *fp, ret = -EIO; break; } - if (copy_to_user((u8 *)arg, &val8, sizeof(val8))) + if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSFAN: - if (copy_from_user(&val8, (u8 *)arg, sizeof(val8))) { + if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } @@ -1038,7 +1038,7 @@ static int sonypi_misc_ioctl(struct inode *ip, struct file *fp, ret = -EIO; break; } - if (copy_to_user((u8 *)arg, &val8, sizeof(val8))) + if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; default: -- cgit v0.10.2 From b1ecb4c3a9e33cc8b93ac9cb046b535b72a15f68 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:40:12 +0100 Subject: [PATCH] asm/signal.h unification New file - asm-generic/signal.h. Contains declarations of __sighandler_t, __sigrestore_t, SIG_DFL, SIG_IGN, SIG_ERR and default definitions of SIG_BLOCK, SIG_UNBLOCK and SIG_SETMASK. asm-*/signal.h switched to including it. The only exception is asm-parisc/signal.h that wants its own declaration of __sighandler_t; that one is left as-is. asm-ppc64/signal.h required one more thing - unlike everybody else it used __sigrestorer_t instead of usual __sigrestore_t. PPC64 switched to common spelling. Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/include/asm-alpha/signal.h b/include/asm-alpha/signal.h index 4e0842b..1a2c52a 100644 --- a/include/asm-alpha/signal.h +++ b/include/asm-alpha/signal.h @@ -113,16 +113,7 @@ typedef unsigned long sigset_t; #define SIG_UNBLOCK 2 /* for unblocking signals */ #define SIG_SETMASK 3 /* for setting the signal mask */ -/* Type of a signal handler. 
*/ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -typedef void __restorefn_t(void); -typedef __restorefn_t __user *__sigrestore_t; - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct osf_sigaction { diff --git a/include/asm-arm/signal.h b/include/asm-arm/signal.h index b860dc3..46e69ae 100644 --- a/include/asm-arm/signal.h +++ b/include/asm-arm/signal.h @@ -117,20 +117,7 @@ typedef unsigned long sigset_t; #define SA_IRQNOMASK 0x08000000 #endif -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -typedef void __restorefn_t(void); -typedef __restorefn_t __user *__sigrestore_t; - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-arm26/signal.h b/include/asm-arm26/signal.h index a1aacef..dedb292 100644 --- a/include/asm-arm26/signal.h +++ b/include/asm-arm26/signal.h @@ -117,16 +117,7 @@ typedef unsigned long sigset_t; #define SA_IRQNOMASK 0x08000000 #endif -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-cris/signal.h b/include/asm-cris/signal.h index 2330769..dfe0395 100644 --- a/include/asm-cris/signal.h +++ b/include/asm-cris/signal.h @@ -108,16 +108,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h index c930bb1..d407bde 100644 --- a/include/asm-frv/signal.h +++ b/include/asm-frv/signal.h @@ -107,16 +107,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. 
*/ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h new file mode 100644 index 0000000..9418d6e --- /dev/null +++ b/include/asm-generic/signal.h @@ -0,0 +1,21 @@ +#ifndef SIG_BLOCK +#define SIG_BLOCK 0 /* for blocking signals */ +#endif +#ifndef SIG_UNBLOCK +#define SIG_UNBLOCK 1 /* for unblocking signals */ +#endif +#ifndef SIG_SETMASK +#define SIG_SETMASK 2 /* for setting the signal mask */ +#endif + +#ifndef __ASSEMBLY__ +typedef void __signalfn_t(int); +typedef __signalfn_t __user *__sighandler_t; + +typedef void __restorefn_t(void); +typedef __restorefn_t __user *__sigrestore_t; + +#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */ +#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */ +#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */ +#endif diff --git a/include/asm-h8300/signal.h b/include/asm-h8300/signal.h index ac3e01b..8eccdc1 100644 --- a/include/asm-h8300/signal.h +++ b/include/asm-h8300/signal.h @@ -107,16 +107,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h index 0f082bd..cbb47d3 100644 --- a/include/asm-i386/signal.h +++ b/include/asm-i386/signal.h @@ -110,20 +110,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -typedef void __restorefn_t(void); -typedef __restorefn_t __user *__sigrestore_t; - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h index 85a577a..608168d 100644 --- a/include/asm-ia64/signal.h +++ b/include/asm-ia64/signal.h @@ -118,13 +118,7 @@ #endif /* __KERNEL__ */ -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include # ifndef __ASSEMBLY__ @@ -133,9 +127,6 @@ /* Avoid too many header ordering problems. */ struct siginfo; -/* Type of a signal handler. 
*/ -typedef void __user (*__sighandler_t)(int); - typedef struct sigaltstack { void __user *ss_sp; int ss_flags; diff --git a/include/asm-m32r/signal.h b/include/asm-m32r/signal.h index 6e55fd4..95f69b1 100644 --- a/include/asm-m32r/signal.h +++ b/include/asm-m32r/signal.h @@ -114,20 +114,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -typedef void __restorefn_t(void); -typedef __restorefn_t __user *__sigrestore_t; - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h index 1d016e9..a0cdf90 100644 --- a/include/asm-m68k/signal.h +++ b/include/asm-m68k/signal.h @@ -105,29 +105,20 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { __sighandler_t sa_handler; old_sigset_t sa_mask; unsigned long sa_flags; - void (*sa_restorer)(void); + __sigrestore_t sa_restorer; }; struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; - void (*sa_restorer)(void); + __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; diff --git a/include/asm-m68knommu/signal.h b/include/asm-m68knommu/signal.h index 37c9c8a..1d13187 100644 --- a/include/asm-m68knommu/signal.h +++ b/include/asm-m68knommu/signal.h @@ -105,16 +105,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-mips/signal.h b/include/asm-mips/signal.h index d813567..f2c470f 100644 --- a/include/asm-mips/signal.h +++ b/include/asm-mips/signal.h @@ -103,14 +103,7 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SIG_SETMASK 3 /* for setting the signal mask */ #define SIG_SETMASK32 256 /* Goodie from SGI for BSD compatibility: set only the low 32 bit of the sigset. */ - -/* Type of a signal handler. 
*/ -typedef void (*__sighandler_t)(int); - -/* Fake signal functions */ -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include struct sigaction { unsigned int sa_flags; diff --git a/include/asm-ppc/signal.h b/include/asm-ppc/signal.h index d890dab..caf6ede 100644 --- a/include/asm-ppc/signal.h +++ b/include/asm-ppc/signal.h @@ -100,20 +100,7 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -typedef void __restorefn_t(void); -typedef __restorefn_t __user *__sigrestore_t; - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include struct old_sigaction { __sighandler_t sa_handler; diff --git a/include/asm-ppc64/signal.h b/include/asm-ppc64/signal.h index a2d7bbb..432df7dd 100644 --- a/include/asm-ppc64/signal.h +++ b/include/asm-ppc64/signal.h @@ -97,33 +97,19 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void __sigfunction(int); -typedef __sigfunction __user * __sighandler_t; - -/* Type of the restorer function */ -typedef void __sigrestorer(void); -typedef __sigrestorer __user * __sigrestorer_t; - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include struct old_sigaction { __sighandler_t sa_handler; old_sigset_t sa_mask; unsigned long sa_flags; - __sigrestorer_t sa_restorer; + __sigrestore_t sa_restorer; }; struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; - __sigrestorer_t sa_restorer; + __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; diff --git a/include/asm-s390/signal.h b/include/asm-s390/signal.h index bfed83a..3d6e11c 100644 --- a/include/asm-s390/signal.h +++ b/include/asm-s390/signal.h @@ -117,16 +117,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-sh/signal.h b/include/asm-sh/signal.h index 29f1ac1..d6e8eb0 100644 --- a/include/asm-sh/signal.h +++ b/include/asm-sh/signal.h @@ -108,16 +108,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. 
*/ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-sh64/signal.h b/include/asm-sh64/signal.h index 864c94e..2400dc6 100644 --- a/include/asm-sh64/signal.h +++ b/include/asm-sh64/signal.h @@ -107,16 +107,7 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ THREAD_SIZE -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. */ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h index f792e10..aa9960a 100644 --- a/include/asm-sparc/signal.h +++ b/include/asm-sparc/signal.h @@ -174,16 +174,7 @@ struct sigstack { #define SA_STATIC_ALLOC 0x80 #endif -/* Type of a signal handler. */ -#ifdef __KERNEL__ -typedef void (*__sighandler_t)(int, int, struct sigcontext *, char *); -#else -typedef void (*__sighandler_t)(int); -#endif - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include #ifdef __KERNEL__ struct __new_sigaction { diff --git a/include/asm-sparc64/signal.h b/include/asm-sparc64/signal.h index 466d021..becdf1b 100644 --- a/include/asm-sparc64/signal.h +++ b/include/asm-sparc64/signal.h @@ -177,21 +177,7 @@ struct sigstack { #define SA_STATIC_ALLOC 0x80 #endif -/* Type of a signal handler. */ -#ifdef __KERNEL__ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -typedef void __restorefn_t(void); -typedef __restorefn_t __user *__sigrestore_t; -#else -typedef void (*__sighandler_t)(int); -typedef void (*__sigrestore_t)(void); -#endif - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include struct __new_sigaction { __sighandler_t sa_handler; diff --git a/include/asm-v850/signal.h b/include/asm-v850/signal.h index ec3566c..cb52caa 100644 --- a/include/asm-v850/signal.h +++ b/include/asm-v850/signal.h @@ -110,17 +110,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. 
*/ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ - +#include #ifdef __KERNEL__ diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h index 4987ad8..fe9b96d 100644 --- a/include/asm-x86_64/signal.h +++ b/include/asm-x86_64/signal.h @@ -116,21 +116,9 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ +#include #ifndef __ASSEMBLY__ -/* Type of a signal handler. */ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - -typedef void __restorefn_t(void); -typedef __restorefn_t __user *__sigrestore_t; - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ struct sigaction { __sighandler_t sa_handler; -- cgit v0.10.2 From 1b75d8ba5ea96e174dc2674e01d87ce0d382ee44 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 May 2005 05:40:22 +0100 Subject: [PATCH] ipmi iomem annotations and fixes annotated, a bunch of direct dereferencing replaced with readb(). Signed-off-by: Al Viro Signed-off-by: Linus Torvalds diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5419440..298574e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -1617,15 +1617,15 @@ typedef struct dmi_header u16 handle; } dmi_header_t; -static int decode_dmi(dmi_header_t *dm, int intf_num) +static int decode_dmi(dmi_header_t __iomem *dm, int intf_num) { - u8 *data = (u8 *)dm; + u8 __iomem *data = (u8 __iomem *)dm; unsigned long base_addr; u8 reg_spacing; - u8 len = dm->length; + u8 len = readb(&dm->length); dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; - ipmi_data->type = data[4]; + ipmi_data->type = readb(&data[4]); memcpy(&base_addr, data+8, sizeof(unsigned long)); if (len >= 0x11) { @@ -1640,12 +1640,12 @@ static int decode_dmi(dmi_header_t *dm, int intf_num) } /* If bit 4 of byte 0x10 is set, then the lsb for the address is odd. */ - ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); + ipmi_data->base_addr = base_addr | ((readb(&data[0x10]) & 0x10) >> 4); - ipmi_data->irq = data[0x11]; + ipmi_data->irq = readb(&data[0x11]); /* The top two bits of byte 0x10 hold the register spacing. 
*/ - reg_spacing = (data[0x10] & 0xC0) >> 6; + reg_spacing = (readb(&data[0x10]) & 0xC0) >> 6; switch(reg_spacing){ case 0x00: /* Byte boundaries */ ipmi_data->offset = 1; @@ -1673,7 +1673,7 @@ static int decode_dmi(dmi_header_t *dm, int intf_num) ipmi_data->offset = 1; } - ipmi_data->slave_addr = data[6]; + ipmi_data->slave_addr = readb(&data[6]); if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) { dmi_data_entries++; @@ -1687,9 +1687,9 @@ static int decode_dmi(dmi_header_t *dm, int intf_num) static int dmi_table(u32 base, int len, int num) { - u8 *buf; - struct dmi_header *dm; - u8 *data; + u8 __iomem *buf; + struct dmi_header __iomem *dm; + u8 __iomem *data; int i=1; int status=-1; int intf_num = 0; @@ -1702,12 +1702,12 @@ static int dmi_table(u32 base, int len, int num) while(ilength) >= len) + if((data-buf+readb(&dm->length)) >= len) break; - if (dm->type == 38) { + if (readb(&dm->type) == 38) { if (decode_dmi(dm, intf_num) == 0) { intf_num++; if (intf_num >= SI_MAX_DRIVERS) @@ -1715,8 +1715,8 @@ static int dmi_table(u32 base, int len, int num) } } - data+=dm->length; - while((data-buf) < len && (*data || data[1])) + data+=readb(&dm->length); + while((data-buf) < len && (readb(data)||readb(data+1))) data++; data+=2; i++; diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index a0212b0..62791dd 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h @@ -51,7 +51,7 @@ struct si_sm_io /* Generic info used by the actual handling routines, the state machine shouldn't touch these. */ void *info; - void *addr; + void __iomem *addr; int regspacing; int regsize; int regshift; -- cgit v0.10.2 From 836eeed6cea8ca8138e9deff7571857923541604 Mon Sep 17 00:00:00 2001 From: Denis Vlasenko Date: Wed, 4 May 2005 08:07:16 +0300 Subject: [PATCH] i810fb: fix __initdata access [hv]sync[12] are __initdata, causing mplayer to oops with the previous i810fb fix. My fault, this fixes it. Sorry. Signed-off-by: Linus Torvalds diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c index e04d3e8..a9a618f 100644 --- a/drivers/video/i810/i810_main.c +++ b/drivers/video/i810/i810_main.c @@ -1000,8 +1000,10 @@ static int i810_check_params(struct fb_var_screeninfo *var, if (fb_validate_mode(var, info)) { if (fb_get_mode(FB_MAXTIMINGS, 0, var, info)) { - int default_sync = (hsync1-HFMIN)|(hsync2-HFMAX) - |(vsync1-VFMIN)|(vsync2-VFMAX); + int default_sync = (info->monspecs.hfmin-HFMIN) + |(info->monspecs.hfmax-HFMAX) + |(info->monspecs.vfmin-VFMIN) + |(info->monspecs.vfmax-VFMAX); printk("i810fb: invalid video mode%s\n", default_sync ? "" : ". Specifying vsyncN/hsyncN parameters may help"); -- cgit v0.10.2 From 9b48b46678989b67cd00658ea88964163eaab616 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 22 Mar 2005 16:00:00 -0700 Subject: [IA64-SGI] move nodepda pointer out of pda Remove the p_nodepda and p_subnodepda pointers from the pda_s structure. And then define a new per-cpu pointer to the nodepda and export it so that it can be accessed by kernel modules.
Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index fea71ee..4fb4498 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -76,6 +76,9 @@ EXPORT_PER_CPU_SYMBOL(__sn_hub_info); DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); +DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); +EXPORT_PER_CPU_SYMBOL(__sn_nodepda); + partid_t sn_partid = -1; EXPORT_SYMBOL(sn_partid); char sn_system_serial_number_string[128]; @@ -480,7 +483,8 @@ void __init sn_cpu_init(void) cnode = nasid_to_cnodeid(nasid); - pda->p_nodepda = nodepdaindr[cnode]; + sn_nodepda = nodepdaindr[cnode]; + pda->led_address = (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); pda->led_state = LED_ALWAYS_SET; @@ -626,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice) long cpu; for (cpu=0; cpu < NR_CPUS; cpu++) - if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice) + if (cpuid_to_nasid(cpu) == nasid && + cpuid_to_slice(cpu) == slice) return cpu; return -1; diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h index 13cc100..7138b1e 100644 --- a/include/asm-ia64/sn/nodepda.h +++ b/include/asm-ia64/sn/nodepda.h @@ -13,7 +13,6 @@ #include #include #include -#include #include /* @@ -67,20 +66,18 @@ typedef struct nodepda_s nodepda_t; * The next set of definitions provides this. * Routines are expected to use * - * nodepda -> to access node PDA for the node on which code is running - * subnodepda -> to access subnode PDA for the subnode on which code is running - * - * NODEPDA(cnode) -> to access node PDA for cnodeid - * SUBNODEPDA(cnode,sn) -> to access subnode PDA for cnodeid/subnode + * sn_nodepda - to access node PDA for the node on which code is running + * NODEPDA(cnodeid) - to access node PDA for cnodeid */ -#define nodepda pda->p_nodepda /* Ptr to this node's PDA */ -#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode]) +DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda); +#define sn_nodepda (__get_cpu_var(__sn_nodepda)) +#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid]) /* * Check if given a compact node id the corresponding node has all the * cpus disabled. */ -#define is_headless_node(cnode) (nr_cpus_node(cnode) == 0) +#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0) #endif /* _ASM_IA64_SN_NODEPDA_H */ diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h index a5340cf..ea5590c 100644 --- a/include/asm-ia64/sn/pda.h +++ b/include/asm-ia64/sn/pda.h @@ -24,14 +24,6 @@ typedef struct pda_s { - /* Having a pointer in the begining of PDA tends to increase - * the chance of having this pointer in cache. (Yes something - * else gets pushed out). 
Doing this reduces the number of memory - * access to all nodepda variables to be one - */ - struct nodepda_s *p_nodepda; /* Pointer to Per node PDA */ - struct subnodepda_s *p_subnodepda; /* Pointer to CPU subnode PDA */ - /* * Support for SN LEDs */ diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h index 6b44290..20b3001 100644 --- a/include/asm-ia64/sn/sn_cpuid.h +++ b/include/asm-ia64/sn/sn_cpuid.h @@ -92,24 +92,24 @@ * NOTE: on non-MP systems, only cpuid 0 exists */ -extern short physical_node_map[]; /* indexed by nasid to get cnode */ +extern short physical_node_map[]; /* indexed by nasid to get cnode */ /* * Macros for retrieving info about current cpu */ -#define get_nasid() (nodepda->phys_cpuid[smp_processor_id()].nasid) -#define get_subnode() (nodepda->phys_cpuid[smp_processor_id()].subnode) -#define get_slice() (nodepda->phys_cpuid[smp_processor_id()].slice) -#define get_cnode() (nodepda->phys_cpuid[smp_processor_id()].cnode) -#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) +#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid) +#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode) +#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice) +#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode) +#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) /* * Macros for retrieving info about an arbitrary cpu * cpuid - logical cpu id */ -#define cpuid_to_nasid(cpuid) (nodepda->phys_cpuid[cpuid].nasid) -#define cpuid_to_subnode(cpuid) (nodepda->phys_cpuid[cpuid].subnode) -#define cpuid_to_slice(cpuid) (nodepda->phys_cpuid[cpuid].slice) +#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid) +#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode) +#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice) #define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)]) -- cgit v0.10.2 From b48fc7bb3868abffc89ce70d4baf324574338d8e Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:05:00 -0700 Subject: [IA64-SGI] convert some sn SAL_CALLs to ia64_sal_oemcall calls Convert some sn SAL_CALLs to ia64_sal_oemcall calls so that they can be called by kernel modules. 
Signed-off-by: Dean Nelson Signed-off-by: Tony Luck diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index f914f6d..56d74ca 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h @@ -557,7 +557,8 @@ static inline u64 ia64_sn_partition_serial_get(void) { struct ia64_sal_retval ret_stuff; - SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0); + ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, + 0, 0, 0, 0, 0, 0); if (ret_stuff.status != 0) return 0; return ret_stuff.v0; @@ -565,11 +566,10 @@ ia64_sn_partition_serial_get(void) static inline u64 sn_partition_serial_number_val(void) { - if (sn_partition_serial_number) { - return(sn_partition_serial_number); - } else { - return(sn_partition_serial_number = ia64_sn_partition_serial_get()); + if (unlikely(sn_partition_serial_number == 0)) { + sn_partition_serial_number = ia64_sn_partition_serial_get(); } + return sn_partition_serial_number; } /* @@ -580,8 +580,8 @@ static inline partid_t ia64_sn_sysctl_partition_get(nasid_t nasid) { struct ia64_sal_retval ret_stuff; - SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid, - 0, 0, 0, 0, 0, 0); + ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid, + 0, 0, 0, 0, 0, 0); if (ret_stuff.status != 0) return INVALID_PARTID; return ((partid_t)ret_stuff.v0); @@ -595,11 +595,38 @@ extern partid_t sn_partid; static inline partid_t sn_local_partid(void) { - if (sn_partid < 0) { - return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()))); - } else { - return sn_partid; + if (unlikely(sn_partid < 0)) { + sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())); } + return sn_partid; +} + +/* + * Returns the physical address of the partition's reserved page through + * an iterative number of calls. + * + * On first call, 'cookie' and 'len' should be set to 0, and 'addr' + * set to the nasid of the partition whose reserved page's address is + * being sought. + * On subsequent calls, pass the values, that were passed back on the + * previous call. + * + * While the return status equals SALRET_MORE_PASSES, keep calling + * this function after first copying 'len' bytes starting at 'addr' + * into 'buf'. Once the return status equals SALRET_OK, 'addr' will + * be the physical address of the partition's reserved page. If the + * return status equals neither of these, an error as occurred. 
+ */ +static inline s64 +sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len) +{ + struct ia64_sal_retval rv; + ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie, + *addr, buf, *len, 0, 0, 0); + *cookie = rv.v0; + *addr = rv.v1; + *len = rv.v2; + return rv.status; } /* @@ -621,8 +648,8 @@ static inline int sn_register_xp_addr_region(u64 paddr, u64 len, int operation) { struct ia64_sal_retval ret_stuff; - SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation, - 0, 0, 0, 0); + ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, + (u64)operation, 0, 0, 0, 0); return ret_stuff.status; } @@ -646,8 +673,8 @@ sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr, } else { call = SN_SAL_NO_FAULT_ZONE_PHYSICAL; } - SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1, - 0, 0, 0); + ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr, + (u64)1, 0, 0, 0); return ret_stuff.status; } @@ -668,8 +695,8 @@ static inline int sn_change_coherence(u64 *new_domain, u64 *old_domain) { struct ia64_sal_retval ret_stuff; - SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0, - 0, 0, 0); + ia64_sal_oemcall(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain, + (u64)old_domain, 0, 0, 0, 0, 0); return ret_stuff.status; } @@ -688,8 +715,8 @@ sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array) cnodeid = nasid_to_cnodeid(get_node_number(paddr)); // spin_lock(&NODEPDA(cnodeid)->bist_lock); local_irq_save(irq_flags); - SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array, - perms, 0, 0, 0); + ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len, + (u64)nasid_array, perms, 0, 0, 0); local_irq_restore(irq_flags); // spin_unlock(&NODEPDA(cnodeid)->bist_lock); return ret_stuff.status; -- cgit v0.10.2 From 177a4324944478f2799ce4ede2797cb0f602f274 Mon Sep 17 00:00:00 2001 From: Alexander Nyberg Date: Sat, 26 Feb 2005 13:38:51 +0100 Subject: [PATCH] Hotplug: Make dev->bus checking consistent Earlier in the same function dev->bus is checked before dereferenced, make consistent although I honestly don't know if dev->bus could ever be NULL Found by the Coverity tool Signed-off-by: Alexander Nyberg Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/base/core.c b/drivers/base/core.c index a7cedd8..268a9c8 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -139,7 +139,7 @@ static int dev_hotplug(struct kset *kset, struct kobject *kobj, char **envp, buffer = &buffer[length]; buffer_size -= length; - if (dev->bus->hotplug) { + if (dev->bus && dev->bus->hotplug) { /* have the bus specific function add its stuff */ retval = dev->bus->hotplug (dev, envp, num_envp, buffer, buffer_size); if (retval) { -- cgit v0.10.2 From b2d84f078a8be40f5ae3b4d2ac001e2a7f45fe4f Mon Sep 17 00:00:00 2001 From: Roman Kagan Date: Wed, 13 Apr 2005 21:40:17 +0400 Subject: [PATCH] drivers/base/bus.c: fix iteration in driver_detach() With 2.6.11 and 2.6.12-rc2 (and perhaps a few versions before) usb drivers for multi-interface devices, which do usb_driver_release_interface() in their disconnect(), make rmmod hang. It turns out to be due to a bug in drivers/base/bus.c:driver_detach(), that iterates over the list of attached devices with list_for_each_safe() under an assumption that device_release_driver() only releases the current device, while it may also call device_release_driver() for other devices on the same list. The following patch fixes it. Please consider applying. 
Signed-off-by: Roman Kagan Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/base/bus.c b/drivers/base/bus.c index f4fa273..2b3902c 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -405,9 +405,8 @@ void device_release_driver(struct device * dev) static void driver_detach(struct device_driver * drv) { - struct list_head * entry, * next; - list_for_each_safe(entry, next, &drv->devices) { - struct device * dev = container_of(entry, struct device, driver_list); + while (!list_empty(&drv->devices)) { + struct device * dev = container_of(drv->devices.next, struct device, driver_list); device_release_driver(dev); } } -- cgit v0.10.2
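
The driver_detach() fix above turns on a subtle point: device_release_driver() may unbind devices other than the one the loop is currently visiting, so even the "safe" cursor that list_for_each_safe() keeps can end up pointing at freed memory. The following userspace sketch reproduces the hazard and the while-list-not-empty pattern the patch adopts. It is an illustration only: struct node, release() and the partner field are invented stand-ins for struct device, device_release_driver() and multi-interface USB devices, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next, *prev;
	int id;
	int partner;	/* id of another node released together with this one */
};

static struct node head = { &head, &head, -1, -1 };

static void list_add_tail(struct node *n)
{
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/*
 * Models device_release_driver(): releasing one interface may also unlink
 * and free its partner interface, i.e. an entry other than the cursor.
 */
static void release(struct node *n)
{
	int partner = n->partner;
	struct node *p;

	printf("releasing %d\n", n->id);
	list_del(n);
	free(n);

	for (p = head.next; p != &head; p = p->next) {
		if (p->id == partner) {
			printf("  also releasing partner %d\n", p->id);
			list_del(p);
			free(p);
			break;
		}
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->partner = i ^ 1;	/* 0<->1 and 2<->3 go away in pairs */
		list_add_tail(n);
	}

	/*
	 * A list_for_each_safe()-style loop caches "next" before calling
	 * release(); when release() frees that cached node (the partner),
	 * the loop would walk through freed memory.  Re-reading the list
	 * head after every callback, as the patched driver_detach() does,
	 * never touches a stale pointer.
	 */
	while (head.next != &head)
		release(head.next);

	return 0;
}

In the kernel the second removal comes from usb_driver_release_interface() being called from a driver's disconnect(), which drops sibling interfaces from the same drv->devices list, exactly the situation described in the changelog.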
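
For the ipmi iomem patch earlier in this series, the underlying rule is that memory obtained from ioremap() carries the __iomem qualifier and must be read with readb()/memcpy_fromio() rather than dereferenced like ordinary kernel memory. A minimal sketch of that access pattern follows, under assumptions: dump_first_entry(), the 4096-byte mapping length and the printk text are made up for illustration; only the struct dmi_header layout and the readb() usage mirror the driver.

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/io.h>

struct dmi_header {
	u8  type;
	u8  length;
	u16 handle;
};

/* Hypothetical helper: peek at the first DMI entry of a table whose
 * physical address is table_pa. */
static int dump_first_entry(unsigned long table_pa)
{
	u8 __iomem *buf;
	struct dmi_header __iomem *dm;

	buf = ioremap(table_pa, 4096);
	if (buf == NULL)
		return -ENOMEM;

	dm = (struct dmi_header __iomem *)buf;

	/*
	 * readb() for byte-sized fields, memcpy_fromio() for larger copies;
	 * a plain load such as "dm->type" or "buf[4]" is exactly what the
	 * patch removes from decode_dmi() and dmi_table().
	 */
	printk(KERN_INFO "DMI entry: type %u, length %u\n",
	       readb(&dm->type), readb(&dm->length));

	iounmap(buf);
	return 0;
}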
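
The comment added above for sn_partition_reserved_page_pa() describes a multi-pass protocol. Below is one way a caller could drive it, sketched under assumptions: SALRET_OK and SALRET_MORE_PASSES are the status values the comment itself names, but lookup_reserved_page_pa(), the kmalloc() sizing and the __va()/__pa() handling of the intermediate copy are simplifications, not the in-tree caller.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/sn/sn_sal.h>

/* Hypothetical wrapper: returns the reserved page's physical address for
 * the partition that owns 'nasid', or 0 on failure. */
static u64 lookup_reserved_page_pa(nasid_t nasid)
{
	u64 cookie = 0, addr = (u64)nasid, len = 0, buf_pa = 0;
	void *buf = NULL;
	s64 status;

	/* First pass: cookie, len and the buffer are 0; addr carries the
	 * nasid of the partition whose reserved page is wanted. */
	status = sn_partition_reserved_page_pa(buf_pa, &cookie, &addr, &len);

	while (status == SALRET_MORE_PASSES) {
		/* Copy 'len' bytes starting at 'addr' into a buffer, then
		 * feed the values SAL passed back into the next call. */
		kfree(buf);
		buf = kmalloc(len, GFP_KERNEL);
		if (buf == NULL)
			return 0;
		memcpy(buf, __va(addr), len);
		buf_pa = __pa(buf);

		status = sn_partition_reserved_page_pa(buf_pa, &cookie,
						       &addr, &len);
	}

	kfree(buf);

	/* On SALRET_OK, 'addr' now holds the reserved page's physical
	 * address; any other status is an error. */
	return (status == SALRET_OK) ? addr : 0;
}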