Diffstat (limited to 'include')
161 files changed, 2789 insertions, 1004 deletions
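The dominant theme of the hunks below is the retirement of the legacy SA_* interrupt flags in favour of the IRQF_* names (SA_INTERRUPT becomes IRQF_DISABLED, SA_SHIRQ becomes IRQF_SHARED), alongside the ARM genirq migration, lockdep-aware rwsems, and new OMAP headers. As a minimal driver-side sketch of that flag rename (not part of this patch): the handler and device names here are hypothetical, and only the request_irq() flag usage is taken from the hunks that follow; the three-argument handler signature matches the kernel generation this patch targets.

#include <linux/interrupt.h>

/* Hypothetical handler, for illustration only. */
static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq)
{
        /* Previously: request_irq(irq, example_interrupt, SA_INTERRUPT, "example", NULL);
         * with this series the same fast-interrupt request is spelled: */
        return request_irq(irq, example_interrupt, IRQF_DISABLED, "example", NULL);
}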
diff --git a/include/asm-alpha/floppy.h b/include/asm-alpha/floppy.h index 21816d3..6a9f02a 100644 --- a/include/asm-alpha/floppy.h +++ b/include/asm-alpha/floppy.h @@ -26,7 +26,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - SA_INTERRUPT, "floppy", NULL) + IRQF_DISABLED, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); #ifdef CONFIG_PCI diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h index fafdd4f..1570c0b 100644 --- a/include/asm-alpha/rwsem.h +++ b/include/asm-alpha/rwsem.h @@ -36,20 +36,11 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif }; -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } + LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -59,9 +50,6 @@ static inline void init_rwsem(struct rw_semaphore *sem) sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif } static inline void __down_read(struct rw_semaphore *sem) diff --git a/include/asm-alpha/signal.h b/include/asm-alpha/signal.h index 1a2c52a..13c2305 100644 --- a/include/asm-alpha/signal.h +++ b/include/asm-alpha/signal.h @@ -77,7 +77,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -98,7 +97,6 @@ typedef unsigned long sigset_t; #define SA_ONESHOT SA_RESETHAND #define SA_NOMASK SA_NODEFER -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ /* * sigaltstack controls diff --git a/include/asm-arm/arch-omap/board-fsample.h b/include/asm-arm/arch-omap/board-fsample.h new file mode 100644 index 0000000..89a1e52 --- /dev/null +++ b/include/asm-arm/arch-omap/board-fsample.h @@ -0,0 +1,51 @@ +/* + * linux/include/asm-arm/arch-omap/board-fsample.h + * + * Board-specific goodies for TI F-Sample. + * + * Copyright (C) 2006 Google, Inc. + * Author: Brian Swetland <swetland@google.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_ARCH_OMAP_FSAMPLE_H +#define __ASM_ARCH_OMAP_FSAMPLE_H + +/* fsample is pretty close to p2-sample */ +#include <asm/arch/board-perseus2.h> + +#define fsample_cpld_read(reg) __raw_readb(reg) +#define fsample_cpld_write(val, reg) __raw_writeb(val, reg) + +#define FSAMPLE_CPLD_BASE 0xE8100000 +#define FSAMPLE_CPLD_SIZE SZ_4K +#define FSAMPLE_CPLD_START 0x05080000 + +#define FSAMPLE_CPLD_REG_A (FSAMPLE_CPLD_BASE + 0x00) +#define FSAMPLE_CPLD_SWITCH (FSAMPLE_CPLD_BASE + 0x02) +#define FSAMPLE_CPLD_UART (FSAMPLE_CPLD_BASE + 0x02) +#define FSAMPLE_CPLD_REG_B (FSAMPLE_CPLD_BASE + 0x04) +#define FSAMPLE_CPLD_VERSION (FSAMPLE_CPLD_BASE + 0x06) +#define FSAMPLE_CPLD_SET_CLR (FSAMPLE_CPLD_BASE + 0x06) + +#define FSAMPLE_CPLD_BIT_BT_RESET 0 +#define FSAMPLE_CPLD_BIT_LCD_RESET 1 +#define FSAMPLE_CPLD_BIT_CAM_PWDN 2 +#define FSAMPLE_CPLD_BIT_CHARGER_ENABLE 3 +#define FSAMPLE_CPLD_BIT_SD_MMC_EN 4 +#define FSAMPLE_CPLD_BIT_aGPS_PWREN 5 +#define FSAMPLE_CPLD_BIT_BACKLIGHT 6 +#define FSAMPLE_CPLD_BIT_aGPS_EN_RESET 7 +#define FSAMPLE_CPLD_BIT_aGPS_SLEEPx_N 8 +#define FSAMPLE_CPLD_BIT_OTG_RESET 9 + +#define fsample_cpld_set(bit) \ + fsample_cpld_write((((bit) & 15) << 4) | 0x0f, FSAMPLE_CPLD_SET_CLR) + +#define fsample_cpld_clear(bit) \ + fsample_cpld_write(0xf0 | ((bit) & 15), FSAMPLE_CPLD_SET_CLR) + +#endif diff --git a/include/asm-arm/arch-omap/board.h b/include/asm-arm/arch-omap/board.h index dfdbf06..edf1dc6 100644 --- a/include/asm-arm/arch-omap/board.h +++ b/include/asm-arm/arch-omap/board.h @@ -22,6 +22,7 @@ #define OMAP_TAG_UART 0x4f07 #define OMAP_TAG_FBMEM 0x4f08 #define OMAP_TAG_STI_CONSOLE 0x4f09 +#define OMAP_TAG_CAMERA_SENSOR 0x4f0a #define OMAP_TAG_BOOT_REASON 0x4f80 #define OMAP_TAG_FLASH_PART 0x4f81 @@ -61,6 +62,12 @@ struct omap_sti_console_config { u8 channel; }; +struct omap_camera_sensor_config { + u16 reset_gpio; + int (*power_on)(void * data); + int (*power_off)(void * data); +}; + struct omap_usb_config { /* Configure drivers according to the connectors on your board: * - "A" connector (rectagular) diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h index ca12023..1b1b023 100644 --- a/include/asm-arm/arch-omap/dma.h +++ b/include/asm-arm/arch-omap/dma.h @@ -185,8 +185,8 @@ /* DMA channels for 24xx */ #define OMAP24XX_DMA_NO_DEVICE 0 #define OMAP24XX_DMA_XTI_DMA 1 /* S_DMA_0 */ -#define OMAP24XX_DMA_EXT_NDMA_REQ0 2 /* S_DMA_1 */ -#define OMAP24XX_DMA_EXT_NDMA_REQ1 3 /* S_DMA_2 */ +#define OMAP24XX_DMA_EXT_DMAREQ0 2 /* S_DMA_1 */ +#define OMAP24XX_DMA_EXT_DMAREQ1 3 /* S_DMA_2 */ #define OMAP24XX_DMA_GPMC 4 /* S_DMA_3 */ #define OMAP24XX_DMA_GFX 5 /* S_DMA_4 */ #define OMAP24XX_DMA_DSS 6 /* S_DMA_5 */ @@ -197,7 +197,9 @@ #define OMAP24XX_DMA_DES_TX 11 /* S_DMA_10 */ #define OMAP24XX_DMA_DES_RX 12 /* S_DMA_11 */ #define OMAP24XX_DMA_SHA1MD5_RX 13 /* S_DMA_12 */ - +#define OMAP24XX_DMA_EXT_DMAREQ2 14 /* S_DMA_13 */ +#define OMAP24XX_DMA_EXT_DMAREQ3 15 /* S_DMA_14 */ +#define OMAP24XX_DMA_EXT_DMAREQ4 16 /* S_DMA_15 */ #define OMAP24XX_DMA_EAC_AC_RD 17 /* S_DMA_16 */ #define OMAP24XX_DMA_EAC_AC_WR 18 /* S_DMA_17 */ #define OMAP24XX_DMA_EAC_MD_UL_RD 19 /* S_DMA_18 */ @@ -244,6 +246,7 @@ #define OMAP24XX_DMA_MMC1_TX 61 /* SDMA_60 */ #define OMAP24XX_DMA_MMC1_RX 62 /* SDMA_61 */ #define OMAP24XX_DMA_MS 63 /* SDMA_62 */ +#define OMAP24XX_DMA_EXT_DMAREQ5 64 /* S_DMA_63 */ /*----------------------------------------------------------------------------*/ @@ -274,7 +277,7 @@ #define OMAP1610_DMA_LCD_LCH_CTRL (OMAP1610_DMA_LCD_BASE + 0xea) #define 
OMAP1610_DMA_LCD_SRC_FI_B1_U (OMAP1610_DMA_LCD_BASE + 0xf4) -#define OMAP_DMA_TOUT_IRQ (1 << 0) /* Only on omap1 */ +#define OMAP1_DMA_TOUT_IRQ (1 << 0) #define OMAP_DMA_DROP_IRQ (1 << 1) #define OMAP_DMA_HALF_IRQ (1 << 2) #define OMAP_DMA_FRAME_IRQ (1 << 3) @@ -315,11 +318,11 @@ enum { OMAP_LCD_DMA_B2_BOTTOM }; -/* REVISIT: Check if BURST_4 is really 1 (or 2) */ enum omap_dma_burst_mode { OMAP_DMA_DATA_BURST_DIS = 0, OMAP_DMA_DATA_BURST_4, - OMAP_DMA_DATA_BURST_8 + OMAP_DMA_DATA_BURST_8, + OMAP_DMA_DATA_BURST_16, }; enum omap_dma_color_mode { diff --git a/include/asm-arm/arch-omap/dmtimer.h b/include/asm-arm/arch-omap/dmtimer.h index e6522e6..7a289ff 100644 --- a/include/asm-arm/arch-omap/dmtimer.h +++ b/include/asm-arm/arch-omap/dmtimer.h @@ -5,6 +5,7 @@ * * Copyright (C) 2005 Nokia Corporation * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com> + * PWM and clock framwork support by Timo Teras. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -25,69 +26,56 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#ifndef __ASM_ARCH_TIMER_H -#define __ASM_ARCH_TIMER_H - -#include <linux/list.h> - -#define OMAP_TIMER_SRC_ARMXOR 0x00 -#define OMAP_TIMER_SRC_32_KHZ 0x01 -#define OMAP_TIMER_SRC_EXT_CLK 0x02 - -/* timer control reg bits */ -#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13) -#define OMAP_TIMER_CTRL_PT (1 << 12) -#define OMAP_TIMER_CTRL_TRG_OVERFLOW (0x1 << 10) -#define OMAP_TIMER_CTRL_TRG_OFANDMATCH (0x2 << 10) -#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8) -#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8) -#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8) -#define OMAP_TIMER_CTRL_SCPWM (1 << 7) -#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */ -#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */ -#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* how much to shift the prescaler value */ -#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */ -#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */ +#ifndef __ASM_ARCH_DMTIMER_H +#define __ASM_ARCH_DMTIMER_H -/* timer interrupt enable bits */ -#define OMAP_TIMER_INT_CAPTURE (1 << 2) -#define OMAP_TIMER_INT_OVERFLOW (1 << 1) -#define OMAP_TIMER_INT_MATCH (1 << 0) +/* clock sources */ +#define OMAP_TIMER_SRC_SYS_CLK 0x00 +#define OMAP_TIMER_SRC_32_KHZ 0x01 +#define OMAP_TIMER_SRC_EXT_CLK 0x02 +/* timer interrupt enable bits */ +#define OMAP_TIMER_INT_CAPTURE (1 << 2) +#define OMAP_TIMER_INT_OVERFLOW (1 << 1) +#define OMAP_TIMER_INT_MATCH (1 << 0) -struct omap_dm_timer { - struct list_head timer_list; +/* trigger types */ +#define OMAP_TIMER_TRIGGER_NONE 0x00 +#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01 +#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 - u32 base; - unsigned int irq; -}; +struct omap_dm_timer; +struct clk; -u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, int reg); -void omap_dm_timer_write_reg(struct omap_dm_timer *timer, int reg, u32 value); +int omap_dm_timer_init(void); -struct omap_dm_timer * omap_dm_timer_request(void); +struct omap_dm_timer *omap_dm_timer_request(void); +struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id); void omap_dm_timer_free(struct omap_dm_timer *timer); -void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source); -void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value); -void omap_dm_timer_set_trigger(struct omap_dm_timer *timer, unsigned int value); -void omap_dm_timer_enable_compare(struct omap_dm_timer *timer); -void 
omap_dm_timer_enable_autoreload(struct omap_dm_timer *timer); +int omap_dm_timer_get_irq(struct omap_dm_timer *timer); + +u32 omap_dm_timer_modify_idlect_mask(u32 inputmask); +struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer); void omap_dm_timer_trigger(struct omap_dm_timer *timer); void omap_dm_timer_start(struct omap_dm_timer *timer); void omap_dm_timer_stop(struct omap_dm_timer *timer); -void omap_dm_timer_set_load(struct omap_dm_timer *timer, unsigned int load); -void omap_dm_timer_set_match(struct omap_dm_timer *timer, unsigned int match); +void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source); +void omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, unsigned int value); +void omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable, unsigned int match); +void omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on, int toggle, int trigger); +void omap_dm_timer_set_prescaler(struct omap_dm_timer *timer, int prescaler); + +void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value); unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer); void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value); - unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer); -void omap_dm_timer_reset_counter(struct omap_dm_timer *timer); +void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value); int omap_dm_timers_active(void); -u32 omap_dm_timer_modify_idlect_mask(u32 inputmask); -#endif /* __ASM_ARCH_TIMER_H */ + +#endif /* __ASM_ARCH_DMTIMER_H */ diff --git a/include/asm-arm/arch-omap/gpmc.h b/include/asm-arm/arch-omap/gpmc.h new file mode 100644 index 0000000..1a0a520 --- /dev/null +++ b/include/asm-arm/arch-omap/gpmc.h @@ -0,0 +1,91 @@ +/* + * General-Purpose Memory Controller for OMAP2 + * + * Copyright (C) 2005-2006 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __OMAP2_GPMC_H +#define __OMAP2_GPMC_H + +#define GPMC_CS_CONFIG1 0x00 +#define GPMC_CS_CONFIG2 0x04 +#define GPMC_CS_CONFIG3 0x08 +#define GPMC_CS_CONFIG4 0x0c +#define GPMC_CS_CONFIG5 0x10 +#define GPMC_CS_CONFIG6 0x14 +#define GPMC_CS_CONFIG7 0x18 +#define GPMC_CS_NAND_COMMAND 0x1c +#define GPMC_CS_NAND_ADDRESS 0x20 +#define GPMC_CS_NAND_DATA 0x24 + +#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31) +#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 20) +#define GPMC_CONFIG1_READTYPE_ASYNC (0 << 29) +#define GPMC_CONFIG1_READTYPE_SYNC (1 << 29) +#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27) +#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27) +#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25) +#define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23) +#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22) +#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21) +#define GPMC_CONFIG1_WAIT_MON_IIME(val) ((val & 3) << 18) +#define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16) +#define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12) +#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1) +#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10) +#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0) +#define GPMC_CONFIG1_DEVICETYPE_NAND GPMC_CONFIG1_DEVICETYPE(1) +#define GPMC_CONFIG1_MUXADDDATA (1 << 9) +#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4) +#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3) +#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1)) +#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2)) +#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3)) + +/* + * Note that all values in this struct are in nanoseconds, while + * the register values are in gpmc_fck cycles. + */ +struct gpmc_timings { + /* Minimum clock period for synchronous mode */ + u16 sync_clk; + + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ + u16 cs_on; /* Assertion time */ + u16 cs_rd_off; /* Read deassertion time */ + u16 cs_wr_off; /* Write deassertion time */ + + /* ADV signal timings corresponding to GPMC_CONFIG3 */ + u16 adv_on; /* Assertion time */ + u16 adv_rd_off; /* Read deassertion time */ + u16 adv_wr_off; /* Write deassertion time */ + + /* WE signals timings corresponding to GPMC_CONFIG4 */ + u16 we_on; /* WE assertion time */ + u16 we_off; /* WE deassertion time */ + + /* OE signals timings corresponding to GPMC_CONFIG4 */ + u16 oe_on; /* OE assertion time */ + u16 oe_off; /* OE deassertion time */ + + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ + u16 page_burst_access; /* Multiple access word delay */ + u16 access; /* Start-cycle to first data valid delay */ + u16 rd_cycle; /* Total read cycle time */ + u16 wr_cycle; /* Total write cycle time */ +}; + +extern unsigned int gpmc_ns_to_ticks(unsigned int time_ns); + +extern void gpmc_cs_write_reg(int cs, int idx, u32 val); +extern u32 gpmc_cs_read_reg(int cs, int idx); +extern int gpmc_cs_calc_divider(int cs, unsigned int sync_clk); +extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t); +extern unsigned long gpmc_cs_get_base_addr(int cs); + + +#endif diff --git a/include/asm-arm/arch-omap/hardware.h b/include/asm-arm/arch-omap/hardware.h index c7d9e85..481048d 100644 --- a/include/asm-arm/arch-omap/hardware.h +++ b/include/asm-arm/arch-omap/hardware.h @@ -297,6 +297,10 @@ #include "board-perseus2.h" #endif +#ifdef CONFIG_MACH_OMAP_FSAMPLE +#include "board-fsample.h" +#endif + #ifdef CONFIG_MACH_OMAP_H3 #include "board-h3.h" #endif diff --git a/include/asm-arm/arch-omap/irqs.h 
b/include/asm-arm/arch-omap/irqs.h index 42098d9..2542495 100644 --- a/include/asm-arm/arch-omap/irqs.h +++ b/include/asm-arm/arch-omap/irqs.h @@ -242,10 +242,24 @@ #define INT_24XX_GPIO_BANK2 30 #define INT_24XX_GPIO_BANK3 31 #define INT_24XX_GPIO_BANK4 32 +#define INT_24XX_GPTIMER1 37 +#define INT_24XX_GPTIMER2 38 +#define INT_24XX_GPTIMER3 39 +#define INT_24XX_GPTIMER4 40 +#define INT_24XX_GPTIMER5 41 +#define INT_24XX_GPTIMER6 42 +#define INT_24XX_GPTIMER7 43 +#define INT_24XX_GPTIMER8 44 +#define INT_24XX_GPTIMER9 45 +#define INT_24XX_GPTIMER10 46 +#define INT_24XX_GPTIMER11 47 +#define INT_24XX_GPTIMER12 48 #define INT_24XX_MCBSP1_IRQ_TX 59 #define INT_24XX_MCBSP1_IRQ_RX 60 #define INT_24XX_MCBSP2_IRQ_TX 62 #define INT_24XX_MCBSP2_IRQ_RX 63 +#define INT_24XX_UART1_IRQ 72 +#define INT_24XX_UART2_IRQ 73 #define INT_24XX_UART3_IRQ 74 /* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730) and diff --git a/include/asm-arm/arch-omap/mux.h b/include/asm-arm/arch-omap/mux.h index 0dc24d4..679869c 100644 --- a/include/asm-arm/arch-omap/mux.h +++ b/include/asm-arm/arch-omap/mux.h @@ -410,6 +410,12 @@ enum omap24xx_index { /* 24xx clock */ W14_24XX_SYS_CLKOUT, + /* 24xx GPMC wait pin monitoring */ + L3_GPMC_WAIT0, + N7_GPMC_WAIT1, + M1_GPMC_WAIT2, + P1_GPMC_WAIT3, + /* 242X McBSP */ Y15_24XX_MCBSP2_CLKX, R14_24XX_MCBSP2_FSX, @@ -429,6 +435,26 @@ enum omap24xx_index { M15_24XX_GPIO92, V14_24XX_GPIO117, + /* 242x DBG GPIO */ + V4_242X_GPIO49, + W2_242X_GPIO50, + U4_242X_GPIO51, + V3_242X_GPIO52, + V2_242X_GPIO53, + V6_242X_GPIO53, + T4_242X_GPIO54, + Y4_242X_GPIO54, + T3_242X_GPIO55, + U2_242X_GPIO56, + + /* 24xx external DMA requests */ + AA10_242X_DMAREQ0, + AA6_242X_DMAREQ1, + E4_242X_DMAREQ2, + G4_242X_DMAREQ3, + D3_242X_DMAREQ4, + E3_242X_DMAREQ5, + P20_24XX_TSC_IRQ, /* UART3 */ diff --git a/include/asm-arm/arch-omap/pm.h b/include/asm-arm/arch-omap/pm.h index 05b003f..e46623c 100644 --- a/include/asm-arm/arch-omap/pm.h +++ b/include/asm-arm/arch-omap/pm.h @@ -299,10 +299,43 @@ enum omap24xx_save_state { OMAP24XX_SLEEP_SAVE_INTC_MIR0, OMAP24XX_SLEEP_SAVE_INTC_MIR1, OMAP24XX_SLEEP_SAVE_INTC_MIR2, + + OMAP24XX_SLEEP_SAVE_CM_CLKSTCTRL_MPU, + OMAP24XX_SLEEP_SAVE_CM_CLKSTCTRL_CORE, + OMAP24XX_SLEEP_SAVE_CM_CLKSTCTRL_GFX, + OMAP24XX_SLEEP_SAVE_CM_CLKSTCTRL_DSP, + OMAP24XX_SLEEP_SAVE_CM_CLKSTCTRL_MDM, + + OMAP24XX_SLEEP_SAVE_PM_PWSTCTRL_MPU, + OMAP24XX_SLEEP_SAVE_PM_PWSTCTRL_CORE, + OMAP24XX_SLEEP_SAVE_PM_PWSTCTRL_GFX, + OMAP24XX_SLEEP_SAVE_PM_PWSTCTRL_DSP, + OMAP24XX_SLEEP_SAVE_PM_PWSTCTRL_MDM, + + OMAP24XX_SLEEP_SAVE_CM_IDLEST1_CORE, + OMAP24XX_SLEEP_SAVE_CM_IDLEST2_CORE, + OMAP24XX_SLEEP_SAVE_CM_IDLEST3_CORE, + OMAP24XX_SLEEP_SAVE_CM_IDLEST4_CORE, + OMAP24XX_SLEEP_SAVE_CM_IDLEST_GFX, + OMAP24XX_SLEEP_SAVE_CM_IDLEST_WKUP, + OMAP24XX_SLEEP_SAVE_CM_IDLEST_CKGEN, + OMAP24XX_SLEEP_SAVE_CM_IDLEST_DSP, + OMAP24XX_SLEEP_SAVE_CM_IDLEST_MDM, + + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE1_CORE, + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE2_CORE, + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE3_CORE, + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE4_CORE, + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE_WKUP, + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE_PLL, + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE_DSP, + OMAP24XX_SLEEP_SAVE_CM_AUTOIDLE_MDM, + OMAP24XX_SLEEP_SAVE_CM_FCLKEN1_CORE, OMAP24XX_SLEEP_SAVE_CM_FCLKEN2_CORE, OMAP24XX_SLEEP_SAVE_CM_ICLKEN1_CORE, OMAP24XX_SLEEP_SAVE_CM_ICLKEN2_CORE, + OMAP24XX_SLEEP_SAVE_CM_ICLKEN3_CORE, OMAP24XX_SLEEP_SAVE_CM_ICLKEN4_CORE, OMAP24XX_SLEEP_SAVE_GPIO1_IRQENABLE1, OMAP24XX_SLEEP_SAVE_GPIO2_IRQENABLE1, diff --git a/include/asm-arm/arch-pxa/pxa-regs.h 
b/include/asm-arm/arch-pxa/pxa-regs.h index 9f83f4a..f5cc65d 100644 --- a/include/asm-arm/arch-pxa/pxa-regs.h +++ b/include/asm-arm/arch-pxa/pxa-regs.h @@ -1329,6 +1329,7 @@ #define GPIO84_NSRXD 84 /* NSSP receive */ #define GPIO85_nPCE_1 85 /* Card Enable for Card Space (PXA27x) */ #define GPIO92_MMCDAT0 92 /* MMC DAT0 (PXA27x) */ +#define GPIO102_nPCE_1 102 /* PCMCIA (PXA27x) */ #define GPIO109_MMCDAT1 109 /* MMC DAT1 (PXA27x) */ #define GPIO110_MMCDAT2 110 /* MMC DAT2 (PXA27x) */ #define GPIO110_MMCCS0 110 /* MMC Chip Select 0 (PXA27x) */ @@ -1471,6 +1472,7 @@ #define GPIO84_NSSP_RX (84 | GPIO_ALT_FN_2_IN) #define GPIO85_nPCE_1_MD (85 | GPIO_ALT_FN_1_OUT) #define GPIO92_MMCDAT0_MD (92 | GPIO_ALT_FN_1_OUT) +#define GPIO102_nPCE_1_MD (102 | GPIO_ALT_FN_1_OUT) #define GPIO104_pSKTSEL_MD (104 | GPIO_ALT_FN_1_OUT) #define GPIO109_MMCDAT1_MD (109 | GPIO_ALT_FN_1_OUT) #define GPIO110_MMCDAT2_MD (110 | GPIO_ALT_FN_1_OUT) diff --git a/include/asm-arm/arch-pxa/trizeps4.h b/include/asm-arm/arch-pxa/trizeps4.h new file mode 100644 index 0000000..641d0ec --- /dev/null +++ b/include/asm-arm/arch-pxa/trizeps4.h @@ -0,0 +1,106 @@ +/************************************************************************ + * Include file for TRIZEPS4 SoM and ConXS eval-board + * Copyright (c) Jürgen Schindele + * 2006 + ************************************************************************/ + +/* + * Includes/Defines + */ +#ifndef _TRIPEPS4_H_ +#define _TRIPEPS4_H_ + +/* physical memory regions */ +#define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ +#define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ +#define TRIZEPS4_ETH_PHYS (PXA_CS2_PHYS) /* Ethernet DM9000 region */ +#define TRIZEPS4_PIC_PHYS (PXA_CS3_PHYS) /* Logic chip on ConXS-Board */ +#define TRIZEPS4_SDRAM_BASE 0xa0000000 /* SDRAM region */ + +#define TRIZEPS4_CFSR_PHYS (PXA_CS3_PHYS) /* Logic chip on ConXS-Board CSFR register */ +#define TRIZEPS4_BOCR_PHYS (PXA_CS3_PHYS+0x02000000) /* Logic chip on ConXS-Board BOCR register */ +#define TRIZEPS4_IRCR_PHYS (PXA_CS3_PHYS+0x02400000) /* Logic chip on ConXS-Board IRCR register*/ +#define TRIZEPS4_UPSR_PHYS (PXA_CS3_PHYS+0x02800000) /* Logic chip on ConXS-Board UPSR register*/ +#define TRIZEPS4_DICR_PHYS (PXA_CS3_PHYS+0x03800000) /* Logic chip on ConXS-Board DICR register*/ + +/* virtual memory regions */ +#define TRIZEPS4_DISK_VIRT 0xF0000000 /* Disk On Chip region */ + +#define TRIZEPS4_PIC_VIRT 0xF0100000 /* not used */ +#define TRIZEPS4_CFSR_VIRT 0xF0100000 +#define TRIZEPS4_BOCR_VIRT 0xF0200000 +#define TRIZEPS4_DICR_VIRT 0xF0300000 +#define TRIZEPS4_IRCR_VIRT 0xF0400000 +#define TRIZEPS4_UPSR_VIRT 0xF0500000 + +/* size of flash */ +#define TRIZEPS4_FLASH_SIZE 0x02000000 /* Flash size 32 MB */ + +/* Ethernet Controller Davicom DM9000 */ +#define GPIO_DM9000 101 +#define TRIZEPS4_ETH_IRQ IRQ_GPIO(GPIO_DM9000) + +/* UCB1400 audio / TS-controller */ +#define GPIO_UCB1400 1 +#define TRIZEPS4_UCB1400_IRQ IRQ_GPIO(GPIO_UCB1400) + +/* PCMCIA socket Compact Flash */ +#define GPIO_PCD 11 /* PCMCIA Card Detect */ +#define TRIZEPS4_CD_IRQ IRQ_GPIO(GPIO_PCD) +#define GPIO_PRDY 13 /* READY / nINT */ +#define TRIZEPS4_READY_NINT IRQ_GPIO(GPIO_PRDY) + +/* MMC socket */ +#define GPIO_MMC_DET 12 +#define TRIZEPS4_MMC_IRQ IRQ_GPIO(GPIO_MMC_DET) + +/* LEDS using tx2 / rx2 */ +#define GPIO_SYS_BUSY_LED 46 +#define GPIO_HEARTBEAT_LED 47 + +/* Off-module PIC on ConXS board */ +#define GPIO_PIC 0 +#define TRIZEPS4_PIC_IRQ IRQ_GPIO(GPIO_PIC) + +#define CFSR_P2V(x) ((x) - TRIZEPS4_CFSR_PHYS + 
TRIZEPS4_CFSR_VIRT) +#define CFSR_V2P(x) ((x) - TRIZEPS4_CFSR_VIRT + TRIZEPS4_CFSR_PHYS) + +#define BCR_P2V(x) ((x) - TRIZEPS4_BOCR_PHYS + TRIZEPS4_BOCR_VIRT) +#define BCR_V2P(x) ((x) - TRIZEPS4_BOCR_VIRT + TRIZEPS4_BOCR_PHYS) + +#define DCR_P2V(x) ((x) - TRIZEPS4_DICR_PHYS + TRIZEPS4_DICR_VIRT) +#define DCR_V2P(x) ((x) - TRIZEPS4_DICR_VIRT + TRIZEPS4_DICR_PHYS) + +#ifndef __ASSEMBLY__ +#define ConXS_CFSR (*((volatile unsigned short *)CFSR_P2V(0x0C000000))) +#define ConXS_BCR (*((volatile unsigned short *)BCR_P2V(0x0E000000))) +#define ConXS_DCR (*((volatile unsigned short *)DCR_P2V(0x0F800000))) +#else +#define ConXS_CFSR CFSR_P2V(0x0C000000) +#define ConXS_BCR BCR_P2V(0x0E000000) +#define ConXS_DCR DCR_P2V(0x0F800000) +#endif + +#define ConXS_CFSR_BVD_MASK 0x0003 +#define ConXS_CFSR_BVD1 (1 << 0) +#define ConXS_CFSR_BVD2 (1 << 1) +#define ConXS_CFSR_VS_MASK 0x000C +#define ConXS_CFSR_VS1 (1 << 2) +#define ConXS_CFSR_VS2 (1 << 3) +#define ConXS_CFSR_VS_5V (0x3 << 2) +#define ConXS_CFSR_VS_3V3 0x0 + +#define ConXS_BCR_S0_POW_EN0 (1 << 0) +#define ConXS_BCR_S0_POW_EN1 (1 << 1) +#define ConXS_BCR_L_DISP (1 << 4) +#define ConXS_BCR_CF_BUF_EN (1 << 5) +#define ConXS_BCR_CF_RESET (1 << 7) +#define ConXS_BCR_S0_VCC_3V3 0x1 +#define ConXS_BCR_S0_VCC_5V0 0x2 +#define ConXS_BCR_S0_VPP_12V 0x4 +#define ConXS_BCR_S0_VPP_3V3 0x8 + +#define ConXS_IRCR_MODE (1 << 0) +#define ConXS_IRCR_SD (1 << 1) + +#endif /* _TRIPEPS4_H_ */ diff --git a/include/asm-arm/dyntick.h b/include/asm-arm/dyntick.h new file mode 100644 index 0000000..19fab2d --- /dev/null +++ b/include/asm-arm/dyntick.h @@ -0,0 +1,6 @@ +#ifndef _ASMARM_DYNTICK_H +#define _ASMARM_DYNTICK_H + +#include <asm/mach/time.h> + +#endif /* _ASMARM_DYNTICK_H */ diff --git a/include/asm-arm/floppy.h b/include/asm-arm/floppy.h index aa0c8d2..54b5ae4 100644 --- a/include/asm-arm/floppy.h +++ b/include/asm-arm/floppy.h @@ -25,7 +25,7 @@ #define fd_inb(port) inb((port)) #define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\ - SA_INTERRUPT,"floppy",NULL) + IRQF_DISABLED,"floppy",NULL) #define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL) #define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK) #define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK) diff --git a/include/asm-arm/hw_irq.h b/include/asm-arm/hw_irq.h new file mode 100644 index 0000000..ea85697 --- /dev/null +++ b/include/asm-arm/hw_irq.h @@ -0,0 +1,20 @@ +/* + * Nothing to see here yet + */ +#ifndef _ARCH_ARM_HW_IRQ_H +#define _ARCH_ARM_HW_IRQ_H + +#include <asm/mach/irq.h> + +#if defined(CONFIG_NO_IDLE_HZ) +# include <asm/dyntick.h> +# define handle_dynamic_tick(action) \ + if (!(action->flags & IRQF_TIMER) && system_timer->dyn_tick) { \ + write_seqlock(&xtime_lock); \ + if (system_timer->dyn_tick->state & DYN_TICK_ENABLED) \ + system_timer->dyn_tick->handler(irq, 0, regs); \ + write_sequnlock(&xtime_lock); \ + } +#endif + +#endif diff --git a/include/asm-arm/irq.h b/include/asm-arm/irq.h index 66e67e6..283af50 100644 --- a/include/asm-arm/irq.h +++ b/include/asm-arm/irq.h @@ -21,18 +21,13 @@ struct irqaction; -extern void disable_irq_nosync(unsigned int); -extern void disable_irq(unsigned int); -extern void enable_irq(unsigned int); - /* - * These correspond with the SA_TRIGGER_* defines, and therefore the - * IORESOURCE_IRQ_* defines. 
+ * Migration helpers */ -#define __IRQT_RISEDGE (1 << 0) -#define __IRQT_FALEDGE (1 << 1) -#define __IRQT_HIGHLVL (1 << 2) -#define __IRQT_LOWLVL (1 << 3) +#define __IRQT_FALEDGE IRQ_TYPE_EDGE_FALLING +#define __IRQT_RISEDGE IRQ_TYPE_EDGE_RISING +#define __IRQT_LOWLVL IRQ_TYPE_LEVEL_LOW +#define __IRQT_HIGHLVL IRQ_TYPE_LEVEL_HIGH #define IRQT_NOEDGE (0) #define IRQT_RISING (__IRQT_RISEDGE) @@ -40,12 +35,7 @@ extern void enable_irq(unsigned int); #define IRQT_BOTHEDGE (__IRQT_RISEDGE|__IRQT_FALEDGE) #define IRQT_LOW (__IRQT_LOWLVL) #define IRQT_HIGH (__IRQT_HIGHLVL) -#define IRQT_PROBE (1 << 4) - -int set_irq_type(unsigned int irq, unsigned int type); -void disable_irq_wake(unsigned int irq); -void enable_irq_wake(unsigned int irq); -int setup_irq(unsigned int, struct irqaction *); +#define IRQT_PROBE IRQ_TYPE_PROBE extern void migrate_irqs(void); #endif diff --git a/include/asm-arm/mach/irq.h b/include/asm-arm/mach/irq.h index d4d420e..131f337 100644 --- a/include/asm-arm/mach/irq.h +++ b/include/asm-arm/mach/irq.h @@ -10,95 +10,9 @@ #ifndef __ASM_ARM_MACH_IRQ_H #define __ASM_ARM_MACH_IRQ_H -struct irqdesc; -struct pt_regs; -struct seq_file; - -typedef void (*irq_handler_t)(unsigned int, struct irqdesc *, struct pt_regs *); -typedef void (*irq_control_t)(unsigned int); - -struct irqchip { - /* - * Acknowledge the IRQ. - * If this is a level-based IRQ, then it is expected to mask the IRQ - * as well. - */ - void (*ack)(unsigned int); - /* - * Mask the IRQ in hardware. - */ - void (*mask)(unsigned int); - /* - * Unmask the IRQ in hardware. - */ - void (*unmask)(unsigned int); - /* - * Ask the hardware to re-trigger the IRQ. - * Note: This method _must_ _not_ call the interrupt handler. - * If you are unable to retrigger the interrupt, do not - * provide a function, or if you do, return non-zero. - */ - int (*retrigger)(unsigned int); - /* - * Set the type of the IRQ. - */ - int (*set_type)(unsigned int, unsigned int); - /* - * Set wakeup-enable on the selected IRQ - */ - int (*set_wake)(unsigned int, unsigned int); - -#ifdef CONFIG_SMP - /* - * Route an interrupt to a CPU - */ - void (*set_cpu)(struct irqdesc *desc, unsigned int irq, unsigned int cpu); -#endif -}; - -struct irqdesc { - irq_handler_t handle; - struct irqchip *chip; - struct irqaction *action; - struct list_head pend; - void __iomem *base; - void *data; - unsigned int disable_depth; - - unsigned int triggered: 1; /* IRQ has occurred */ - unsigned int running : 1; /* IRQ is running */ - unsigned int pending : 1; /* IRQ is pending */ - unsigned int probing : 1; /* IRQ in use for a probe */ - unsigned int probe_ok : 1; /* IRQ can be used for probe */ - unsigned int valid : 1; /* IRQ claimable */ - unsigned int noautoenable : 1; /* don't automatically enable IRQ */ - unsigned int unused :25; - - unsigned int irqs_unhandled; - struct proc_dir_entry *procdir; - -#ifdef CONFIG_SMP - cpumask_t affinity; - unsigned int cpu; -#endif - - /* - * IRQ lock detection - */ - unsigned int lck_cnt; - unsigned int lck_pc; - unsigned int lck_jif; -}; - -extern struct irqdesc irq_desc[]; +#include <linux/irq.h> -/* - * Helpful inline function for calling irq descriptor handlers. - */ -static inline void desc_handle_irq(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) -{ - desc->handle(irq, desc, regs); -} +struct seq_file; /* * This is internal. Do not use it. 
@@ -106,18 +20,22 @@ static inline void desc_handle_irq(unsigned int irq, struct irqdesc *desc, struc extern void (*init_arch_irq)(void); extern void init_FIQ(void); extern int show_fiq_list(struct seq_file *, void *); -void __set_irq_handler(unsigned int irq, irq_handler_t, int); /* - * External stuff. + * Function wrappers + */ +#define set_irq_chipdata(irq, d) set_irq_chip_data(irq, d) +#define get_irq_chipdata(irq) get_irq_chip_data(irq) + +/* + * Obsolete inline function for calling irq descriptor handlers. */ -#define set_irq_handler(irq,handler) __set_irq_handler(irq,handler,0) -#define set_irq_chained_handler(irq,handler) __set_irq_handler(irq,handler,1) -#define set_irq_data(irq,d) do { irq_desc[irq].data = d; } while (0) -#define set_irq_chipdata(irq,d) do { irq_desc[irq].base = d; } while (0) -#define get_irq_chipdata(irq) (irq_desc[irq].base) +static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc, + struct pt_regs *regs) +{ + desc->handle_irq(irq, desc, regs); +} -void set_irq_chip(unsigned int irq, struct irqchip *); void set_irq_flags(unsigned int irq, unsigned int flags); #define IRQF_VALID (1 << 0) @@ -125,12 +43,25 @@ void set_irq_flags(unsigned int irq, unsigned int flags); #define IRQF_NOAUTOEN (1 << 2) /* - * Built-in IRQ handlers. + * This is for easy migration, but should be changed in the source */ -void do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); -void do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); -void do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); -void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs); -void dummy_mask_unmask_irq(unsigned int irq); +#define do_level_IRQ handle_level_irq +#define do_edge_IRQ handle_edge_irq +#define do_simple_IRQ handle_simple_irq +#define irqdesc irq_desc +#define irqchip irq_chip + +#define do_bad_IRQ(irq,desc,regs) \ +do { \ + spin_lock(&desc->lock); \ + handle_bad_irq(irq, desc, regs); \ + spin_unlock(&desc->lock); \ +} while(0) + +extern unsigned long irq_err_count; +static inline void ack_bad_irq(int irq) +{ + irq_err_count++; +} #endif diff --git a/include/asm-arm/mach/time.h b/include/asm-arm/mach/time.h index 9f28073..dee0bc3 100644 --- a/include/asm-arm/mach/time.h +++ b/include/asm-arm/mach/time.h @@ -69,6 +69,7 @@ extern void timer_tick(struct pt_regs *); /* * Kernel time keeping support. 
*/ +struct timespec; extern int (*set_rtc)(void); extern void save_time_delta(struct timespec *delta, struct timespec *rtc); extern void restore_time_delta(struct timespec *delta, struct timespec *rtc); diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h index 94f973b..91d536c 100644 --- a/include/asm-arm/memory.h +++ b/include/asm-arm/memory.h @@ -68,6 +68,11 @@ */ #define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff)) +/* + * Allow 16MB-aligned ioremap pages + */ +#define IOREMAP_MAX_ORDER 24 + #else /* CONFIG_MMU */ /* diff --git a/include/asm-arm/mmu.h b/include/asm-arm/mmu.h index 23dde52..fe2a23b 100644 --- a/include/asm-arm/mmu.h +++ b/include/asm-arm/mmu.h @@ -7,6 +7,7 @@ typedef struct { #if __LINUX_ARM_ARCH__ >= 6 unsigned int id; #endif + unsigned int kvm_seq; } mm_context_t; #if __LINUX_ARM_ARCH__ >= 6 diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h index 9fadb01..d1a65b1 100644 --- a/include/asm-arm/mmu_context.h +++ b/include/asm-arm/mmu_context.h @@ -17,6 +17,8 @@ #include <asm/cacheflush.h> #include <asm/proc-fns.h> +void __check_kvm_seq(struct mm_struct *mm); + #if __LINUX_ARM_ARCH__ >= 6 /* @@ -45,13 +47,21 @@ static inline void check_context(struct mm_struct *mm) { if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) __new_context(mm); + + if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) + __check_kvm_seq(mm); } #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) #else -#define check_context(mm) do { } while (0) +static inline void check_context(struct mm_struct *mm) +{ + if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) + __check_kvm_seq(mm); +} + #define init_new_context(tsk,mm) 0 #endif diff --git a/include/asm-arm/pgtable-hwdef.h b/include/asm-arm/pgtable-hwdef.h index 1bc1f99..f3b5120 100644 --- a/include/asm-arm/pgtable-hwdef.h +++ b/include/asm-arm/pgtable-hwdef.h @@ -28,6 +28,7 @@ */ #define PMD_SECT_BUFFERABLE (1 << 2) #define PMD_SECT_CACHEABLE (1 << 3) +#define PMD_SECT_XN (1 << 4) /* v6 */ #define PMD_SECT_AP_WRITE (1 << 10) #define PMD_SECT_AP_READ (1 << 11) #define PMD_SECT_TEX(x) ((x) << 12) /* v5 */ diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h index 8425260..edb7b65 100644 --- a/include/asm-arm/procinfo.h +++ b/include/asm-arm/procinfo.h @@ -29,7 +29,8 @@ struct processor; struct proc_info_list { unsigned int cpu_val; unsigned int cpu_mask; - unsigned long __cpu_mmu_flags; /* used by head.S */ + unsigned long __cpu_mm_mmu_flags; /* used by head.S */ + unsigned long __cpu_io_mmu_flags; /* used by head.S */ unsigned long __cpu_flush; /* used by head.S */ const char *arch_name; const char *elf_name; diff --git a/include/asm-arm/signal.h b/include/asm-arm/signal.h index ced6916..d0fb487 100644 --- a/include/asm-arm/signal.h +++ b/include/asm-arm/signal.h @@ -82,7 +82,6 @@ typedef unsigned long sigset_t; * is running in 26-bit. * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)). * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_NODEFER prevents the current signal from being masked in the handler. * SA_RESETHAND clears the handler when the signal is delivered. 
* @@ -101,7 +100,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ /* @@ -113,10 +111,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ -#define SA_TIMER 0x40000000 -#endif - #include <asm-generic/signal.h> #ifdef __KERNEL__ diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h index 8a7554f..f28b236 100644 --- a/include/asm-arm/thread_info.h +++ b/include/asm-arm/thread_info.h @@ -111,6 +111,7 @@ extern void iwmmxt_task_disable(struct thread_info *); extern void iwmmxt_task_copy(struct thread_info *, void *); extern void iwmmxt_task_restore(struct thread_info *, void *); extern void iwmmxt_task_release(struct thread_info *); +extern void iwmmxt_task_switch(struct thread_info *); #endif diff --git a/include/asm-arm26/floppy.h b/include/asm-arm26/floppy.h index a18af06..efb7321 100644 --- a/include/asm-arm26/floppy.h +++ b/include/asm-arm26/floppy.h @@ -22,7 +22,7 @@ #define fd_inb(port) inb((port)) #define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\ - SA_INTERRUPT,"floppy",NULL) + IRQF_DISABLED,"floppy",NULL) #define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL) #define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK) #define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK) diff --git a/include/asm-arm26/signal.h b/include/asm-arm26/signal.h index 37ad253..967ba49 100644 --- a/include/asm-arm26/signal.h +++ b/include/asm-arm26/signal.h @@ -82,7 +82,6 @@ typedef unsigned long sigset_t; * is running in 26-bit. * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)). * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_NODEFER prevents the current signal from being masked in the handler. * SA_RESETHAND clears the handler when the signal is delivered. * @@ -101,7 +100,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ /* diff --git a/include/asm-cris/arch-v10/irq.h b/include/asm-cris/arch-v10/irq.h index 4fa8945..b1128a9 100644 --- a/include/asm-cris/arch-v10/irq.h +++ b/include/asm-cris/arch-v10/irq.h @@ -141,7 +141,7 @@ __asm__ ( \ * it here, we would not get the multiple_irq at all. * * The non-blocking here is based on the knowledge that the timer interrupt is - * registred as a fast interrupt (SA_INTERRUPT) so that we _know_ there will not + * registred as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not * be an sti() before the timer irq handler is run to acknowledge the interrupt. */ diff --git a/include/asm-cris/arch-v32/irq.h b/include/asm-cris/arch-v32/irq.h index eeb0a80..bac94ee 100644 --- a/include/asm-cris/arch-v32/irq.h +++ b/include/asm-cris/arch-v32/irq.h @@ -98,7 +98,7 @@ __asm__ ( \ * if we had BLOCK'edit here, we would not get the multiple_irq at all. * * The non-blocking here is based on the knowledge that the timer interrupt is - * registred as a fast interrupt (SA_INTERRUPT) so that we _know_ there will not + * registred as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not * be an sti() before the timer irq handler is run to acknowledge the interrupt. 
*/ #define BUILD_TIMER_IRQ(nr, mask) \ diff --git a/include/asm-cris/signal.h b/include/asm-cris/signal.h index dfe0395..349ae68 100644 --- a/include/asm-cris/signal.h +++ b/include/asm-cris/signal.h @@ -74,7 +74,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -95,7 +94,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-frv/irq-routing.h b/include/asm-frv/irq-routing.h index 686fb2b..ac3ab90 100644 --- a/include/asm-frv/irq-routing.h +++ b/include/asm-frv/irq-routing.h @@ -51,7 +51,7 @@ struct irq_source { struct irq_level { int usage; int disable_count; - unsigned long flags; /* current SA_INTERRUPT and SA_SHIRQ settings */ + unsigned long flags; /* current IRQF_DISABLED and IRQF_SHARED settings */ spinlock_t lock; struct irq_source *sources; }; diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h index dcc1b35..2079197 100644 --- a/include/asm-frv/signal.h +++ b/include/asm-frv/signal.h @@ -74,7 +74,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -94,7 +93,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h index 5cf8b7c..254a126 100644 --- a/include/asm-generic/mutex-null.h +++ b/include/asm-generic/mutex-null.h @@ -10,15 +10,10 @@ #ifndef _ASM_GENERIC_MUTEX_NULL_H #define _ASM_GENERIC_MUTEX_NULL_H -/* extra parameter only needed for mutex debugging: */ -#ifndef __IP__ -# define __IP__ -#endif - -#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count __RET_IP__) -#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count __RET_IP__) -#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count __RET_IP__) -#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) -#define __mutex_slowpath_needs_to_unlock() 1 +#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count) +#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) +#define __mutex_slowpath_needs_to_unlock() 1 #endif diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index c745211..e160e04 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -7,6 +7,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; +#define per_cpu_offset(x) (__per_cpu_offset[x]) + /* Separate out the type, so (int[3], foo) works. 
*/ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name diff --git a/include/asm-h8300/signal.h b/include/asm-h8300/signal.h index 8eccdc1..7bc1504 100644 --- a/include/asm-h8300/signal.h +++ b/include/asm-h8300/signal.h @@ -74,7 +74,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -94,7 +93,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h index 9cb2793..359ead6 100644 --- a/include/asm-i386/floppy.h +++ b/include/asm-i386/floppy.h @@ -144,11 +144,11 @@ static int vdma_get_dma_residue(unsigned int dummy) static int fd_request_irq(void) { if(can_use_virtual_dma) - return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_hardint, + IRQF_DISABLED, "floppy", NULL); else - return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_interrupt, + IRQF_DISABLED, "floppy", NULL); } diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h new file mode 100644 index 0000000..e1bdb97 --- /dev/null +++ b/include/asm-i386/irqflags.h @@ -0,0 +1,127 @@ +/* + * include/asm-i386/irqflags.h + * + * IRQ flags handling + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() functions from the lowlevel headers. 
+ */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +static inline unsigned long __raw_local_save_flags(void) +{ + unsigned long flags; + + __asm__ __volatile__( + "pushfl ; popl %0" + : "=g" (flags) + : /* no input */ + ); + + return flags; +} + +#define raw_local_save_flags(flags) \ + do { (flags) = __raw_local_save_flags(); } while (0) + +static inline void raw_local_irq_restore(unsigned long flags) +{ + __asm__ __volatile__( + "pushl %0 ; popfl" + : /* no output */ + :"g" (flags) + :"memory", "cc" + ); +} + +static inline void raw_local_irq_disable(void) +{ + __asm__ __volatile__("cli" : : : "memory"); +} + +static inline void raw_local_irq_enable(void) +{ + __asm__ __volatile__("sti" : : : "memory"); +} + +/* + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +static inline void raw_safe_halt(void) +{ + __asm__ __volatile__("sti; hlt" : : : "memory"); +} + +/* + * Used when interrupts are already enabled or to + * shutdown the processor: + */ +static inline void halt(void) +{ + __asm__ __volatile__("hlt": : :"memory"); +} + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1 << 9)); +} + +static inline int raw_irqs_disabled(void) +{ + unsigned long flags = __raw_local_save_flags(); + + return raw_irqs_disabled_flags(flags); +} + +/* + * For spinlocks, etc: + */ +static inline unsigned long __raw_local_irq_save(void) +{ + unsigned long flags = __raw_local_save_flags(); + + raw_local_irq_disable(); + + return flags; +} + +#define raw_local_irq_save(flags) \ + do { (flags) = __raw_local_irq_save(); } while (0) + +#endif /* __ASSEMBLY__ */ + +/* + * Do the CPU's IRQ-state tracing from assembly code. We call a + * C function, so save all the C-clobbered registers: + */ +#ifdef CONFIG_TRACE_IRQFLAGS + +# define TRACE_IRQS_ON \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call trace_hardirqs_on; \ + popl %edx; \ + popl %ecx; \ + popl %eax; + +# define TRACE_IRQS_OFF \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call trace_hardirqs_off; \ + popl %edx; \ + popl %ecx; \ + popl %eax; + +#else +# define TRACE_IRQS_ON +# define TRACE_IRQS_OFF +#endif + +#endif diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h index be4ab85..2f07601 100644 --- a/include/asm-i386/rwsem.h +++ b/include/asm-i386/rwsem.h @@ -40,6 +40,7 @@ #include <linux/list.h> #include <linux/spinlock.h> +#include <linux/lockdep.h> struct rwsem_waiter; @@ -61,36 +62,34 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; #endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -#define __RWSEM_DEBUG_INIT /* */ +# define __RWSEM_DEP_MAP_INIT(lockname) #endif + #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEBUG_INIT } + __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif -} +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + 
struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) /* * lock for reading @@ -143,7 +142,7 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t" /* * lock for writing */ -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) { int tmp; @@ -167,6 +166,11 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the : "memory", "cc"); } +static inline void __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + /* * trylock for writing -- returns 1 if successful, 0 if contention */ diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h index 026fd23..3824a50 100644 --- a/include/asm-i386/signal.h +++ b/include/asm-i386/signal.h @@ -77,7 +77,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -97,7 +96,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index 04ba302..87c40f8 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h @@ -31,6 +31,11 @@ "jmp 1b\n" \ "3:\n\t" +/* + * NOTE: there's an irqs-on section here, which normally would have to be + * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use + * __raw_spin_lock_string_flags(). + */ #define __raw_spin_lock_string_flags \ "\n1:\t" \ "lock ; decb %0\n\t" \ @@ -63,6 +68,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) "=m" (lock->slock) : : "memory"); } +/* + * It is easier for the lock validator if interrupts are not re-enabled + * in the middle of a lock-acquire. This is a performance feature anyway + * so we turn it off: + */ +#ifndef CONFIG_PROVE_LOCKING static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { alternative_smp( @@ -70,6 +81,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla __raw_spin_lock_string_up, "=m" (lock->slock) : "r" (flags) : "memory"); } +#endif static inline int __raw_spin_trylock(raw_spinlock_t *lock) { diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index cab0180..db398d8 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -456,25 +456,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l #define set_wmb(var, value) do { var = value; wmb(); } while (0) -/* interrupt control.. 
*/ -#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0) -#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0) -#define local_irq_disable() __asm__ __volatile__("cli": : :"memory") -#define local_irq_enable() __asm__ __volatile__("sti": : :"memory") -/* used in the idle loop; sti takes one instruction cycle to complete */ -#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") -/* used when interrupts are already enabled or to shutdown the processor */ -#define halt() __asm__ __volatile__("hlt": : :"memory") - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - !(flags & (1<<9)); \ -}) - -/* For spinlocks etc */ -#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") +#include <linux/irqflags.h> /* * disable hlt during certain critical i/o operations diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h index 24d898b..fbe5cf3 100644 --- a/include/asm-ia64/percpu.h +++ b/include/asm-ia64/percpu.h @@ -36,6 +36,7 @@ #ifdef CONFIG_SMP extern unsigned long __per_cpu_offset[NR_CPUS]; +#define per_cpu_offset(x) (__per_cpu_offset(x)) /* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset); diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h index 1327c91..2d1640c 100644 --- a/include/asm-ia64/rwsem.h +++ b/include/asm-ia64/rwsem.h @@ -33,9 +33,6 @@ struct rw_semaphore { signed long count; spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif }; #define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000) @@ -45,19 +42,9 @@ struct rw_semaphore { #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) -/* - * initialization - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEBUG_INIT } + LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -73,9 +60,6 @@ init_rwsem (struct rw_semaphore *sem) sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif } /* diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h index 5e328ed..4f5ca56 100644 --- a/include/asm-ia64/signal.h +++ b/include/asm-ia64/signal.h @@ -56,7 +56,6 @@ * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. 
@@ -76,7 +75,6 @@ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 @@ -114,8 +112,6 @@ #define _NSIG_BPW 64 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) -#define SA_PERCPU_IRQ 0x02000000 - #endif /* __KERNEL__ */ #include <asm-generic/signal.h> diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 8bc9869..8adcde0 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -68,7 +68,7 @@ struct thread_info { #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR -#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) +#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) #endif /* !__ASSEMBLY */ diff --git a/include/asm-m32r/signal.h b/include/asm-m32r/signal.h index 95f69b1..e750045 100644 --- a/include/asm-m32r/signal.h +++ b/include/asm-m32r/signal.h @@ -81,7 +81,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -101,7 +100,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 66c4742f..311cebf 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -18,7 +18,7 @@ * switch_to(prev, next) should switch from task `prev' to `next' * `prev' will never be the same as `next'. * - * `next' and `prev' should be task_t, but it isn't always defined + * `next' and `prev' should be struct task_struct, but it isn't always defined */ #define switch_to(prev, next, last) do { \ diff --git a/include/asm-m68k/floppy.h b/include/asm-m68k/floppy.h index 63a05ed..57f4fdd 100644 --- a/include/asm-m68k/floppy.h +++ b/include/asm-m68k/floppy.h @@ -88,8 +88,8 @@ static __inline__ void fd_outb(unsigned char value, int port) static int fd_request_irq(void) { if(MACH_IS_Q40) - return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, - "floppy", floppy_hardint); + return request_irq(FLOPPY_IRQ, floppy_hardint, + IRQF_DISABLED, "floppy", floppy_hardint); else if(MACH_IS_SUN3X) return sun3xflop_request_irq(); return -ENXIO; diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h index f4ae7d8..3257f98 100644 --- a/include/asm-m68k/irq.h +++ b/include/asm-m68k/irq.h @@ -67,8 +67,8 @@ struct pt_regs; /* * various flags for request_irq() - the Amiga now uses the standard - * mechanism like all other architectures - SA_INTERRUPT and SA_SHIRQ - * are your friends. + * mechanism like all other architectures - IRQF_DISABLED and + * IRQF_SHARED are your friends. 
*/ #ifndef MACH_AMIGA_ONLY #define IRQ_FLG_LOCK (0x0001) /* handler is not replaceable */ diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h index 85037a3..de1ba6e 100644 --- a/include/asm-m68k/signal.h +++ b/include/asm-m68k/signal.h @@ -74,7 +74,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -94,7 +93,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ /* * sigaltstack controls diff --git a/include/asm-m68k/sun3xflop.h b/include/asm-m68k/sun3xflop.h index 98a9f79..ca8cc41 100644 --- a/include/asm-m68k/sun3xflop.h +++ b/include/asm-m68k/sun3xflop.h @@ -208,7 +208,8 @@ static int sun3xflop_request_irq(void) if(!once) { once = 1; - error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", NULL); + error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, + IRQF_DISABLED, "floppy", NULL); return ((error == 0) ? 0 : -1); } else return 0; } diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h index 5355727..45e7a2fd 100644 --- a/include/asm-m68knommu/irq.h +++ b/include/asm-m68knommu/irq.h @@ -62,8 +62,8 @@ extern void (*mach_disable_irq)(unsigned int); /* * various flags for request_irq() - the Amiga now uses the standard - * mechanism like all other architectures - SA_INTERRUPT and SA_SHIRQ - * are your friends. + * mechanism like all other architectures - IRQF_DISABLED and + * IRQF_SHARED are your friends. */ #define IRQ_FLG_LOCK (0x0001) /* handler is not replaceable */ #define IRQ_FLG_REPLACE (0x0002) /* replace existing handler */ diff --git a/include/asm-m68knommu/signal.h b/include/asm-m68knommu/signal.h index 1d13187..216c08b 100644 --- a/include/asm-m68knommu/signal.h +++ b/include/asm-m68knommu/signal.h @@ -74,7 +74,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. 
@@ -94,7 +93,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ /* * sigaltstack controls diff --git a/include/asm-mips/mach-generic/floppy.h b/include/asm-mips/mach-generic/floppy.h index 83cd69e..001a8ce 100644 --- a/include/asm-mips/mach-generic/floppy.h +++ b/include/asm-mips/mach-generic/floppy.h @@ -98,7 +98,7 @@ static inline void fd_disable_irq(void) static inline int fd_request_irq(void) { return request_irq(FLOPPY_IRQ, floppy_interrupt, - SA_INTERRUPT, "floppy", NULL); + IRQF_DISABLED, "floppy", NULL); } static inline void fd_free_irq(void) diff --git a/include/asm-mips/mach-jazz/floppy.h b/include/asm-mips/mach-jazz/floppy.h index 9413117..56e9ca6 100644 --- a/include/asm-mips/mach-jazz/floppy.h +++ b/include/asm-mips/mach-jazz/floppy.h @@ -90,7 +90,7 @@ static inline void fd_disable_irq(void) static inline int fd_request_irq(void) { return request_irq(FLOPPY_IRQ, floppy_interrupt, - SA_INTERRUPT, "floppy", NULL); + IRQF_DISABLED, "floppy", NULL); } static inline void fd_free_irq(void) diff --git a/include/asm-mips/signal.h b/include/asm-mips/signal.h index a1f3a3f..87a1dff 100644 --- a/include/asm-mips/signal.h +++ b/include/asm-mips/signal.h @@ -64,7 +64,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -84,7 +83,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 /* Only for o32 */ @@ -99,15 +97,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #ifdef __KERNEL__ -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ flag is for shared interrupt support on PCI and EISA. - */ -#define SA_SAMPLE_RANDOM SA_RESTART - #ifdef CONFIG_TRAD_SIGNALS #define sig_uses_siginfo(ka) ((ka)->sa.sa_flags & SA_SIGINFO) #else diff --git a/include/asm-parisc/floppy.h b/include/asm-parisc/floppy.h index 458cdb2..da2f9c1 100644 --- a/include/asm-parisc/floppy.h +++ b/include/asm-parisc/floppy.h @@ -156,11 +156,11 @@ static int vdma_get_dma_residue(unsigned int dummy) static int fd_request_irq(void) { if(can_use_virtual_dma) - return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_hardint, + IRQF_DISABLED, "floppy", NULL); else - return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_interrupt, + IRQF_DISABLED, "floppy", NULL); } static unsigned long dma_mem_alloc(unsigned long size) diff --git a/include/asm-parisc/signal.h b/include/asm-parisc/signal.h index 25cb23e..98a82fa 100644 --- a/include/asm-parisc/signal.h +++ b/include/asm-parisc/signal.h @@ -48,7 +48,6 @@ * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. 
Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -69,7 +68,6 @@ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 /* obsolete -- ignored */ diff --git a/include/asm-powerpc/floppy.h b/include/asm-powerpc/floppy.h index 9c8d91b..fd242a2 100644 --- a/include/asm-powerpc/floppy.h +++ b/include/asm-powerpc/floppy.h @@ -27,7 +27,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \ - SA_INTERRUPT, "floppy", NULL) + IRQF_DISABLED, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); #ifdef CONFIG_PCI diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h index 0392159..c80e113 100644 --- a/include/asm-powerpc/i8259.h +++ b/include/asm-powerpc/i8259.h @@ -4,11 +4,13 @@ #include <linux/irq.h> -extern struct hw_interrupt_type i8259_pic; - +#ifdef CONFIG_PPC_MERGE +extern void i8259_init(struct device_node *node, unsigned long intack_addr); +extern unsigned int i8259_irq(struct pt_regs *regs); +#else extern void i8259_init(unsigned long intack_addr, int offset); extern int i8259_irq(struct pt_regs *regs); -extern int i8259_irq_cascade(struct pt_regs *regs, void *unused); +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_I8259_H */ diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index eb5f33e..e057547 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h @@ -9,26 +9,14 @@ * 2 of the License, or (at your option) any later version. */ +#include <linux/config.h> #include <linux/threads.h> +#include <linux/list.h> +#include <linux/radix-tree.h> #include <asm/types.h> #include <asm/atomic.h> -/* this number is used when no interrupt has been assigned */ -#define NO_IRQ (-1) - -/* - * These constants are used for passing information about interrupt - * signal polarity and level/edge sensing to the low-level PIC chip - * drivers. - */ -#define IRQ_SENSE_MASK 0x1 -#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */ -#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */ - -#define IRQ_POLARITY_MASK 0x2 -#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ -#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ #define get_irq_desc(irq) (&irq_desc[(irq)]) @@ -36,50 +24,325 @@ #define for_each_irq(i) \ for ((i) = 0; (i) < NR_IRQS; ++(i)) -#ifdef CONFIG_PPC64 +extern atomic_t ppc_n_lost_interrupts; -/* - * Maximum number of interrupt sources that we can handle. +#ifdef CONFIG_PPC_MERGE + +/* This number is used when no interrupt has been assigned */ +#define NO_IRQ (0) + +/* This is a special irq number to return from get_irq() to tell that + * no interrupt happened _and_ ignore it (don't count it as bad). Some + * platforms like iSeries rely on that. */ +#define NO_IRQ_IGNORE ((unsigned int)-1) + +/* Total number of virq in the platform (make it a CONFIG_* option ? */ #define NR_IRQS 512 -/* Interrupt numbers are virtual in case they are sparsely - * distributed by the hardware. +/* Number of irqs reserved for the legacy controller */ +#define NUM_ISA_INTERRUPTS 16 + +/* This type is the placeholder for a hardware interrupt number. It has to + * be big enough to enclose whatever representation is used by a given + * platform. 
+ */ +typedef unsigned long irq_hw_number_t; + +/* Interrupt controller "host" data structure. This could be defined as a + * irq domain controller. That is, it handles the mapping between hardware + * and virtual interrupt numbers for a given interrupt domain. The host + * structure is generally created by the PIC code for a given PIC instance + * (though a host can cover more than one PIC if they have a flat number + * model). It's the host callbacks that are responsible for setting the + * irq_chip on a given irq_desc after it's been mapped. + * + * The host code and data structures are fairly agnostic to the fact that + * we use an open firmware device-tree. We do have references to struct + * device_node in two places: in irq_find_host() to find the host matching + * a given interrupt controller node, and of course as an argument to its + * counterpart host->ops->match() callback. However, those are treated as + * generic pointers by the core and the fact that it's actually a device-node + * pointer is purely a convention between callers and implementation. This + * code could thus be used on other architectures by replacing those two + * by some sort of arch-specific void * "token" used to identify interrupt + * controllers. */ -extern unsigned int virt_irq_to_real_map[NR_IRQS]; +struct irq_host; +struct radix_tree_root; -/* The maximum virtual IRQ number that we support. This - * can be set by the platform and will be reduced by the - * value of __irq_offset_value. It defaults to and is - * capped by (NR_IRQS - 1). +/* Functions below are provided by the host and called whenever a new mapping + * is created or an old mapping is disposed. The host can then proceed to + * whatever internal data structures management is required. It also needs + * to setup the irq_desc when returning from map(). */ -extern unsigned int virt_irq_max; +struct irq_host_ops { + /* Match an interrupt controller device node to a host, returns + * 1 on a match + */ + int (*match)(struct irq_host *h, struct device_node *node); + + /* Create or update a mapping between a virtual irq number and a hw + * irq number. This can be called several times for the same mapping + * but with different flags, though unmap shall always be called + * before the virq->hw mapping is changed. + */ + int (*map)(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw, unsigned int flags); + + /* Dispose of such a mapping */ + void (*unmap)(struct irq_host *h, unsigned int virq); + + /* Translate device-tree interrupt specifier from raw format coming + * from the firmware to a irq_hw_number_t (interrupt line number) and + * trigger flags that can be passed to irq_create_mapping(). + * If no translation is provided, raw format is assumed to be one cell + * for interrupt line and default sense. 
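+ *
+ * As a purely illustrative sketch (not part of this interface), a controller
+ * whose device-tree specifier is a single cell holding the hardware line
+ * number, with a fixed edge trigger assumed, might implement this roughly as:
+ *
+ *	static int foo_pic_xlate(struct irq_host *h, struct device_node *ct,
+ *				 u32 *intspec, unsigned int intsize,
+ *				 irq_hw_number_t *out_hwirq,
+ *				 unsigned int *out_flags)
+ *	{
+ *		if (intsize < 1)
+ *			return -EINVAL;
+ *		*out_hwirq = intspec[0];	/* cell 0 is the hw line */
+ *		*out_flags = IRQ_TYPE_EDGE_RISING;
+ *		return 0;
+ *	}
+ *
+ * where "foo_pic" is a made-up controller name and IRQ_TYPE_EDGE_RISING is
+ * assumed to be one of the IRQ_TYPE_* trigger codes from include/linux/irq.h.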
+ */ + int (*xlate)(struct irq_host *h, struct device_node *ctrler, + u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags); +}; + +struct irq_host { + struct list_head link; + + /* type of reverse mapping technique */ + unsigned int revmap_type; +#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */ +#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */ +#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */ +#define IRQ_HOST_MAP_TREE 3 /* radix tree */ + union { + struct { + unsigned int size; + unsigned int *revmap; + } linear; + struct radix_tree_root tree; + } revmap_data; + struct irq_host_ops *ops; + void *host_data; + irq_hw_number_t inval_irq; +}; + +/* The main irq map itself is an array of NR_IRQ entries containing the + * associated host and irq number. An entry with a host of NULL is free. + * An entry can be allocated if it's free, the allocator always then sets + * hwirq first to the host's invalid irq number and then fills ops. + */ +struct irq_map_entry { + irq_hw_number_t hwirq; + struct irq_host *host; +}; + +extern struct irq_map_entry irq_map[NR_IRQS]; + + -/* Create a mapping for a real_irq if it doesn't already exist. - * Return the virtual irq as a convenience. +/*** + * irq_alloc_host - Allocate a new irq_host data structure + * @node: device-tree node of the interrupt controller + * @revmap_type: type of reverse mapping to use + * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map + * @ops: map/unmap host callbacks + * @inval_irq: provide a hw number in that host space that is always invalid + * + * Allocates and initializes an irq_host structure. Note that in the case of + * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns + * for all legacy interrupts except 0 (which is always the invalid irq for + * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by + * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated + * later during boot automatically (the reverse mapping will use the slow path + * until that happens). + */ +extern struct irq_host *irq_alloc_host(unsigned int revmap_type, + unsigned int revmap_arg, + struct irq_host_ops *ops, + irq_hw_number_t inval_irq); + + +/*** + * irq_find_host - Locates a host for a given device node + * @node: device-tree node of the interrupt controller + */ +extern struct irq_host *irq_find_host(struct device_node *node); + + +/*** + * irq_set_default_host - Set a "default" host + * @host: default host pointer + * + * For convenience, it's possible to set a "default" host that will be used + * whenever NULL is passed to irq_create_mapping(). It makes life easier for + * platforms that want to manipulate a few hard coded interrupt numbers that + * aren't properly represented in the device-tree. + */ +extern void irq_set_default_host(struct irq_host *host); + + +/*** + * irq_set_virq_count - Set the maximum number of virt irqs + * @count: number of linux virtual irqs, capped with NR_IRQS + * + * This is mainly for use by platforms like iSeries who want to program + * the virtual irq number in the controller to avoid the reverse mapping + */ +extern void irq_set_virq_count(unsigned int count); + + +/*** + * irq_create_mapping - Map a hardware interrupt into linux virq space + * @host: host owning this hardware interrupt or NULL for default host + * @hwirq: hardware irq number in that host space + * @flags: flags passed to the controller. contains the trigger type among + * others.
Use IRQ_TYPE_* defined in include/linux/irq.h + * + * Only one mapping per hardware interrupt is permitted. Returns a linux + * virq number. The flags can be used to provide sense information to the + * controller (typically extracted from the device-tree). If no information + * is passed, the controller defaults will apply (for example, xics can only + * do edge so flags are irrelevant for some pseries specific irqs). + * + * The device-tree generally contains the trigger info in an encoding that is + * specific to a given type of controller. In that case, you can directly use + * host->ops->trigger_xlate() to translate that. + * + * It is recommended that new PICs that don't have existing OF bindings choose + * to use a representation of triggers identical to linux. + */ +extern unsigned int irq_create_mapping(struct irq_host *host, + irq_hw_number_t hwirq, + unsigned int flags); + + +/*** + * irq_dispose_mapping - Unmap an interrupt + * @virq: linux virq number of the interrupt to unmap + */ +extern void irq_dispose_mapping(unsigned int virq); + +/*** + * irq_find_mapping - Find a linux virq from a hw irq number. + * @host: host owning this hardware interrupt + * @hwirq: hardware irq number in that host space + * + * This is a slow path, for use by generic code. It's expected that an + * irq controller implementation directly calls the appropriate low level + * mapping function. */ -int virt_irq_create_mapping(unsigned int real_irq); -void virt_irq_init(void); +extern unsigned int irq_find_mapping(struct irq_host *host, + irq_hw_number_t hwirq); -static inline unsigned int virt_irq_to_real(unsigned int virt_irq) + +/*** + * irq_radix_revmap - Find a linux virq from a hw irq number. + * @host: host owning this hardware interrupt + * @hwirq: hardware irq number in that host space + * + * This is a fast path, for use by irq controller code that uses radix tree + * revmaps + */ +extern unsigned int irq_radix_revmap(struct irq_host *host, + irq_hw_number_t hwirq); + +/*** + * irq_linear_revmap - Find a linux virq from a hw irq number. + * @host: host owning this hardware interrupt + * @hwirq: hardware irq number in that host space + * + * This is a fast path, for use by irq controller code that uses linear + * revmaps. It falls back to the slow path if the revmap doesn't exist + * yet and will create the revmap entry with appropriate locking + */ + +extern unsigned int irq_linear_revmap(struct irq_host *host, + irq_hw_number_t hwirq); + + + +/*** + * irq_alloc_virt - Allocate virtual irq numbers + * @host: host owning these new virtual irqs + * @count: number of consecutive numbers to allocate + * @hint: pass a hint number, the allocator will try to use a 1:1 mapping + * + * This is a low level function that is used internally by irq_create_mapping() + * and that can be used by some irq controller implementations for things + * like allocating ranges of numbers for MSIs. The revmaps are left untouched. + */ +extern unsigned int irq_alloc_virt(struct irq_host *host, + unsigned int count, + unsigned int hint); + +/*** + * irq_free_virt - Free virtual irq numbers + * @virq: virtual irq number of the first interrupt to free + * @count: number of interrupts to free + * + * This function is the opposite of irq_alloc_virt. It will not clear reverse + * maps, this should be done previously by unmap'ing the interrupt. In fact, + * all interrupts covered by the range being freed should have been unmapped + * prior to calling this.
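+ *
+ * As a hedged illustration only (not part of this interface), an irq
+ * controller that allocates a block of numbers for MSIs might pair the two
+ * calls along these lines, where "host" and "count" are placeholders:
+ *
+ *	virq = irq_alloc_virt(host, count, 0);
+ *	... create the individual mappings, e.g. via host->ops->map() ...
+ *	... later, once every interrupt in the range has been unmapped ...
+ *	irq_free_virt(virq, count);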
+ */ +extern void irq_free_virt(unsigned int virq, unsigned int count); + + +/* -- OF helpers -- */ + +/* irq_create_of_mapping - Map a hardware interrupt into linux virq space + * @controller: Device node of the interrupt controller + * @intspec: Interrupt specifier from the device-tree + * @intsize: Size of the interrupt specifier from the device-tree + * + * This function is identical to irq_create_mapping except that it takes + * as input information straight from the device-tree (typically the results + * of the of_irq_map_*() functions) + */ +extern unsigned int irq_create_of_mapping(struct device_node *controller, + u32 *intspec, unsigned int intsize); + + +/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space + * @device: Device node of the device whose interrupt is to be mapped + * @index: Index of the interrupt to map + * + * This function is a wrapper that chains of_irq_map_one() and + * irq_create_of_mapping() to make things easier for callers + */ +extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index); + +/* -- End OF helpers -- */ + +/*** + * irq_early_init - Init irq remapping subsystem + */ +extern void irq_early_init(void); + +static __inline__ int irq_canonicalize(int irq) { - return virt_irq_to_real_map[virt_irq]; + return irq; } -extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); + +#else /* CONFIG_PPC_MERGE */ + +/* This number is used when no interrupt has been assigned */ +#define NO_IRQ (-1) +#define NO_IRQ_IGNORE (-2) + /* - * List of interrupt controllers. + * These constants are used for passing information about interrupt + * signal polarity and level/edge sensing to the low-level PIC chip + * drivers. */ -#define IC_INVALID 0 -#define IC_OPEN_PIC 1 -#define IC_PPC_XIC 2 -#define IC_CELL_PIC 3 -#define IC_ISERIES 4 +#define IRQ_SENSE_MASK 0x1 +#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */ +#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */ -extern u64 ppc64_interrupt_controller; +#define IRQ_POLARITY_MASK 0x2 +#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ +#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ -#else /* 32-bit */ #if defined(CONFIG_40x) #include <asm/ibm4xx.h> @@ -512,16 +775,11 @@ extern u64 ppc64_interrupt_controller; #endif /* CONFIG_8260 */ -#endif +#endif /* Whatever way too big #ifdef */ #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) /* pedantic: these are long because they are used with set_bit --RR */ extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; -extern atomic_t ppc_n_lost_interrupts; - -#define virt_irq_create_mapping(x) (x) - -#endif /* * Because many systems have two overlapping names spaces for @@ -560,6 +818,7 @@ static __inline__ int irq_canonicalize(int irq) irq = 9; return irq; } +#endif /* CONFIG_PPC_MERGE */ extern int distribute_irqs; @@ -579,9 +838,8 @@ extern struct thread_info *softirq_ctx[NR_CPUS]; extern void irq_ctx_init(void); extern void call_do_softirq(struct thread_info *tp); -extern int call___do_IRQ(int irq, struct pt_regs *regs, - struct thread_info *tp); - +extern int call_handle_irq(int irq, void *p1, void *p2, + struct thread_info *tp, void *func); #else #define irq_ctx_init() diff --git a/include/asm-powerpc/irqflags.h b/include/asm-powerpc/irqflags.h new file mode 100644 index 0000000..7970cba --- /dev/null +++ b/include/asm-powerpc/irqflags.h @@ -0,0 +1,31 @@ +/* + * include/asm-powerpc/irqflags.h + * + * IRQ flags handling + * + * This file gets included from lowlevel asm headers
too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. + */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +/* + * Get definitions for raw_local_save_flags(x), etc. + */ +#include <asm-powerpc/hw_irq.h> + +/* + * Do the CPU's IRQ-state tracing from assembly code. We call a + * C function, so save all the C-clobbered registers: + */ +#ifdef CONFIG_TRACE_IRQFLAGS + +#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS + +#else +# define TRACE_IRQS_ON +# define TRACE_IRQS_OFF +#endif + +#endif diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index eba133d..c17c137 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h @@ -97,7 +97,7 @@ struct machdep_calls { void (*show_percpuinfo)(struct seq_file *m, int i); void (*init_IRQ)(void); - int (*get_irq)(struct pt_regs *); + unsigned int (*get_irq)(struct pt_regs *); #ifdef CONFIG_KEXEC void (*kexec_cpu_down)(int crash_shutdown, int secondary); #endif diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h index f0d22ac..eb241c9 100644 --- a/include/asm-powerpc/mpic.h +++ b/include/asm-powerpc/mpic.h @@ -114,9 +114,6 @@ #define MPIC_VEC_TIMER_1 248 #define MPIC_VEC_TIMER_0 247 -/* Type definition of the cascade handler */ -typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data); - #ifdef CONFIG_MPIC_BROKEN_U3 /* Fixup table entry */ struct mpic_irq_fixup @@ -132,10 +129,19 @@ struct mpic_irq_fixup /* The instance data of a given MPIC */ struct mpic { + /* The device node of the interrupt controller */ + struct device_node *of_node; + + /* The remapper for this MPIC */ + struct irq_host *irqhost; + /* The "linux" controller struct */ - hw_irq_controller hc_irq; + struct irq_chip hc_irq; +#ifdef CONFIG_MPIC_BROKEN_U3 + struct irq_chip hc_ht_irq; +#endif #ifdef CONFIG_SMP - hw_irq_controller hc_ipi; + struct irq_chip hc_ipi; #endif const char *name; /* Flags */ @@ -144,20 +150,12 @@ struct mpic unsigned int isu_size; unsigned int isu_shift; unsigned int isu_mask; - /* Offset of irq vector numbers */ - unsigned int irq_offset; unsigned int irq_count; - /* Offset of ipi vector numbers */ - unsigned int ipi_offset; /* Number of sources */ unsigned int num_sources; /* Number of CPUs */ unsigned int num_cpus; - /* cascade handler */ - mpic_cascade_t cascade; - void *cascade_data; - unsigned int cascade_vec; - /* senses array */ + /* default senses array */ unsigned char *senses; unsigned int senses_count; @@ -213,14 +211,11 @@ struct mpic * The values in the array start at the first source of the MPIC, * that is senses[0] correspond to linux irq "irq_offset". */ -extern struct mpic *mpic_alloc(unsigned long phys_addr, +extern struct mpic *mpic_alloc(struct device_node *node, + unsigned long phys_addr, unsigned int flags, unsigned int isu_size, - unsigned int irq_offset, unsigned int irq_count, - unsigned int ipi_offset, - unsigned char *senses, - unsigned int senses_num, const char *name); /* Assign ISUs, to call before mpic_init() @@ -232,22 +227,27 @@ extern struct mpic *mpic_alloc(unsigned long phys_addr, extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, unsigned long phys_addr); +/* Set default sense codes + * + * @mpic: controller + * @senses: array of sense codes + * @count: size of above array + * + * Optionally provide an array (indexed on hardware interrupt numbers + * for this MPIC) of default sense codes for the chip. 
Those are linux + * sense codes IRQ_TYPE_* + * + * The driver gets ownership of the pointer, don't dispose of it or + * anything like that. __init only. + */ +extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count); + + /* Initialize the controller. After this has been called, none of the above * should be called again for this mpic */ extern void mpic_init(struct mpic *mpic); -/* Setup a cascade. Currently, only one cascade is supported this - * way, though you can always do a normal request_irq() and add - * other cascades this way. You should call this _after_ having - * added all the ISUs - * - * @irq_no: "linux" irq number of the cascade (that is offset'ed vector) - * @handler: cascade handler function - */ -extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t hanlder, - void *data); - /* * All of the following functions must only be used after the * ISUs have been assigned and the controller fully initialized @@ -284,9 +284,9 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); void smp_mpic_message_pass(int target, int msg); /* Fetch interrupt from a given mpic */ -extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); +extern unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); /* This one gets to the primary mpic */ -extern int mpic_get_irq(struct pt_regs *regs); +extern unsigned int mpic_get_irq(struct pt_regs *regs); /* Set the EPIC clock ratio */ void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); @@ -294,8 +294,5 @@ void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); /* Enable/Disable EPIC serial interrupt mode */ void mpic_set_serial_int(struct mpic *mpic, int enable); -/* global mpic for pSeries */ -extern struct mpic *pSeries_mpic; - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MPIC_H */ diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h index faa1fc7..2f2e302 100644 --- a/include/asm-powerpc/percpu.h +++ b/include/asm-powerpc/percpu.h @@ -14,6 +14,7 @@ #define __per_cpu_offset(cpu) (paca[cpu].data_offset) #define __my_cpu_offset() get_paca()->data_offset +#define per_cpu_offset(x) (__per_cpu_offset(x)) /* Separate out the type, so (int[3], foo) works. */ #define DEFINE_PER_CPU(type, name) \ diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 010d186..b095a28 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -64,11 +64,6 @@ struct boot_param_header typedef u32 phandle; typedef u32 ihandle; -struct interrupt_info { - int line; - int sense; /* +ve/-ve logic, edge or level, etc. 
*/ -}; - struct property { char *name; int length; @@ -81,8 +76,6 @@ struct device_node { char *type; phandle node; phandle linux_phandle; - int n_intrs; - struct interrupt_info *intrs; char *full_name; struct property *properties; @@ -167,8 +160,8 @@ extern void unflatten_device_tree(void); extern void early_init_devtree(void *); extern int device_is_compatible(struct device_node *device, const char *); extern int machine_is_compatible(const char *compat); -extern unsigned char *get_property(struct device_node *node, const char *name, - int *lenp); +extern void *get_property(struct device_node *node, const char *name, + int *lenp); extern void print_properties(struct device_node *node); extern int prom_n_addr_cells(struct device_node* np); extern int prom_n_size_cells(struct device_node* np); @@ -204,6 +197,15 @@ extern int release_OF_resource(struct device_node* node, int index); */ +/* Helper to read a big number */ +static inline u64 of_read_number(u32 *cell, int size) +{ + u64 r = 0; + while (size--) + r = (r << 32) | *(cell++); + return r; +} + /* Translate an OF address block into a CPU physical address */ #define OF_BAD_ADDR ((u64)-1) @@ -240,5 +242,83 @@ extern void kdump_move_device_tree(void); /* CPU OF node matching */ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); + +/* + * OF interrupt mapping + */ + +/* This structure is returned when an interrupt is mapped. The controller + * field needs to be put() after use + */ + +#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */ + +struct of_irq { + struct device_node *controller; /* Interrupt controller node */ + u32 size; /* Specifier size */ + u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */ +}; + +/*** + * of_irq_map_init - Initialize the irq remapper + * @flags: flags defining workarounds to enable + * + * Some machines have bugs in the device-tree which require certain workarounds + * to be applied. Call this before any interrupt mapping attempts to enable + * those workarounds. + */ +#define OF_IMAP_OLDWORLD_MAC 0x00000001 +#define OF_IMAP_NO_PHANDLE 0x00000002 + +extern void of_irq_map_init(unsigned int flags); + +/*** + * of_irq_map_raw - Low level interrupt tree parsing + * @parent: the device interrupt parent + * @intspec: interrupt specifier ("interrupts" property of the device) + * @addr: address specifier (start of "reg" property of the device) + * @out_irq: structure of_irq filled by this function + * + * Returns 0 on success and a negative number on error + * + * This function is a low-level interrupt tree walking function. It + * can be used to do a partial walk with synthesized reg and interrupts + * properties, for example when resolving PCI interrupts when no device + * node exists for the parent. + * + */ + +extern int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr, + struct of_irq *out_irq); + + +/*** + * of_irq_map_one - Resolve an interrupt for a device + * @device: the device whose interrupt is to be resolved + * @index: index of the interrupt to resolve + * @out_irq: structure of_irq filled by this function + * + * This function resolves an interrupt, walking the tree, for a given + * device-tree node. It's the high level counterpart to of_irq_map_raw(). + * It also implements the workarounds for OldWorld Macs.
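+ *
+ * As a sketch only (not part of this patch), a caller that cannot use the
+ * irq_of_parse_and_map() wrapper could combine this with
+ * irq_create_of_mapping() roughly as follows, where "np" and "virq" are
+ * illustrative names:
+ *
+ *	struct of_irq oirq;
+ *
+ *	if (of_irq_map_one(np, 0, &oirq) == 0)
+ *		virq = irq_create_of_mapping(oirq.controller,
+ *					     oirq.specifier, oirq.size);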
+ */ +extern int of_irq_map_one(struct device_node *device, int index, + struct of_irq *out_irq); + +/*** + * of_irq_map_pci - Resolve the interrupt for a PCI device + * @pdev: the device whose interrupt is to be resolved + * @out_irq: structure of_irq filled by this function + * + * This function resolves the PCI interrupt for a given PCI device. If a + * device-node exists for a given pci_dev, it will use normal OF tree + * walking. If not, it will implement standard swizzling and walk up the + * PCI tree until an device-node is found, at which point it will finish + * resolving using the OF tree walking. + */ +struct pci_dev; +extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); + + #endif /* __KERNEL__ */ #endif /* _POWERPC_PROM_H */ diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h index 2c2fe96..e929145 100644 --- a/include/asm-powerpc/rwsem.h +++ b/include/asm-powerpc/rwsem.h @@ -28,24 +28,11 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEBUG_INIT } + LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -60,9 +47,6 @@ static inline void init_rwsem(struct rw_semaphore *sem) sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif } /* diff --git a/include/asm-powerpc/signal.h b/include/asm-powerpc/signal.h index a4d8f86..a8c7bab 100644 --- a/include/asm-powerpc/signal.h +++ b/include/asm-powerpc/signal.h @@ -63,7 +63,6 @@ typedef struct { * SA_FLAGS values: * * SA_ONSTACK is not currently supported, but will allow sigaltstack(2). - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. 
@@ -83,7 +82,6 @@ typedef struct { #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000u /* dummy -- ignored */ #define SA_RESTORER 0x04000000U diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h index 9609d3e..c02d105 100644 --- a/include/asm-powerpc/spu.h +++ b/include/asm-powerpc/spu.h @@ -117,6 +117,7 @@ struct spu { struct list_head sched_list; int number; int nid; + unsigned int irqs[3]; u32 isrc; u32 node; u64 flags; diff --git a/include/asm-ppc/floppy.h b/include/asm-ppc/floppy.h index 2ba191e..d3963ca 100644 --- a/include/asm-ppc/floppy.h +++ b/include/asm-ppc/floppy.h @@ -96,11 +96,11 @@ static int vdma_get_dma_residue(unsigned int dummy) static int fd_request_irq(void) { if (can_use_virtual_dma) - return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_hardint, + IRQF_DISABLED, "floppy", NULL); else - return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_interrupt, + IRQF_DISABLED, "floppy", NULL); } static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) diff --git a/include/asm-s390/irqflags.h b/include/asm-s390/irqflags.h new file mode 100644 index 0000000..65f4db6 --- /dev/null +++ b/include/asm-s390/irqflags.h @@ -0,0 +1,50 @@ +/* + * include/asm-s390/irqflags.h + * + * Copyright (C) IBM Corp. 2006 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> + */ + +#ifndef __ASM_IRQFLAGS_H +#define __ASM_IRQFLAGS_H + +#ifdef __KERNEL__ + +/* interrupt control.. */ +#define raw_local_irq_enable() ({ \ + unsigned long __dummy; \ + __asm__ __volatile__ ( \ + "stosm 0(%1),0x03" \ + : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \ + }) + +#define raw_local_irq_disable() ({ \ + unsigned long __flags; \ + __asm__ __volatile__ ( \ + "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \ + __flags; \ + }) + +#define raw_local_save_flags(x) \ + __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) ) + +#define raw_local_irq_restore(x) \ + __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory") + +#define raw_irqs_disabled() \ +({ \ + unsigned long flags; \ + local_save_flags(flags); \ + !((flags >> __FLAG_SHIFT) & 3); \ +}) + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !((flags >> __FLAG_SHIFT) & 3); +} + +/* For spinlocks etc */ +#define raw_local_irq_save(x) ((x) = raw_local_irq_disable()) + +#endif /* __KERNEL__ */ +#endif /* __ASM_IRQFLAGS_H */ diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h index d9a8cca..28b3517 100644 --- a/include/asm-s390/percpu.h +++ b/include/asm-s390/percpu.h @@ -42,6 +42,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) +#define per_cpu_offset(x) (__per_cpu_offset[x]) /* A macro to avoid #include hell... 
*/ #define percpu_modcopy(pcpudst, src, size) \ diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h index 0422a08..13ec169 100644 --- a/include/asm-s390/rwsem.h +++ b/include/asm-s390/rwsem.h @@ -61,6 +61,9 @@ struct rw_semaphore { signed long count; spinlock_t wait_lock; struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; #ifndef __s390x__ @@ -80,8 +83,16 @@ struct rw_semaphore { /* * initialisation */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + #define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) } +{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -93,6 +104,17 @@ static inline void init_rwsem(struct rw_semaphore *sem) INIT_LIST_HEAD(&sem->wait_list); } +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + + /* * lock for reading */ @@ -155,7 +177,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline void __down_write(struct rw_semaphore *sem) +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) { signed long old, new, tmp; @@ -181,6 +203,11 @@ static inline void __down_write(struct rw_semaphore *sem) rwsem_down_write_failed(sem); } +static inline void __down_write(struct rw_semaphore *sem) +{ + __down_write_nested(sem, 0); +} + /* * trylock for writing -- returns 1 if successful, 0 if contention */ diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h index 702cf43..32cdc69 100644 --- a/include/asm-s390/semaphore.h +++ b/include/asm-s390/semaphore.h @@ -37,7 +37,8 @@ struct semaphore { static inline void sema_init (struct semaphore *sem, int val) { - *sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val); + atomic_set(&sem->count, val); + init_waitqueue_head(&sem->wait); } static inline void init_MUTEX (struct semaphore *sem) diff --git a/include/asm-s390/signal.h b/include/asm-s390/signal.h index 7084626..f6cfddb 100644 --- a/include/asm-s390/signal.h +++ b/include/asm-s390/signal.h @@ -84,7 +84,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -104,7 +103,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 71a0732..9ab186f 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h @@ -301,34 +301,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) #define set_mb(var, value) do { var = value; mb(); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0) -/* interrupt control.. 
*/ -#define local_irq_enable() ({ \ - unsigned long __dummy; \ - __asm__ __volatile__ ( \ - "stosm 0(%1),0x03" \ - : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \ - }) - -#define local_irq_disable() ({ \ - unsigned long __flags; \ - __asm__ __volatile__ ( \ - "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \ - __flags; \ - }) - -#define local_save_flags(x) \ - __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) ) - -#define local_irq_restore(x) \ - __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory") - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - !((flags >> __FLAG_SHIFT) & 3); \ -}) - #ifdef __s390x__ #define __ctl_load(array, low, high) ({ \ @@ -442,8 +414,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) }) #endif /* __s390x__ */ -/* For spinlocks etc */ -#define local_irq_save(x) ((x) = local_irq_disable()) +#include <linux/irqflags.h> /* * Use to set psw mask except for the first byte which @@ -482,4 +453,3 @@ extern void (*_machine_power_off)(void); #endif /* __KERNEL__ */ #endif - diff --git a/include/asm-sh/floppy.h b/include/asm-sh/floppy.h index 307d9ce..dc1ad46 100644 --- a/include/asm-sh/floppy.h +++ b/include/asm-sh/floppy.h @@ -146,12 +146,11 @@ static int vdma_get_dma_residue(unsigned int dummy) static int fd_request_irq(void) { if(can_use_virtual_dma) - return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_hardint, + IRQF_DISABLED, "floppy", NULL); else - return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT, - "floppy", NULL); - + return request_irq(FLOPPY_IRQ, floppy_interrupt, + IRQF_DISABLED, "floppy", NULL); } static unsigned long dma_mem_alloc(unsigned long size) diff --git a/include/asm-sh/mpc1211/keyboard.h b/include/asm-sh/mpc1211/keyboard.h index 5f0b908..71ef4cf 100644 --- a/include/asm-sh/mpc1211/keyboard.h +++ b/include/asm-sh/mpc1211/keyboard.h @@ -57,7 +57,7 @@ extern unsigned char pckbd_sysrq_xlate[128]; #define AUX_IRQ 12 #define aux_request_irq(hand, dev_id) \ - request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS2 Mouse", dev_id) + request_irq(AUX_IRQ, hand, IRQF_SHARED, "PS2 Mouse", dev_id) #define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id) diff --git a/include/asm-sh/rwsem.h b/include/asm-sh/rwsem.h index 0262d3d..9d2aea5 100644 --- a/include/asm-sh/rwsem.h +++ b/include/asm-sh/rwsem.h @@ -25,24 +25,11 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEBUG_INIT } + LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -57,9 +44,6 @@ static inline void init_rwsem(struct rw_semaphore *sem) sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif } /* diff --git a/include/asm-sh/signal.h b/include/asm-sh/signal.h index d6e8eb0..5c5c1e8 100644 --- a/include/asm-sh/signal.h +++ b/include/asm-sh/signal.h @@ -75,7 +75,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. 
- * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -95,7 +94,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index b752e5c..ce2e606 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -12,7 +12,7 @@ */ #define switch_to(prev, next, last) do { \ - task_t *__last; \ + struct task_struct *__last; \ register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \ diff --git a/include/asm-sh64/keyboard.h b/include/asm-sh64/keyboard.h index 733e2bb..1fab96d 100644 --- a/include/asm-sh64/keyboard.h +++ b/include/asm-sh64/keyboard.h @@ -65,7 +65,7 @@ extern unsigned char pckbd_sysrq_xlate[128]; #endif #define aux_request_irq(hand, dev_id) \ - request_irq(AUX_IRQ, hand, SA_SHIRQ, "PS2 Mouse", dev_id) + request_irq(AUX_IRQ, hand, IRQF_SHARED, "PS2 Mouse", dev_id) #define aux_free_irq(dev_id) free_irq(AUX_IRQ, dev_id) diff --git a/include/asm-sh64/signal.h b/include/asm-sh64/signal.h index 2400dc6..a5a2820 100644 --- a/include/asm-sh64/signal.h +++ b/include/asm-sh64/signal.h @@ -74,7 +74,6 @@ typedef struct { * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -94,7 +93,6 @@ typedef struct { #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h index 7a941b8..c53b332 100644 --- a/include/asm-sparc/floppy.h +++ b/include/asm-sparc/floppy.h @@ -271,7 +271,8 @@ static int sun_fd_request_irq(void) if(!once) { once = 1; - error = request_fast_irq(FLOPPY_IRQ, floppy_hardint, SA_INTERRUPT, "floppy"); + error = request_fast_irq(FLOPPY_IRQ, floppy_hardint, + IRQF_DISABLED, "floppy"); return ((error == 0) ? 0 : -1); } else return 0; } diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h index aa9960a..0ae5084 100644 --- a/include/asm-sparc/signal.h +++ b/include/asm-sparc/signal.h @@ -132,16 +132,13 @@ struct sigstack { * usage of signal stacks by using the (now obsolete) sa_restorer field in * the sigaction structure as a stack pointer. This is now possible due to * the changes in signal handling. LBT 010493. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_SHIRQ flag is for shared interrupt support on PCI and EISA. 
*/ #define SA_NOCLDSTOP _SV_IGNCHILD #define SA_STACK _SV_SSTACK #define SA_ONSTACK _SV_SSTACK #define SA_RESTART _SV_INTR #define SA_ONESHOT _SV_RESET -#define SA_INTERRUPT 0x10u #define SA_NOMASK 0x20u #define SA_NOCLDWAIT 0x100u #define SA_SIGINFO 0x200u diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index b591d0e..abf1500 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h @@ -267,7 +267,7 @@ static int sun_fd_request_irq(void) once = 1; error = request_irq(FLOPPY_IRQ, sparc_floppy_irq, - SA_INTERRUPT, "floppy", NULL); + IRQF_DISABLED, "floppy", NULL); return ((error == 0) ? 0 : -1); } diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h index a6ece06..ced8cbd 100644 --- a/include/asm-sparc64/percpu.h +++ b/include/asm-sparc64/percpu.h @@ -11,6 +11,7 @@ extern unsigned long __per_cpu_base; extern unsigned long __per_cpu_shift; #define __per_cpu_offset(__cpu) \ (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) +#define per_cpu_offset(x) (__per_cpu_offset(x)) /* Separate out the type, so (int[3], foo) works. */ #define DEFINE_PER_CPU(type, name) \ diff --git a/include/asm-sparc64/signal.h b/include/asm-sparc64/signal.h index fdc42a1..9968871 100644 --- a/include/asm-sparc64/signal.h +++ b/include/asm-sparc64/signal.h @@ -133,16 +133,13 @@ struct sigstack { * usage of signal stacks by using the (now obsolete) sa_restorer field in * the sigaction structure as a stack pointer. This is now possible due to * the changes in signal handling. LBT 010493. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_SHIRQ flag is for shared interrupt support on PCI and EISA. */ #define SA_NOCLDSTOP _SV_IGNCHILD #define SA_STACK _SV_SSTACK #define SA_ONSTACK _SV_SSTACK #define SA_RESTART _SV_INTR #define SA_ONESHOT _SV_RESET -#define SA_INTERRUPT 0x10u #define SA_NOMASK 0x20u #define SA_NOCLDWAIT 0x100u #define SA_SIGINFO 0x200u diff --git a/include/asm-v850/signal.h b/include/asm-v850/signal.h index cb52caa..a38df08 100644 --- a/include/asm-v850/signal.h +++ b/include/asm-v850/signal.h @@ -77,7 +77,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. 
@@ -97,7 +96,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h index 006291e..32ff5d13 100644 --- a/include/asm-x86_64/floppy.h +++ b/include/asm-x86_64/floppy.h @@ -144,11 +144,11 @@ static int vdma_get_dma_residue(unsigned int dummy) static int fd_request_irq(void) { if(can_use_virtual_dma) - return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_hardint, + IRQF_DISABLED, "floppy", NULL); else - return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT, - "floppy", NULL); + return request_irq(FLOPPY_IRQ, floppy_interrupt, + IRQF_DISABLED, "floppy", NULL); } static unsigned long dma_mem_alloc(unsigned long size) diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h new file mode 100644 index 0000000..cce6937 --- /dev/null +++ b/include/asm-x86_64/irqflags.h @@ -0,0 +1,141 @@ +/* + * include/asm-x86_64/irqflags.h + * + * IRQ flags handling + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() functions from the lowlevel headers. + */ +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#ifndef __ASSEMBLY__ +/* + * Interrupt control: + */ + +static inline unsigned long __raw_local_save_flags(void) +{ + unsigned long flags; + + __asm__ __volatile__( + "# __raw_save_flags\n\t" + "pushfq ; popq %q0" + : "=g" (flags) + : /* no input */ + : "memory" + ); + + return flags; +} + +#define raw_local_save_flags(flags) \ + do { (flags) = __raw_local_save_flags(); } while (0) + +static inline void raw_local_irq_restore(unsigned long flags) +{ + __asm__ __volatile__( + "pushq %0 ; popfq" + : /* no output */ + :"g" (flags) + :"memory", "cc" + ); +} + +#ifdef CONFIG_X86_VSMP + +/* + * Interrupt control for the VSMP architecture: + */ + +static inline void raw_local_irq_disable(void) +{ + unsigned long flags = __raw_local_save_flags(); + + raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); +} + +static inline void raw_local_irq_enable(void) +{ + unsigned long flags = __raw_local_save_flags(); + + raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); +} + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1<<9)) || (flags & (1 << 18)); +} + +#else /* CONFIG_X86_VSMP */ + +static inline void raw_local_irq_disable(void) +{ + __asm__ __volatile__("cli" : : : "memory"); +} + +static inline void raw_local_irq_enable(void) +{ + __asm__ __volatile__("sti" : : : "memory"); +} + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (1 << 9)); +} + +#endif + +/* + * For spinlocks, etc.: + */ + +static inline unsigned long __raw_local_irq_save(void) +{ + unsigned long flags = __raw_local_save_flags(); + + raw_local_irq_disable(); + + return flags; +} + +#define raw_local_irq_save(flags) \ + do { (flags) = __raw_local_irq_save(); } while (0) + +static inline int raw_irqs_disabled(void) +{ + unsigned long flags = __raw_local_save_flags(); + + return raw_irqs_disabled_flags(flags); +} + +/* + * Used in the idle loop; sti takes one instruction cycle + * to complete: + */ +static inline void raw_safe_halt(void) +{ + __asm__ __volatile__("sti; hlt" : : : "memory"); +} + +/* + * Used when interrupts are already enabled or to + * shutdown the processor: + */ 
+static inline void halt(void) +{ + __asm__ __volatile__("hlt": : :"memory"); +} + +#else /* __ASSEMBLY__: */ +# ifdef CONFIG_TRACE_IRQFLAGS +# define TRACE_IRQS_ON call trace_hardirqs_on_thunk +# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk +# else +# define TRACE_IRQS_ON +# define TRACE_IRQS_OFF +# endif +#endif + +#endif diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h index cd52c7f..2b0c088 100644 --- a/include/asm-x86_64/kdebug.h +++ b/include/asm-x86_64/kdebug.h @@ -49,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str, return atomic_notifier_call_chain(&die_chain, val, &args); } -extern int printk_address(unsigned long address); +extern void printk_address(unsigned long address); extern void die(const char *,struct pt_regs *,long); extern void __die(const char *,struct pt_regs *,long); extern void show_registers(struct pt_regs *regs); diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index 549eb92..08dd9f9 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h @@ -14,6 +14,8 @@ #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) #define __my_cpu_offset() read_pda(data_offset) +#define per_cpu_offset(x) (__per_cpu_offset(x)) + /* Separate out the type, so (int[3], foo) works. */ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h index f8d5579..cef7a7d 100644 --- a/include/asm-x86_64/signal.h +++ b/include/asm-x86_64/signal.h @@ -83,7 +83,6 @@ typedef unsigned long sigset_t; * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. @@ -103,7 +102,6 @@ typedef unsigned long sigset_t; #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index 68e559f..f67f287 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h @@ -244,43 +244,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) -/* interrupt control.. 
*/ -#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0) -#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc") - -#ifdef CONFIG_X86_VSMP -/* Interrupt control for VSMP architecture */ -#define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0) -#define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0) - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - (flags & (1<<18)) || !(flags & (1<<9)); \ -}) - -/* For spinlocks etc */ -#define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0) -#else /* CONFIG_X86_VSMP */ -#define local_irq_disable() __asm__ __volatile__("cli": : :"memory") -#define local_irq_enable() __asm__ __volatile__("sti": : :"memory") - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - !(flags & (1<<9)); \ -}) - -/* For spinlocks etc */ -#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0) -#endif - -/* used in the idle loop; sti takes one instruction cycle to complete */ -#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") -/* used when interrupts are already enabled or to shutdown the processor */ -#define halt() __asm__ __volatile__("hlt": : :"memory") +#include <linux/irqflags.h> void cpu_idle_wait(void); diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h index abcd86d..0aad3a5 100644 --- a/include/asm-xtensa/rwsem.h +++ b/include/asm-xtensa/rwsem.h @@ -31,24 +31,11 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; -#endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEBUG_INIT } + LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) @@ -63,9 +50,6 @@ static inline void init_rwsem(struct rw_semaphore *sem) sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#if RWSEM_DEBUG - sem->debug = 0; -#endif } /* diff --git a/include/asm-xtensa/signal.h b/include/asm-xtensa/signal.h index a99c9ae..633ba73 100644 --- a/include/asm-xtensa/signal.h +++ b/include/asm-xtensa/signal.h @@ -75,7 +75,6 @@ typedef struct { * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. 
@@ -95,7 +94,6 @@ typedef struct { #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 @@ -109,19 +107,6 @@ typedef struct { #define SIGSTKSZ 8192 #ifndef __ASSEMBLY__ -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#define SA_PROBEIRQ 0x08000000 -#endif #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 90663ad..251c41e 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -21,6 +21,18 @@ struct completion { #define DECLARE_COMPLETION(work) \ struct completion work = COMPLETION_INITIALIZER(work) +/* + * Lockdep needs to run a non-constant initializer for on-stack + * completions - so we use the _ONSTACK() variant for those that + * are on the kernel stack: + */ +#ifdef CONFIG_LOCKDEP +# define DECLARE_COMPLETION_ONSTACK(work) \ + struct completion work = ({ init_completion(&work); work; }) +#else +# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) +#endif + static inline void init_completion(struct completion *x) { x->done = 0; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 0dd1610..471781f 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -114,6 +114,18 @@ struct dentry { unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ }; +/* + * dentry->d_lock spinlock nesting subclasses: + * + * 0: normal + * 1: nested + */ +enum dentry_d_lock_class +{ + DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. 
*/ + DENTRY_D_LOCK_NESTED +}; + struct dentry_operations { int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash) (struct dentry *, struct qstr *); diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h new file mode 100644 index 0000000..6a70478 --- /dev/null +++ b/include/linux/debug_locks.h @@ -0,0 +1,69 @@ +#ifndef __LINUX_DEBUG_LOCKING_H +#define __LINUX_DEBUG_LOCKING_H + +extern int debug_locks; +extern int debug_locks_silent; + +/* + * Generic 'turn off all lock debugging' function: + */ +extern int debug_locks_off(void); + +/* + * In the debug case we carry the caller's instruction pointer into + * other functions, but we dont want the function argument overhead + * in the nondebug case - hence these macros: + */ +#define _RET_IP_ (unsigned long)__builtin_return_address(0) +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) + +#define DEBUG_LOCKS_WARN_ON(c) \ +({ \ + int __ret = 0; \ + \ + if (unlikely(c)) { \ + if (debug_locks_off()) \ + WARN_ON(1); \ + __ret = 1; \ + } \ + __ret; \ +}) + +#ifdef CONFIG_SMP +# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) +#else +# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS + extern void locking_selftest(void); +#else +# define locking_selftest() do { } while (0) +#endif + +#ifdef CONFIG_LOCKDEP +extern void debug_show_all_locks(void); +extern void debug_show_held_locks(struct task_struct *task); +extern void debug_check_no_locks_freed(const void *from, unsigned long len); +extern void debug_check_no_locks_held(struct task_struct *task); +#else +static inline void debug_show_all_locks(void) +{ +} + +static inline void debug_show_held_locks(struct task_struct *task) +{ +} + +static inline void +debug_check_no_locks_freed(const void *from, unsigned long len) +{ +} + +static inline void +debug_check_no_locks_held(struct task_struct *task) +{ +} +#endif + +#endif diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 272010a6..c94d8f1 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -44,7 +44,7 @@ enum dma_event { }; /** - * typedef dma_cookie_t + * typedef dma_cookie_t - an opaque DMA cookie * * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code */ @@ -80,14 +80,14 @@ struct dma_chan_percpu { /** * struct dma_chan - devices supply DMA channels, clients use them - * @client: ptr to the client user of this chan, will be NULL when unused - * @device: ptr to the dma device who supplies this channel, always !NULL + * @client: ptr to the client user of this chan, will be %NULL when unused + * @device: ptr to the dma device who supplies this channel, always !%NULL * @cookie: last cookie value returned to client - * @chan_id: - * @class_dev: + * @chan_id: channel ID for sysfs + * @class_dev: class device for sysfs * @refcount: kref, used in "bigref" slow-mode - * @slow_ref: - * @rcu: + * @slow_ref: indicates that the DMA channel is free + * @rcu: the DMA channel's RCU head * @client_node: used to add this to the client chan list * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu @@ -162,10 +162,17 @@ struct dma_client { * @chancnt: how many DMA channels are supported * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list - * @refcount: - * @done: - * @dev_id: - * Other func ptrs: used to make use of this device's capabilities + * @refcount: reference count + * @done: IO 
completion struct + * @dev_id: unique device ID + * @device_alloc_chan_resources: allocate resources and return the + * number of allocated descriptors + * @device_free_chan_resources: release DMA channel's resources + * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer + * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page + * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset + * @device_memcpy_complete: poll the status of an IOAT DMA transaction + * @device_memcpy_issue_pending: push appended descriptors to hardware */ struct dma_device { @@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client, * Both @dest and @src must be mappable to a bus address according to the * DMA mapping API rules for streaming mappings. * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages) + * user space pages). */ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len) @@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, } /** - * dma_async_memcpy_buf_to_pg - offloaded copy + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page * @chan: DMA channel to offload copy to * @page: destination page * @offset: offset in page to copy to @@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, } /** - * dma_async_memcpy_buf_to_pg - offloaded copy + * dma_async_memcpy_pg_to_pg - offloaded copy from page to page * @chan: DMA channel to offload copy to - * @dest_page: destination page + * @dest_pg: destination page * @dest_off: offset in page to copy to - * @src_page: source page + * @src_pg: source page * @src_off: offset in page to copy from * @len: length * * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus * address according to the DMA mapping API rules for streaming mappings. * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident - * (kernel memory or locked user space pages) + * (kernel memory or locked user space pages). */ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, unsigned int dest_off, struct page *src_pg, @@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, /** * dma_async_memcpy_issue_pending - flush pending copies to HW - * @chan: + * @chan: target DMA channel * * This allows drivers to push copies to HW in batches, * reducing MMIO writes where possible. 
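
[Editor's note - illustrative sketch, not part of the patch above.] The dmaengine comments in the preceding hunks describe the offloaded-copy flow: queue a copy and receive a dma_cookie_t (negative values are error codes), push appended descriptors to hardware with dma_async_memcpy_issue_pending(), then poll for completion. The minimal client sketch below shows that flow. The channel is assumed to have been obtained through the dma_client mechanism; dma_async_memcpy_complete(), DMA_IN_PROGRESS and cpu_relax() are assumed from the wider kernel API of this era and are not defined in the hunks shown here.

#include <linux/dmaengine.h>

/* Hypothetical helper: synchronously copy len bytes via a DMA channel. */
static int copy_with_dma(struct dma_chan *chan, void *dst, void *src, size_t len)
{
	dma_cookie_t cookie;

	/*
	 * Queue the copy.  As the comment above notes, both buffers must be
	 * mappable to bus addresses and stay memory resident until done.
	 */
	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0)
		return cookie;	/* a negative cookie is an error code */

	/* Push the batched descriptor(s) to the hardware. */
	dma_async_memcpy_issue_pending(chan);

	/*
	 * Poll until the transaction completes (assumed polling helper; a
	 * real client would do useful work instead of spinning here).
	 */
	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();

	return 0;
}
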
diff --git a/include/linux/fs.h b/include/linux/fs.h index e04a5cf..134b320 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -436,6 +436,21 @@ struct block_device { }; /* + * bdev->bd_mutex nesting subclasses for the lock validator: + * + * 0: normal + * 1: 'whole' + * 2: 'partition' + */ +enum bdev_bd_mutex_lock_class +{ + BD_MUTEX_NORMAL, + BD_MUTEX_WHOLE, + BD_MUTEX_PARTITION +}; + + +/* * Radix-tree tags, for tagging dirty and writeback pages within the pagecache * radix trees */ @@ -543,6 +558,25 @@ struct inode { }; /* + * inode->i_mutex nesting subclasses for the lock validator: + * + * 0: the object of the current VFS operation + * 1: parent + * 2: child/target + * 3: quota file + * + * The locking order between these classes is + * parent -> child -> normal -> quota + */ +enum inode_i_mutex_lock_class +{ + I_MUTEX_NORMAL, + I_MUTEX_PARENT, + I_MUTEX_CHILD, + I_MUTEX_QUOTA +}; + +/* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic * with respect to the local cpu (unlike with preempt disabled), @@ -1276,6 +1310,8 @@ struct file_system_type { struct module *owner; struct file_system_type * next; struct list_head fs_supers; + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; }; extern int get_sb_bdev(struct file_system_type *fs_type, @@ -1404,6 +1440,7 @@ extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, unsigned); +extern struct block_device *open_partition_by_devnum(dev_t, unsigned); extern const struct file_operations def_blk_fops; extern const struct address_space_operations def_blk_aops; extern const struct file_operations def_chr_fops; @@ -1414,6 +1451,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); extern int blkdev_get(struct block_device *, mode_t, unsigned); extern int blkdev_put(struct block_device *); +extern int blkdev_put_partition(struct block_device *); extern int bd_claim(struct block_device *, void *); extern void bd_release(struct block_device *); #ifdef CONFIG_SYSFS diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 114ae58..50d8b57 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -3,6 +3,7 @@ #include <linux/preempt.h> #include <linux/smp_lock.h> +#include <linux/lockdep.h> #include <asm/hardirq.h> #include <asm/system.h> @@ -86,9 +87,6 @@ extern void synchronize_irq(unsigned int irq); # define synchronize_irq(irq) barrier() #endif -#define nmi_enter() irq_enter() -#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET) - struct task_struct; #ifndef CONFIG_VIRT_CPU_ACCOUNTING @@ -97,12 +95,35 @@ static inline void account_system_vtime(struct task_struct *tsk) } #endif +/* + * It is safe to do non-atomic ops on ->hardirq_context, + * because NMI handlers may not preempt and the ops are + * always balanced, so the interrupted value of ->hardirq_context + * will always be restored. 
+ */ #define irq_enter() \ do { \ account_system_vtime(current); \ add_preempt_count(HARDIRQ_OFFSET); \ + trace_hardirq_enter(); \ + } while (0) + +/* + * Exit irq context without processing softirqs: + */ +#define __irq_exit() \ + do { \ + trace_hardirq_exit(); \ + account_system_vtime(current); \ + sub_preempt_count(HARDIRQ_OFFSET); \ } while (0) +/* + * Exit irq context and process softirqs if needed: + */ extern void irq_exit(void); +#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0) +#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0) + #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 07d7305..e4bccbc 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -91,6 +91,7 @@ struct hrtimer_base { ktime_t (*get_softirq_time)(void); struct hrtimer *curr_timer; ktime_t softirq_time; + struct lock_class_key lock_key; }; /* diff --git a/include/linux/ide.h b/include/linux/ide.h index 285316c..dc7abef 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1359,7 +1359,7 @@ extern struct semaphore ide_cfg_sem; * ide_drive_t->hwif: constant, no locking */ -#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable(); } while (0) +#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) extern struct bus_type ide_bus_type; diff --git a/include/linux/idr.h b/include/linux/idr.h index f559a71..8268034 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -66,7 +66,7 @@ struct idr { .id_free = NULL, \ .layers = 0, \ .id_free_cnt = 0, \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ } #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3a25695..60aac2c 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -3,6 +3,8 @@ #include <linux/file.h> #include <linux/rcupdate.h> +#include <linux/irqflags.h> +#include <linux/lockdep.h> #define INIT_FDTABLE \ { \ @@ -21,7 +23,7 @@ .count = ATOMIC_INIT(1), \ .fdt = &init_files.fdtab, \ .fdtab = INIT_FDTABLE, \ - .file_lock = SPIN_LOCK_UNLOCKED, \ + .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \ .next_fd = 0, \ .close_on_exec_init = { { 0, } }, \ .open_fds_init = { { 0, } }, \ @@ -36,7 +38,7 @@ .user_id = 0, \ .next = NULL, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ - .ctx_lock = SPIN_LOCK_UNLOCKED, \ + .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \ .reqs_active = 0U, \ .max_reqs = ~0U, \ } @@ -48,7 +50,7 @@ .mm_users = ATOMIC_INIT(2), \ .mm_count = ATOMIC_INIT(1), \ .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \ - .page_table_lock = SPIN_LOCK_UNLOCKED, \ + .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \ .mmlist = LIST_HEAD_INIT(name.mmlist), \ .cpu_vm_mask = CPU_MASK_ALL, \ } @@ -69,7 +71,7 @@ #define INIT_SIGHAND(sighand) { \ .count = ATOMIC_INIT(1), \ .action = { { { .sa_handler = NULL, } }, }, \ - .siglock = SPIN_LOCK_UNLOCKED, \ + .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ } extern struct group_info init_groups; @@ -119,12 +121,13 @@ extern struct group_info init_groups; .list = LIST_HEAD_INIT(tsk.pending.list), \ .signal = {{0}}}, \ .blocked = {{0}}, \ - .alloc_lock = SPIN_LOCK_UNLOCKED, \ + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ .pi_lock = SPIN_LOCK_UNLOCKED, \ - INIT_RT_MUTEXES(tsk) \ + 
INIT_TRACE_IRQFLAGS \ + INIT_LOCKDEP \ } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index db2a63a..d5afee9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -10,10 +10,60 @@ #include <linux/irqreturn.h> #include <linux/hardirq.h> #include <linux/sched.h> +#include <linux/irqflags.h> #include <asm/atomic.h> #include <asm/ptrace.h> #include <asm/system.h> +/* + * These correspond to the IORESOURCE_IRQ_* defines in + * linux/ioport.h to select the interrupt line behaviour. When + * requesting an interrupt without specifying a IRQF_TRIGGER, the + * setting should be assumed to be "as already configured", which + * may be as per machine or firmware initialisation. + */ +#define IRQF_TRIGGER_NONE 0x00000000 +#define IRQF_TRIGGER_RISING 0x00000001 +#define IRQF_TRIGGER_FALLING 0x00000002 +#define IRQF_TRIGGER_HIGH 0x00000004 +#define IRQF_TRIGGER_LOW 0x00000008 +#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) +#define IRQF_TRIGGER_PROBE 0x00000010 + +/* + * These flags used only by the kernel as part of the + * irq handling routines. + * + * IRQF_DISABLED - keep irqs disabled when calling the action handler + * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator + * IRQF_SHARED - allow sharing the irq among several devices + * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur + * IRQF_TIMER - Flag to mark this interrupt as timer interrupt + */ +#define IRQF_DISABLED 0x00000020 +#define IRQF_SAMPLE_RANDOM 0x00000040 +#define IRQF_SHARED 0x00000080 +#define IRQF_PROBE_SHARED 0x00000100 +#define IRQF_TIMER 0x00000200 +#define IRQF_PERCPU 0x00000400 + +/* + * Migration helpers. Scheduled for removal in 1/2007 + * Do not use for new code ! + */ +#define SA_INTERRUPT IRQF_DISABLED +#define SA_SAMPLE_RANDOM IRQF_SAMPLE_RANDOM +#define SA_SHIRQ IRQF_SHARED +#define SA_PROBEIRQ IRQF_PROBE_SHARED +#define SA_PERCPU IRQF_PERCPU + +#define SA_TRIGGER_LOW IRQF_TRIGGER_LOW +#define SA_TRIGGER_HIGH IRQF_TRIGGER_HIGH +#define SA_TRIGGER_FALLING IRQF_TRIGGER_FALLING +#define SA_TRIGGER_RISING IRQF_TRIGGER_RISING +#define SA_TRIGGER_MASK IRQF_TRIGGER_MASK + struct irqaction { irqreturn_t (*handler)(int, void *, struct pt_regs *); unsigned long flags; @@ -31,12 +81,64 @@ extern int request_irq(unsigned int, unsigned long, const char *, void *); extern void free_irq(unsigned int, void *); +/* + * On lockdep we dont want to enable hardirqs in hardirq + * context. Use local_irq_enable_in_hardirq() to annotate + * kernel code that has to do this nevertheless (pretty much + * the only valid case is for old/broken hardware that is + * insanely slow). + * + * NOTE: in theory this might break fragile code that relies + * on hardirq delivery - in practice we dont seem to have such + * places left. So the only effect should be slightly increased + * irqs-off latencies. + */ +#ifdef CONFIG_LOCKDEP +# define local_irq_enable_in_hardirq() do { } while (0) +#else +# define local_irq_enable_in_hardirq() local_irq_enable() +#endif #ifdef CONFIG_GENERIC_HARDIRQS extern void disable_irq_nosync(unsigned int irq); extern void disable_irq(unsigned int irq); extern void enable_irq(unsigned int irq); +/* + * Special lockdep variants of irq disabling/enabling. 
+ * These should be used for locking constructs that + * know that a particular irq context which is disabled, + * and which is the only irq-context user of a lock, + * that it's safe to take the lock in the irq-disabled + * section without disabling hardirqs. + * + * On !CONFIG_LOCKDEP they are equivalent to the normal + * irq disable/enable methods. + */ +static inline void disable_irq_nosync_lockdep(unsigned int irq) +{ + disable_irq_nosync(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void disable_irq_lockdep(unsigned int irq) +{ + disable_irq(irq); +#ifdef CONFIG_LOCKDEP + local_irq_disable(); +#endif +} + +static inline void enable_irq_lockdep(unsigned int irq) +{ +#ifdef CONFIG_LOCKDEP + local_irq_enable(); +#endif + enable_irq(irq); +} + /* IRQ wakeup (PM) control: */ extern int set_irq_wake(unsigned int irq, unsigned int on); @@ -50,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq) return set_irq_wake(irq, 0); } -#endif +#else /* !CONFIG_GENERIC_HARDIRQS */ +/* + * NOTE: non-genirq architectures, if they want to support the lock + * validator need to define the methods below in their asm/irq.h + * files, under an #ifdef CONFIG_LOCKDEP section. + */ +# ifndef CONFIG_LOCKDEP +# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) +# define disable_irq_lockdep(irq) disable_irq(irq) +# define enable_irq_lockdep(irq) enable_irq(irq) +# endif + +#endif /* CONFIG_GENERIC_HARDIRQS */ #ifndef __ARCH_SET_SOFTIRQ_PENDING #define set_softirq_pending(x) (local_softirq_pending() = (x)) @@ -86,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x) #define save_and_cli(x) save_and_cli(&x) #endif /* CONFIG_SMP */ -/* SoftIRQ primitives. */ -#define local_bh_disable() \ - do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) -#define __local_bh_enable() \ - do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0) - +extern void local_bh_disable(void); +extern void __local_bh_enable(void); +extern void _local_bh_enable(void); extern void local_bh_enable(void); +extern void local_bh_enable_ip(unsigned long ip); /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high frequency threaded job scheduling. For almost all the purposes diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 87a9fc0..5612dfe 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -55,6 +55,7 @@ struct resource_list { #define IORESOURCE_IRQ_LOWEDGE (1<<1) #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) #define IORESOURCE_IRQ_LOWLEVEL (1<<3) +#define IORESOURCE_IRQ_SHAREABLE (1<<4) /* ISA PnP DMA specific bits (IORESOURCE_BITS) */ #define IORESOURCE_DMA_TYPE_MASK (3<<0) diff --git a/include/linux/irq.h b/include/linux/irq.h index 0832149..b48eae3 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -24,41 +24,40 @@ /* * IRQ line status. + * + * Bits 0-16 are reserved for the IRQF_* bits in linux/interrupt.h + * + * IRQ types */ -#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */ -#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! 
*/ -#define IRQ_PENDING 4 /* IRQ pending - replay on enable */ -#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */ -#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */ -#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */ -#define IRQ_LEVEL 64 /* IRQ level triggered */ -#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */ +#define IRQ_TYPE_NONE 0x00000000 /* Default, unspecified type */ +#define IRQ_TYPE_EDGE_RISING 0x00000001 /* Edge rising type */ +#define IRQ_TYPE_EDGE_FALLING 0x00000002 /* Edge falling type */ +#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) +#define IRQ_TYPE_LEVEL_HIGH 0x00000004 /* Level high type */ +#define IRQ_TYPE_LEVEL_LOW 0x00000008 /* Level low type */ +#define IRQ_TYPE_SENSE_MASK 0x0000000f /* Mask of the above */ +#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */ + +/* Internal flags */ +#define IRQ_INPROGRESS 0x00010000 /* IRQ handler active - do not enter! */ +#define IRQ_DISABLED 0x00020000 /* IRQ disabled - do not enter! */ +#define IRQ_PENDING 0x00040000 /* IRQ pending - replay on enable */ +#define IRQ_REPLAY 0x00080000 /* IRQ has been replayed but not acked yet */ +#define IRQ_AUTODETECT 0x00100000 /* IRQ is being autodetected */ +#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */ +#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */ +#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */ #ifdef CONFIG_IRQ_PER_CPU -# define IRQ_PER_CPU 256 /* IRQ is per CPU */ +# define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */ # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) #else # define CHECK_IRQ_PER_CPU(var) 0 #endif -#define IRQ_NOPROBE 512 /* IRQ is not valid for probing */ -#define IRQ_NOREQUEST 1024 /* IRQ cannot be requested */ -#define IRQ_NOAUTOEN 2048 /* IRQ will not be enabled on request irq */ -#define IRQ_DELAYED_DISABLE \ - 4096 /* IRQ disable (masking) happens delayed. */ - -/* - * IRQ types, see also include/linux/interrupt.h - */ -#define IRQ_TYPE_NONE 0x0000 /* Default, unspecified type */ -#define IRQ_TYPE_EDGE_RISING 0x0001 /* Edge rising type */ -#define IRQ_TYPE_EDGE_FALLING 0x0002 /* Edge falling type */ -#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) -#define IRQ_TYPE_LEVEL_HIGH 0x0004 /* Level high type */ -#define IRQ_TYPE_LEVEL_LOW 0x0008 /* Level low type */ -#define IRQ_TYPE_SENSE_MASK 0x000f /* Mask of the above */ -#define IRQ_TYPE_SIMPLE 0x0010 /* Simple type */ -#define IRQ_TYPE_PERCPU 0x0020 /* Per CPU type */ -#define IRQ_TYPE_PROBE 0x0040 /* Probing in progress */ +#define IRQ_NOPROBE 0x02000000 /* IRQ is not valid for probing */ +#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */ +#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */ +#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. 
*/ struct proc_dir_entry; @@ -183,6 +182,10 @@ extern int setup_irq(unsigned int irq, struct irqaction *new); #ifdef CONFIG_GENERIC_HARDIRQS +#ifndef handle_dynamic_tick +# define handle_dynamic_tick(a) do { } while (0) +#endif + #ifdef CONFIG_SMP static inline void set_native_irq_info(int irq, cpumask_t mask) { @@ -348,8 +351,9 @@ extern int noirqdebug_setup(char *str); /* Checks whether the interrupt can be requested by request_irq(): */ extern int can_request_irq(unsigned int irq, unsigned long irqflags); -/* Dummy irq-chip implementation: */ +/* Dummy irq-chip implementations: */ extern struct irq_chip no_irq_chip; +extern struct irq_chip dummy_irq_chip; extern void set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h new file mode 100644 index 0000000..412e025 --- /dev/null +++ b/include/linux/irqflags.h @@ -0,0 +1,96 @@ +/* + * include/linux/irqflags.h + * + * IRQ flags tracing: follow the state of the hardirq and softirq flags and + * provide callbacks for transitions between ON and OFF states. + * + * This file gets included from lowlevel asm headers too, to provide + * wrapped versions of the local_irq_*() APIs, based on the + * raw_local_irq_*() macros from the lowlevel headers. + */ +#ifndef _LINUX_TRACE_IRQFLAGS_H +#define _LINUX_TRACE_IRQFLAGS_H + +#ifdef CONFIG_TRACE_IRQFLAGS + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); + extern void trace_softirqs_on(unsigned long ip); + extern void trace_softirqs_off(unsigned long ip); +# define trace_hardirq_context(p) ((p)->hardirq_context) +# define trace_softirq_context(p) ((p)->softirq_context) +# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) +# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) +# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +# define trace_softirq_enter() do { current->softirq_context++; } while (0) +# define trace_softirq_exit() do { current->softirq_context--; } while (0) +# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, +#else +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define trace_softirqs_on(ip) do { } while (0) +# define trace_softirqs_off(ip) do { } while (0) +# define trace_hardirq_context(p) 0 +# define trace_softirq_context(p) 0 +# define trace_hardirqs_enabled(p) 0 +# define trace_softirqs_enabled(p) 0 +# define trace_hardirq_enter() do { } while (0) +# define trace_hardirq_exit() do { } while (0) +# define trace_softirq_enter() do { } while (0) +# define trace_softirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT + +#include <asm/irqflags.h> + +#define local_irq_enable() \ + do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) +#define local_irq_disable() \ + do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) +#define local_irq_save(flags) \ + do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) + +#define local_irq_restore(flags) \ + do { \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_local_irq_restore(flags); \ + } \ + } while (0) +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ +/* + * The local_irq_*() APIs are equal to the raw_local_irq*() + * if !TRACE_IRQFLAGS. 
+ */ +# define raw_local_irq_disable() local_irq_disable() +# define raw_local_irq_enable() local_irq_enable() +# define raw_local_irq_save(flags) local_irq_save(flags) +# define raw_local_irq_restore(flags) local_irq_restore(flags) +#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +#define safe_halt() \ + do { \ + trace_hardirqs_on(); \ + raw_safe_halt(); \ + } while (0) + +#define local_save_flags(flags) raw_local_save_flags(flags) + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + \ + raw_local_save_flags(flags); \ + raw_irqs_disabled_flags(flags); \ +}) + +#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) +#endif /* CONFIG_X86 */ + +#endif diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 54e2549..849043c 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -57,10 +57,25 @@ do { \ #define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr) #endif -#define print_symbol(fmt, addr) \ -do { \ - __check_printsym_format(fmt, ""); \ - __print_symbol(fmt, addr); \ +static inline void print_symbol(const char *fmt, unsigned long addr) +{ + __check_printsym_format(fmt, ""); + __print_symbol(fmt, (unsigned long) + __builtin_extract_return_addr((void *)addr)); +} + +#ifndef CONFIG_64BIT +#define print_ip_sym(ip) \ +do { \ + printk("[<%08lx>]", ip); \ + print_symbol(" %s\n", ip); \ } while(0) +#else +#define print_ip_sym(ip) \ +do { \ + printk("[<%016lx>]", ip); \ + print_symbol(" %s\n", ip); \ +} while(0) +#endif #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h new file mode 100644 index 0000000..316e0fb --- /dev/null +++ b/include/linux/lockdep.h @@ -0,0 +1,353 @@ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * + * see Documentation/lockdep-design.txt for more details. 
+ */ +#ifndef __LINUX_LOCKDEP_H +#define __LINUX_LOCKDEP_H + +#include <linux/linkage.h> +#include <linux/list.h> +#include <linux/debug_locks.h> +#include <linux/stacktrace.h> + +#ifdef CONFIG_LOCKDEP + +/* + * Lock-class usage-state bits: + */ +enum lock_usage_bit +{ + LOCK_USED = 0, + LOCK_USED_IN_HARDIRQ, + LOCK_USED_IN_SOFTIRQ, + LOCK_ENABLED_SOFTIRQS, + LOCK_ENABLED_HARDIRQS, + LOCK_USED_IN_HARDIRQ_READ, + LOCK_USED_IN_SOFTIRQ_READ, + LOCK_ENABLED_SOFTIRQS_READ, + LOCK_ENABLED_HARDIRQS_READ, + LOCK_USAGE_STATES +}; + +/* + * Usage-state bitmasks: + */ +#define LOCKF_USED (1 << LOCK_USED) +#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) +#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) +#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS) +#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS) + +#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS) +#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) + +#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) +#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) +#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ) +#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ) + +#define LOCKF_ENABLED_IRQS_READ \ + (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ) +#define LOCKF_USED_IN_IRQ_READ \ + (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +/* + * Lock-classes are keyed via unique addresses, by embedding the + * lockclass-key into the kernel (or module) .data section. (For + * static locks we use the lock address itself as the key.) + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +struct lock_class_key { + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; +}; + +/* + * The lock-class itself: + */ +struct lock_class { + /* + * class-hash: + */ + struct list_head hash_entry; + + /* + * global list of all lock-classes: + */ + struct list_head lock_entry; + + struct lockdep_subclass_key *key; + unsigned int subclass; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + struct stack_trace usage_traces[LOCK_USAGE_STATES]; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. + */ + struct list_head locks_after, locks_before; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + unsigned int version; + + /* + * Statistics counter: + */ + unsigned long ops; + + const char *name; + int name_version; +}; + +/* + * Map the lock object (the lock instance) to the lock-class object. + * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class[MAX_LOCKDEP_SUBCLASSES]; + const char *name; +}; + +/* + * Every lock has a list of other locks that were taken after it. + * We only grow the list, never remove from it: + */ +struct lock_list { + struct list_head entry; + struct lock_class *class; + struct stack_trace trace; +}; + +/* + * We record lock dependency chains, so that we can cache them: + */ +struct lock_chain { + struct list_head entry; + u64 chain_key; +}; + +struct held_lock { + /* + * One-way hash of the dependency chain up to this point. We + * hash the hashes step by step as the dependency chain grows. 
+ * + * We use it for dependency-caching and we skip detection + * passes and dependency-updates if there is a cache-hit, so + * it is absolutely critical for 100% coverage of the validator + * to have a unique key value for every unique dependency path + * that can occur in the system, to make a unique hash value + * as likely as possible - hence the 64-bit width. + * + * The task struct holds the current hash value (initialized + * with zero), here we store the previous hash value: + */ + u64 prev_chain_key; + struct lock_class *class; + unsigned long acquire_ip; + struct lockdep_map *instance; + + /* + * The lock-stack is unified in that the lock chains of interrupt + * contexts nest ontop of process context chains, but we 'separate' + * the hashes by starting with 0 if we cross into an interrupt + * context, and we also keep do not add cross-context lock + * dependencies - the lock usage graph walking covers that area + * anyway, and we'd just unnecessarily increase the number of + * dependencies otherwise. [Note: hardirq and softirq contexts + * are separated from each other too.] + * + * The following field is used to detect when we cross into an + * interrupt context: + */ + int irq_context; + int trylock; + int read; + int check; + int hardirqs_off; +}; + +/* + * Initialization, self-test and debugging-output methods: + */ +extern void lockdep_init(void); +extern void lockdep_info(void); +extern void lockdep_reset(void); +extern void lockdep_reset_lock(struct lockdep_map *lock); +extern void lockdep_free_key_range(void *start, unsigned long size); + +extern void lockdep_off(void); +extern void lockdep_on(void); +extern int lockdep_internal(void); + +/* + * These methods are used by specific locking variants (spinlocks, + * rwlocks, mutexes and rwsems) to pass init/acquire/release events + * to lockdep: + */ + +extern void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key); + +/* + * Reinitialize a lock key - for cases where there is special locking or + * special initialization of locks so that the validator gets the scope + * of dependencies wrong: they are either too broad (they need a class-split) + * or they are too narrow (they suffer from a false class-split): + */ +#define lockdep_set_class(lock, key) \ + lockdep_init_map(&(lock)->dep_map, #key, key) +#define lockdep_set_class_and_name(lock, key, name) \ + lockdep_init_map(&(lock)->dep_map, name, key) + +/* + * Acquire a lock. + * + * Values for "read": + * + * 0: exclusive (write) acquire + * 1: read-acquire (no recursion allowed) + * 2: read-acquire with same-instance recursion allowed + * + * Values for check: + * + * 0: disabled + * 1: simple checks (freeing, held-at-exit-time, etc.) 
+ * 2: full validation + */ +extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, unsigned long ip); + +extern void lock_release(struct lockdep_map *lock, int nested, + unsigned long ip); + +# define INIT_LOCKDEP .lockdep_recursion = 0, + +#else /* !LOCKDEP */ + +static inline void lockdep_off(void) +{ +} + +static inline void lockdep_on(void) +{ +} + +static inline int lockdep_internal(void) +{ + return 0; +} + +# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_release(l, n, i) do { } while (0) +# define lockdep_init() do { } while (0) +# define lockdep_info() do { } while (0) +# define lockdep_init_map(lock, name, key) do { (void)(key); } while (0) +# define lockdep_set_class(lock, key) do { (void)(key); } while (0) +# define lockdep_set_class_and_name(lock, key, name) \ + do { (void)(key); } while (0) +# define INIT_LOCKDEP +# define lockdep_reset() do { debug_locks = 1; } while (0) +# define lockdep_free_key_range(start, size) do { } while (0) +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; +#endif /* !LOCKDEP */ + +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) +extern void early_init_irq_lock_class(void); +#else +# define early_init_irq_lock_class() do { } while (0) +#endif + +#ifdef CONFIG_TRACE_IRQFLAGS +extern void early_boot_irqs_off(void); +extern void early_boot_irqs_on(void); +#else +# define early_boot_irqs_off() do { } while (0) +# define early_boot_irqs_on() do { } while (0) +#endif + +/* + * For trivial one-depth nesting of a lock-class, the following + * global define can be used. (Subsystems with multiple levels + * of nesting should define their own lock-nesting subclasses.) + */ +#define SINGLE_DEPTH_NESTING 1 + +/* + * Map the dependency ops to NOP or to real lockdep ops, depending + * on the per lock-class debug mode: + */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# else +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# endif +# define spin_release(l, n, i) lock_release(l, n, i) +#else +# define spin_acquire(l, s, t, i) do { } while (0) +# define spin_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# else +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# endif +# define rwlock_release(l, n, i) lock_release(l, n, i) +#else +# define rwlock_acquire(l, s, t, i) do { } while (0) +# define rwlock_acquire_read(l, s, t, i) do { } while (0) +# define rwlock_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# else +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# endif +# define mutex_release(l, n, i) lock_release(l, n, i) +#else +# define mutex_acquire(l, s, t, i) do { } while (0) +# define mutex_release(l, n, i) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# else +# define rwsem_acquire(l, s, t, i) 
lock_acquire(l, s, t, 0, 1, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# endif +# define rwsem_release(l, n, i) lock_release(l, n, i) +#else +# define rwsem_acquire(l, s, t, i) do { } while (0) +# define rwsem_acquire_read(l, s, t, i) do { } while (0) +# define rwsem_release(l, n, i) do { } while (0) +#endif + +#endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 7517952..990957e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -14,6 +14,7 @@ #include <linux/prio_tree.h> #include <linux/fs.h> #include <linux/mutex.h> +#include <linux/debug_locks.h> struct mempolicy; struct anon_vma; @@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm, } #endif /* CONFIG_PROC_FS */ -static inline void -debug_check_no_locks_freed(const void *from, unsigned long len) -{ - mutex_debug_check_no_locks_freed(from, len); - rt_mutex_debug_check_no_locks_freed(from, len); -} - #ifndef CONFIG_DEBUG_PAGEALLOC static inline void kernel_map_pages(struct page *page, int numpages, int enable) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 27e748e..656b588 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -150,6 +150,10 @@ struct zone { unsigned long lowmem_reserve[MAX_NR_ZONES]; #ifdef CONFIG_NUMA + /* + * zone reclaim becomes active if more unmapped pages exist. + */ + unsigned long min_unmapped_ratio; struct per_cpu_pageset *pageset[NR_CPUS]; #else struct per_cpu_pageset pageset[NR_CPUS]; @@ -414,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); +int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, + struct file *, void __user *, size_t *, loff_t *); #include <linux/topology.h> /* Returns the number of the current Node. */ diff --git a/include/linux/module.h b/include/linux/module.h index 9e9dc7c..d06c74f 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -358,6 +358,7 @@ static inline int module_is_live(struct module *mod) /* Is this address in a module? (second is with no locks, for oops) */ struct module *module_text_address(unsigned long addr); struct module *__module_text_address(unsigned long addr); +int is_module_address(unsigned long addr); /* Returns module and fills in value, defined and namebuf, or NULL if symnum out of range. */ @@ -496,6 +497,11 @@ static inline struct module *__module_text_address(unsigned long addr) return NULL; } +static inline int is_module_address(unsigned long addr) +{ + return 0; +} + /* Get/put a kernel symbol (calls should be symmetric) */ #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) #define symbol_put(x) do { } while(0) diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 7a7fbe8..1221b7c 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -19,21 +19,21 @@ /** * struct nand_bbt_descr - bad block table descriptor - * @param options options for this descriptor - * @param pages the page(s) where we find the bbt, used with + * @options: options for this descriptor + * @pages: the page(s) where we find the bbt, used with * option BBT_ABSPAGE when bbt is searched, * then we store the found bbts pages here. 
* Its an array and supports up to 8 chips now - * @param offs offset of the pattern in the oob area of the page - * @param veroffs offset of the bbt version counter in the oob are of the page - * @param version version read from the bbt page during scan - * @param len length of the pattern, if 0 no pattern check is performed - * @param maxblocks maximum number of blocks to search for a bbt. This number of - * blocks is reserved at the end of the device + * @offs: offset of the pattern in the oob area of the page + * @veroffs: offset of the bbt version counter in the oob area of the page + * @version: version read from the bbt page during scan + * @len: length of the pattern, if 0 no pattern check is performed + * @maxblocks: maximum number of blocks to search for a bbt. This + * number of blocks is reserved at the end of the device * where the tables are written. - * @param reserved_block_code if non-0, this pattern denotes a reserved + * @reserved_block_code: if non-0, this pattern denotes a reserved * (rather than bad) block in the stored bbt - * @param pattern pattern to identify bad block table or factory marked + * @pattern: pattern to identify bad block table or factory marked * good / bad blocks, can be NULL, if len = 0 * * Descriptor for the bad block table marker and the descriptor for the @@ -93,12 +93,15 @@ struct nand_bbt_descr { #define ONENAND_BADBLOCK_POS 0 /** - * struct bbt_info - [GENERIC] Bad Block Table data structure - * @param bbt_erase_shift [INTERN] number of address bits in a bbt entry - * @param badblockpos [INTERN] position of the bad block marker in the oob area - * @param bbt [INTERN] bad block table pointer - * @param badblock_pattern [REPLACEABLE] bad block scan pattern used for initial bad block scan - * @param priv [OPTIONAL] pointer to private bbm date + * struct bbm_info - [GENERIC] Bad Block Table data structure + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @badblockpos: [INTERN] position of the bad block marker in the oob area + * @options: options for this descriptor + * @bbt: [INTERN] bad block table pointer + * @isbad_bbt: function to determine if a block is bad + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for + * initial bad block scan + * @priv: [OPTIONAL] pointer to private bbm date */ struct bbm_info { int bbt_erase_shift; diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 9b7a2b5..94a443d 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -77,11 +77,11 @@ typedef enum { * * @len: number of bytes to write/read. When a data buffer is given * (datbuf != NULL) this is the number of data bytes. When - + no data buffer is available this is the number of oob bytes. + * no data buffer is available this is the number of oob bytes. * * @retlen: number of bytes written/read. When a data buffer is given * (datbuf != NULL) this is the number of data bytes. When - + no data buffer is available this is the number of oob bytes. + * no data buffer is available this is the number of oob bytes. 
* * @ooblen: number of oob bytes per page * @ooboffs: offset of oob data in the oob area (only relevant when diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 6655927..0b4cd2f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -202,7 +202,7 @@ typedef enum { struct nand_chip; /** - * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independend devices + * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices * @lock: protection lock * @active: the mtd device which holds the controller currently * @wq: wait queue to sleep on if a NAND operation is in progress @@ -223,12 +223,15 @@ struct nand_hw_control { * @total: total number of ecc bytes per page * @prepad: padding information for syndrome based ecc generators * @postpad: padding information for syndrome based ecc generators + * @layout: ECC layout control struct pointer * @hwctl: function to control hardware ecc generator. Must only * be provided if an hardware ECC is available * @calculate: function for ecc calculation or readback from ecc hardware * @correct: function for ecc correction, matching to ecc generator (sw/hw) * @read_page: function to read a page according to the ecc generator requirements * @write_page: function to write a page according to the ecc generator requirements + * @read_oob: function to read chip OOB data + * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { nand_ecc_modes_t mode; @@ -300,11 +303,15 @@ struct nand_buffers { * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready * @ecc: [BOARDSPECIFIC] ecc control ctructure + * @buffers: buffer structure for read/write + * @hwcontrol: platform-specific hardware control structure + * @ops: oob operation operands * @erase_cmd: [INTERN] erase command write function, selectable due to AND support * @scan_bbt: [REPLACEABLE] function to scan bad block table * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress * @state: [INTERN] the current state of the NAND device + * @oob_poi: poison value buffer * @page_shift: [INTERN] number of address bits in a page (column address bits) * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry @@ -400,7 +407,6 @@ struct nand_chip { /** * struct nand_flash_dev - NAND Flash Device ID Structure - * * @name: Identify the device type * @id: device ID code * @pagesize: Pagesize in bytes. Either 256 or 512 or 0 @@ -519,9 +525,8 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, /** * struct platform_nand_chip - chip level device structure - * * @nr_chips: max. 
number of chips to scan for - * @chip_offs: chip number offset + * @chip_offset: chip number offset * @nr_partitions: number of partitions pointed to by partitions (or zero) * @partitions: mtd partition list * @chip_delay: R/B delay value in us @@ -542,11 +547,10 @@ struct platform_nand_chip { /** * struct platform_nand_ctrl - controller level device structure - * * @hwcontrol: platform specific hardware control structure * @dev_ready: platform specific function to read ready/busy pin * @select_chip: platform specific chip select function - * @priv_data: private data to transport driver specific settings + * @priv: private data to transport driver specific settings * * All fields are optional and depend on the hardware driver requirements */ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 9ce9a48..1f49721 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -23,7 +23,7 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips); /* Free resources held by the OneNAND device */ extern void onenand_release(struct mtd_info *mtd); -/** +/* * onenand_state_t - chip states * Enumeration for OneNAND flash chip state */ @@ -42,9 +42,9 @@ typedef enum { /** * struct onenand_bufferram - OneNAND BufferRAM Data - * @param block block address in BufferRAM - * @param page page address in BufferRAM - * @param valid valid flag + * @block: block address in BufferRAM + * @page: page address in BufferRAM + * @valid: valid flag */ struct onenand_bufferram { int block; @@ -54,32 +54,43 @@ struct onenand_bufferram { /** * struct onenand_chip - OneNAND Private Flash Chip Data - * @param base [BOARDSPECIFIC] address to access OneNAND - * @param chipsize [INTERN] the size of one chip for multichip arrays - * @param device_id [INTERN] device ID - * @param verstion_id [INTERN] version ID - * @param options [BOARDSPECIFIC] various chip options. 
They can partly be set to inform onenand_scan about - * @param erase_shift [INTERN] number of address bits in a block - * @param page_shift [INTERN] number of address bits in a page - * @param ppb_shift [INTERN] number of address bits in a pages per block - * @param page_mask [INTERN] a page per block mask - * @param bufferam_index [INTERN] BufferRAM index - * @param bufferam [INTERN] BufferRAM info - * @param readw [REPLACEABLE] hardware specific function for read short - * @param writew [REPLACEABLE] hardware specific function for write short - * @param command [REPLACEABLE] hardware specific function for writing commands to the chip - * @param wait [REPLACEABLE] hardware specific function for wait on ready - * @param read_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area - * @param write_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area - * @param read_word [REPLACEABLE] hardware specific function for read register of OneNAND - * @param write_word [REPLACEABLE] hardware specific function for write register of OneNAND - * @param scan_bbt [REPLACEALBE] hardware specific function for scaning Bad block Table - * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip - * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress - * @param state [INTERN] the current state of the OneNAND device - * @param ecclayout [REPLACEABLE] the default ecc placement scheme - * @param bbm [REPLACEABLE] pointer to Bad Block Management - * @param priv [OPTIONAL] pointer to private chip date + * @base: [BOARDSPECIFIC] address to access OneNAND + * @chipsize: [INTERN] the size of one chip for multichip arrays + * @device_id: [INTERN] device ID + * @density_mask: chip density, used for DDP devices + * @verstion_id: [INTERN] version ID + * @options: [BOARDSPECIFIC] various chip options. 
They can + * partly be set to inform onenand_scan about + * @erase_shift: [INTERN] number of address bits in a block + * @page_shift: [INTERN] number of address bits in a page + * @ppb_shift: [INTERN] number of address bits in a pages per block + * @page_mask: [INTERN] a page per block mask + * @bufferram_index: [INTERN] BufferRAM index + * @bufferram: [INTERN] BufferRAM info + * @readw: [REPLACEABLE] hardware specific function for read short + * @writew: [REPLACEABLE] hardware specific function for write short + * @command: [REPLACEABLE] hardware specific function for writing + * commands to the chip + * @wait: [REPLACEABLE] hardware specific function for wait on ready + * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area + * @read_word: [REPLACEABLE] hardware specific function for read + * register of OneNAND + * @write_word: [REPLACEABLE] hardware specific function for write + * register of OneNAND + * @mmcontrol: sync burst read function + * @block_markbad: function to mark a block as bad + * @scan_bbt: [REPLACEALBE] hardware specific function for scanning + * Bad block Table + * @chip_lock: [INTERN] spinlock used to protect access to this + * structure and the chip + * @wq: [INTERN] wait queue to sleep on if a OneNAND + * operation is in progress + * @state: [INTERN] the current state of the OneNAND device + * @page_buf: data buffer + * @ecclayout: [REPLACEABLE] the default ecc placement scheme + * @bbm: [REPLACEABLE] pointer to Bad Block Management + * @priv: [OPTIONAL] pointer to private chip date */ struct onenand_chip { void __iomem *base; @@ -147,9 +158,9 @@ struct onenand_chip { #define ONENAND_MFR_SAMSUNG 0xec /** - * struct nand_manufacturers - NAND Flash Manufacturer ID Structure - * @param name: Manufacturer name - * @param id: manufacturer ID code of device. + * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. 
*/ struct onenand_manufacturers { int id; diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h index 8b5769f..2537285 100644 --- a/include/linux/mutex-debug.h +++ b/include/linux/mutex-debug.h @@ -2,22 +2,22 @@ #define __LINUX_MUTEX_DEBUG_H #include <linux/linkage.h> +#include <linux/lockdep.h> /* * Mutexes - debugging helpers: */ -#define __DEBUG_MUTEX_INITIALIZER(lockname) \ - , .held_list = LIST_HEAD_INIT(lockname.held_list), \ - .name = #lockname , .magic = &lockname +#define __DEBUG_MUTEX_INITIALIZER(lockname) \ + , .magic = &lockname -#define mutex_init(sem) __mutex_init(sem, __FUNCTION__) +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) extern void FASTCALL(mutex_destroy(struct mutex *lock)); -extern void mutex_debug_show_all_locks(void); -extern void mutex_debug_show_held_locks(struct task_struct *filter); -extern void mutex_debug_check_no_locks_held(struct task_struct *task); -extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len); - #endif diff --git a/include/linux/mutex.h b/include/linux/mutex.h index f1ac507..27c48da 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -13,6 +13,7 @@ #include <linux/list.h> #include <linux/spinlock_types.h> #include <linux/linkage.h> +#include <linux/lockdep.h> #include <asm/atomic.h> @@ -50,11 +51,12 @@ struct mutex { struct list_head wait_list; #ifdef CONFIG_DEBUG_MUTEXES struct thread_info *owner; - struct list_head held_list; - unsigned long acquire_ip; const char *name; void *magic; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; /* @@ -74,24 +76,34 @@ struct mutex_waiter { # include <linux/mutex-debug.h> #else # define __DEBUG_MUTEX_INITIALIZER(lockname) -# define mutex_init(mutex) __mutex_init(mutex, NULL) +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) # define mutex_destroy(mutex) do { } while (0) -# define mutex_debug_show_all_locks() do { } while (0) -# define mutex_debug_show_held_locks(p) do { } while (0) -# define mutex_debug_check_no_locks_held(task) do { } while (0) -# define mutex_debug_check_no_locks_freed(from, len) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) #endif #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ , .wait_lock = SPIN_LOCK_UNLOCKED \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ - __DEBUG_MUTEX_INITIALIZER(lockname) } + __DEBUG_MUTEX_INITIALIZER(lockname) \ + __DEP_MAP_MUTEX_INITIALIZER(lockname) } #define DEFINE_MUTEX(mutexname) \ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) -extern void fastcall __mutex_init(struct mutex *lock, const char *name); +extern void __mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); /*** * mutex_is_locked - is the mutex locked @@ -110,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock) */ extern void fastcall mutex_lock(struct mutex *lock); extern int fastcall mutex_lock_interruptible(struct mutex *lock); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); +#else +# define mutex_lock_nested(lock, subclass) mutex_lock(lock) +#endif + /* * NOTE: mutex_trylock() follows the spin_trylock() convention, * not the 
down_trylock() convention! diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 51dbab9..7ff386a 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -65,7 +65,7 @@ struct raw_notifier_head { } while (0) #define ATOMIC_NOTIFIER_INIT(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = NULL } #define BLOCKING_NOTIFIER_INIT(name) { \ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b093479..685081c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -728,6 +728,7 @@ #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 0x8011 #define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 +#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 #define PCI_DEVICE_ID_TI_X515 0x8036 #define PCI_DEVICE_ID_TI_XX12 0x8039 #define PCI_DEVICE_ID_TI_1130 0xac12 @@ -1442,6 +1443,7 @@ #define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 +#define PCI_DEVICE_ID_RICOH_R5C822 0x0822 #define PCI_VENDOR_ID_DLINK 0x1186 #define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00 diff --git a/include/linux/poison.h b/include/linux/poison.h index a5347c0..3e628f9 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -44,6 +44,11 @@ /********** drivers/atm/ **********/ #define ATM_POISON_FREE 0x12 +#define ATM_POISON 0xdeadbeef + +/********** net/ **********/ +#define NEIGHBOR_DEAD 0xdeadbeef +#define NETFILTER_LINK_POISON 0xdead57ac /********** kernel/mutexes **********/ #define MUTEX_DEBUG_INIT 0x11 diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index fa4a3b8..5d41dee 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -29,8 +29,6 @@ struct rt_mutex { struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES int save_state; - struct list_head held_list_entry; - unsigned long acquire_ip; const char *name, *file; int line; void *magic; @@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); -#ifdef CONFIG_DEBUG_RT_MUTEXES -# define INIT_RT_MUTEX_DEBUG(tsk) \ - .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \ - .held_list_lock = SPIN_LOCK_UNLOCKED -#else -# define INIT_RT_MUTEX_DEBUG(tsk) -#endif - #ifdef CONFIG_RT_MUTEXES # define INIT_RT_MUTEXES(tsk) \ .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index f30f805..ae1fcad 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -32,30 +32,37 @@ struct rw_semaphore { __s32 activity; spinlock_t wait_lock; struct list_head wait_list; -#if RWSEM_DEBUG - int debug; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; #endif }; -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else -#define __RWSEM_DEBUG_INIT /* */ +# define __RWSEM_DEP_MAP_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ -{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } +{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) -extern void FASTCALL(init_rwsem(struct rw_semaphore *sem)); +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct 
lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + extern void FASTCALL(__down_read(struct rw_semaphore *sem)); extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem)); extern void FASTCALL(__down_write(struct rw_semaphore *sem)); +extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass)); extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem)); extern void FASTCALL(__up_read(struct rw_semaphore *sem)); extern void FASTCALL(__up_write(struct rw_semaphore *sem)); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f99fe90..658afb3 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -9,8 +9,6 @@ #include <linux/linkage.h> -#define RWSEM_DEBUG 0 - #ifdef __KERNEL__ #include <linux/types.h> @@ -26,89 +24,58 @@ struct rw_semaphore; #include <asm/rwsem.h> /* use an arch-specific implementation */ #endif -#ifndef rwsemtrace -#if RWSEM_DEBUG -extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str)); -#else -#define rwsemtrace(SEM,FMT) -#endif -#endif - /* * lock for reading */ -static inline void down_read(struct rw_semaphore *sem) -{ - might_sleep(); - rwsemtrace(sem,"Entering down_read"); - __down_read(sem); - rwsemtrace(sem,"Leaving down_read"); -} +extern void down_read(struct rw_semaphore *sem); /* * trylock for reading -- returns 1 if successful, 0 if contention */ -static inline int down_read_trylock(struct rw_semaphore *sem) -{ - int ret; - rwsemtrace(sem,"Entering down_read_trylock"); - ret = __down_read_trylock(sem); - rwsemtrace(sem,"Leaving down_read_trylock"); - return ret; -} +extern int down_read_trylock(struct rw_semaphore *sem); /* * lock for writing */ -static inline void down_write(struct rw_semaphore *sem) -{ - might_sleep(); - rwsemtrace(sem,"Entering down_write"); - __down_write(sem); - rwsemtrace(sem,"Leaving down_write"); -} +extern void down_write(struct rw_semaphore *sem); /* * trylock for writing -- returns 1 if successful, 0 if contention */ -static inline int down_write_trylock(struct rw_semaphore *sem) -{ - int ret; - rwsemtrace(sem,"Entering down_write_trylock"); - ret = __down_write_trylock(sem); - rwsemtrace(sem,"Leaving down_write_trylock"); - return ret; -} +extern int down_write_trylock(struct rw_semaphore *sem); /* * release a read lock */ -static inline void up_read(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering up_read"); - __up_read(sem); - rwsemtrace(sem,"Leaving up_read"); -} +extern void up_read(struct rw_semaphore *sem); /* * release a write lock */ -static inline void up_write(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering up_write"); - __up_write(sem); - rwsemtrace(sem,"Leaving up_write"); -} +extern void up_write(struct rw_semaphore *sem); /* * downgrade write lock to read lock */ -static inline void downgrade_write(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering downgrade_write"); - __downgrade_write(sem); - rwsemtrace(sem,"Leaving downgrade_write"); -} +extern void downgrade_write(struct rw_semaphore *sem); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* + * nested locking: + */ +extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern void down_write_nested(struct rw_semaphore *sem, int subclass); +/* + * Take/release a lock when not the owner will release it: + */ +extern void down_read_non_owner(struct rw_semaphore *sem); +extern void up_read_non_owner(struct rw_semaphore *sem); +#else +# define 
down_read_nested(sem, subclass) down_read(sem) +# define down_write_nested(sem, subclass) down_write(sem) +# define down_read_non_owner(sem) down_read(sem) +# define up_read_non_owner(sem) up_read(sem) +#endif #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index aaf7233..1c876e2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu); extern rwlock_t tasklist_lock; extern spinlock_t mmlist_lock; -typedef struct task_struct task_t; +struct task_struct; extern void sched_init(void); extern void sched_init_smp(void); -extern void init_idle(task_t *idle, int cpu); +extern void init_idle(struct task_struct *idle, int cpu); extern cpumask_t nohz_cpu_mask; @@ -383,7 +383,7 @@ struct signal_struct { wait_queue_head_t wait_chldexit; /* for wait4() */ /* current thread group signal load-balancing target: */ - task_t *curr_target; + struct task_struct *curr_target; /* shared signal handling: */ struct sigpending shared_pending; @@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t); extern struct user_struct root_user; #define INIT_USER (&root_user) -typedef struct prio_array prio_array_t; struct backing_dev_info; struct reclaim_state; @@ -699,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp); ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct*); +extern void prefetch_stack(struct task_struct *t); #else static inline void prefetch_stack(struct task_struct *t) { } #endif @@ -715,6 +714,8 @@ enum sleep_type { SLEEP_INTERRUPTED, }; +struct prio_array; + struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ struct thread_info *thread_info; @@ -732,7 +733,7 @@ struct task_struct { int load_weight; /* for niceness load balancing purposes */ int prio, static_prio, normal_prio; struct list_head run_list; - prio_array_t *array; + struct prio_array *array; unsigned short ioprio; unsigned int btrace_seq; @@ -865,16 +866,34 @@ struct task_struct { struct plist_head pi_waiters; /* Deadlock detection and priority inheritance handling */ struct rt_mutex_waiter *pi_blocked_on; -# ifdef CONFIG_DEBUG_RT_MUTEXES - spinlock_t held_list_lock; - struct list_head held_list_head; -# endif #endif #ifdef CONFIG_DEBUG_MUTEXES /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif +#ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + int hardirqs_enabled; + unsigned long hardirq_enable_ip; + unsigned int hardirq_enable_event; + unsigned long hardirq_disable_ip; + unsigned int hardirq_disable_event; + int softirqs_enabled; + unsigned long softirq_disable_ip; + unsigned int softirq_disable_event; + unsigned long softirq_enable_ip; + unsigned int softirq_enable_event; + int hardirq_context; + int softirq_context; +#endif +#ifdef CONFIG_LOCKDEP +# define MAX_LOCK_DEPTH 30UL + u64 curr_chain_key; + int lockdep_depth; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + unsigned int lockdep_recursion; +#endif /* journalling filesystem info */ void *journal_info; @@ -1013,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t) #define used_math() tsk_used_math(current) #ifdef CONFIG_SMP -extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); +extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask); #else -static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) +static 
inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { if (!cpu_isset(0, new_mask)) return -EINVAL; @@ -1024,7 +1043,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) #endif extern unsigned long long sched_clock(void); -extern unsigned long long current_sched_time(const task_t *current_task); +extern unsigned long long +current_sched_time(const struct task_struct *current_task); /* sched_exec is called by processes performing an exec */ #ifdef CONFIG_SMP @@ -1042,27 +1062,27 @@ static inline void idle_task_exit(void) {} extern void sched_idle_next(void); #ifdef CONFIG_RT_MUTEXES -extern int rt_mutex_getprio(task_t *p); -extern void rt_mutex_setprio(task_t *p, int prio); -extern void rt_mutex_adjust_pi(task_t *p); +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern void rt_mutex_adjust_pi(struct task_struct *p); #else -static inline int rt_mutex_getprio(task_t *p) +static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } # define rt_mutex_adjust_pi(p) do { } while (0) #endif -extern void set_user_nice(task_t *p, long nice); -extern int task_prio(const task_t *p); -extern int task_nice(const task_t *p); -extern int can_nice(const task_t *p, const int nice); -extern int task_curr(const task_t *p); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); +extern int task_nice(const struct task_struct *p); +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); -extern task_t *idle_task(int cpu); -extern task_t *curr_task(int cpu); -extern void set_curr_task(int cpu, task_t *p); +extern struct task_struct *idle_task(int cpu); +extern struct task_struct *curr_task(int cpu); +extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); @@ -1119,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk, #else static inline void kick_process(struct task_struct *tsk) { } #endif -extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); -extern void FASTCALL(sched_exit(task_t * p)); +extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags)); +extern void FASTCALL(sched_exit(struct task_struct * p)); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); @@ -1225,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int); extern void daemonize(const char *, ...); extern int allow_signal(int); extern int disallow_signal(int); -extern task_t *child_reaper; +extern struct task_struct *child_reaper; extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); -task_t *fork_idle(int); +struct task_struct *fork_idle(int); extern void set_task_comm(struct task_struct *tsk, char *from); extern void get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -extern void wait_task_inactive(task_t * p); +extern void wait_task_inactive(struct task_struct * p); #else #define wait_task_inactive(p) do { } while (0) #endif @@ -1261,13 +1281,13 @@ extern void wait_task_inactive(task_t * p); /* de_thread depends on thread_group_leader not being a pid based check */ #define thread_group_leader(p) (p == p->group_leader) -static inline task_t 
*next_thread(const task_t *p) +static inline struct task_struct *next_thread(const struct task_struct *p) { return list_entry(rcu_dereference(p->thread_group.next), - task_t, thread_group); + struct task_struct, thread_group); } -static inline int thread_group_empty(task_t *p) +static inline int thread_group_empty(struct task_struct *p) { return list_empty(&p->thread_group); } diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 7bc5c7c..4600093 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -38,9 +38,17 @@ typedef struct { * These macros triggered gcc-3.x compile-time problems. We think these are * OK now. Be cautious. */ -#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } -#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) +#define __SEQLOCK_UNLOCKED(lockname) \ + { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#define SEQLOCK_UNLOCKED \ + __SEQLOCK_UNLOCKED(old_style_seqlock_init) + +#define seqlock_init(x) \ + do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) /* Lock out other writers and update the count. * Acts like a normal spin_lock/unlock. diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index fc1104a..058cba7 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -216,10 +216,11 @@ struct uart_port { unsigned char __iomem *membase; /* read/write[bwl] */ unsigned int irq; /* irq number */ unsigned int uartclk; /* base uart clock */ - unsigned char fifosize; /* tx fifo size */ + unsigned int fifosize; /* tx fifo size */ unsigned char x_char; /* xon/xoff char */ unsigned char regshift; /* reg offset shift */ unsigned char iotype; /* io access style */ + unsigned char unused1; #define UPIO_PORT (0) #define UPIO_HUB6 (1) diff --git a/include/linux/signal.h b/include/linux/signal.h index 1e4ce72..117135e 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -9,32 +9,6 @@ #include <linux/spinlock.h> /* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - * SA_PROBEIRQ is set by callers when they expect sharing mismatches to occur - */ -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#define SA_PROBEIRQ 0x08000000 - -/* - * As above, these correspond to the IORESOURCE_IRQ_* defines in - * linux/ioport.h to select the interrupt line behaviour. When - * requesting an interrupt without specifying a SA_TRIGGER, the - * setting should be assumed to be "as already configured", which - * may be as per machine or firmware initialisation. - */ -#define SA_TRIGGER_LOW 0x00000008 -#define SA_TRIGGER_HIGH 0x00000004 -#define SA_TRIGGER_FALLING 0x00000002 -#define SA_TRIGGER_RISING 0x00000001 -#define SA_TRIGGER_MASK (SA_TRIGGER_HIGH|SA_TRIGGER_LOW|\ - SA_TRIGGER_RISING|SA_TRIGGER_FALLING) - -/* * Real Time signals may be queued. 
*/ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 57d7d49..3597b4f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -604,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +extern struct lock_class_key skb_queue_lock_key; + static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); + lockdep_set_class(&list->lock, &skb_queue_lock_key); list->prev = list->next = (struct sk_buff *)list; list->qlen = 0; } diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index ae23bee..31473db 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); /* * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): */ -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP # include <asm/spinlock.h> #else # include <linux/spinlock_up.h> #endif -#define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) -#define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key); +# define spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __spin_lock_init((lock), #lock, &__key); \ +} while (0) + +#else +# define spin_lock_init(lock) \ + do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK + extern void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key); +# define rwlock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key); \ +} while (0) +#else +# define rwlock_init(lock) \ + do { *(lock) = RW_LOCK_UNLOCKED; } while (0) +#endif #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) @@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); - extern void _raw_read_lock(rwlock_t *lock); extern int _raw_read_trylock(rwlock_t *lock); extern void _raw_read_unlock(rwlock_t *lock); @@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); extern int _raw_write_trylock(rwlock_t *lock); extern void _raw_write_unlock(rwlock_t *lock); #else -# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) -# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) +# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) +# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) +# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) +# define 
_raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) #endif #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) @@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); #define write_trylock(lock) __cond_lock(_write_trylock(lock)) #define spin_lock(lock) _spin_lock(lock) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +#else +# define spin_lock_nested(lock, subclass) _spin_lock(lock) +#endif + #define write_lock(lock) _write_lock(lock) #define read_lock(lock) _read_lock(lock) @@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); /* * We inline the unlock functions in the nondebug case: */ -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) +#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ + !defined(CONFIG_SMP) # define spin_unlock(lock) _spin_unlock(lock) # define read_unlock(lock) _read_unlock(lock) # define write_unlock(lock) _write_unlock(lock) -#else -# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) -# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock) -# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock) -#endif - -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) # define spin_unlock_irq(lock) _spin_unlock_irq(lock) # define read_unlock_irq(lock) _read_unlock_irq(lock) # define write_unlock_irq(lock) _write_unlock_irq(lock) #else +# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) +# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock) +# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock) # define spin_unlock_irq(lock) \ do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0) # define read_unlock_irq(lock) \ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 78e6989..b2c4f82 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr); #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); +void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) + __acquires(spinlock_t); void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index cd81cee..67faa04 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -49,6 +49,7 @@ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) #define _spin_lock(lock) __LOCK(lock) +#define _spin_lock_nested(lock, subclass) __LOCK(lock) #define _read_lock(lock) __LOCK(lock) #define _write_lock(lock) __LOCK(lock) #define _spin_lock_bh(lock) __LOCK_BH(lock) diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 9cb51e0..dc5fb69 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,6 +9,8 @@ * Released under the General Public License (GPL). 
*/ +#include <linux/lockdep.h> + #if defined(CONFIG_SMP) # include <asm/spinlock_types.h> #else @@ -24,6 +26,9 @@ typedef struct { unsigned int magic, owner_cpu; void *owner; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } spinlock_t; #define SPINLOCK_MAGIC 0xdead4ead @@ -37,31 +42,53 @@ typedef struct { unsigned int magic, owner_cpu; void *owner; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } rwlock_t; #define RWLOCK_MAGIC 0xdeaf1eed #define SPINLOCK_OWNER_INIT ((void *)-1L) +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + #ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_LOCK_UNLOCKED \ +# define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } -#define RW_LOCK_UNLOCKED \ + .owner_cpu = -1, \ + SPIN_DEP_MAP_INIT(lockname) } +#define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ - .owner_cpu = -1 } + .owner_cpu = -1, \ + RW_DEP_MAP_INIT(lockname) } #else -# define SPIN_LOCK_UNLOCKED \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } -#define RW_LOCK_UNLOCKED \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } +# define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + SPIN_DEP_MAP_INIT(lockname) } +#define __RW_LOCK_UNLOCKED(lockname) \ + (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + RW_DEP_MAP_INIT(lockname) } #endif -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED -#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED +#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 04135b0..27644af 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -12,10 +12,14 @@ * Released under the General Public License (GPL). 
*/ -#ifdef CONFIG_DEBUG_SPINLOCK +#if defined(CONFIG_DEBUG_SPINLOCK) || \ + defined(CONFIG_DEBUG_LOCK_ALLOC) typedef struct { volatile unsigned int slock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } @@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t; typedef struct { /* no debug version on UP */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { } diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 31accf2..ea54c4c 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -18,7 +18,6 @@ */ #ifdef CONFIG_DEBUG_SPINLOCK - #define __raw_spin_is_locked(x) ((x)->slock == 0) static inline void __raw_spin_lock(raw_spinlock_t *lock) diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h new file mode 100644 index 0000000..9cc81e5 --- /dev/null +++ b/include/linux/stacktrace.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_STACKTRACE_H +#define __LINUX_STACKTRACE_H + +#ifdef CONFIG_STACKTRACE +struct stack_trace { + unsigned int nr_entries, max_entries; + unsigned long *entries; +}; + +extern void save_stack_trace(struct stack_trace *trace, + struct task_struct *task, int all_contexts, + unsigned int skip); + +extern void print_stack_trace(struct stack_trace *trace, int spaces); +#else +# define save_stack_trace(trace, task, all, skip) do { } while (0) +# define print_stack_trace(trace) do { } while (0) +#endif + +#endif diff --git a/include/linux/swap.h b/include/linux/swap.h index cf6ca6e..5e59184 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -189,6 +189,7 @@ extern long vm_total_pages; #ifdef CONFIG_NUMA extern int zone_reclaim_mode; +extern int sysctl_min_unmapped_ratio; extern int zone_reclaim(struct zone *, gfp_t, unsigned int); #else #define zone_reclaim_mode 0 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 46e4d8f..e4b1a4d 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -188,7 +188,7 @@ enum VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */ VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */ VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ - VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ + VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */ VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ VM_VDSO_ENABLED=34, /* map VDSO into new processes? */ }; diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index dc7c621..46919f9 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -1,4 +1,4 @@ -#include <linux/version.h> +#include <linux/utsrelease.h> #include <linux/module.h> /* Simply sanity version stamp for modules. */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 544e855..794be7a 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -68,7 +68,7 @@ struct task_struct; wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .task_list = { &(name).task_list, &(name).task_list } } #define DECLARE_WAIT_QUEUE_HEAD(name) \ @@ -77,9 +77,15 @@ struct task_struct; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } +/* + * lockdep: we want one lock-class for all waitqueue locks. 
+ */ +extern struct lock_class_key waitqueue_lock_key; + static inline void init_waitqueue_head(wait_queue_head_t *q) { spin_lock_init(&q->lock); + lockdep_set_class(&q->lock, &waitqueue_lock_key); INIT_LIST_HEAD(&q->task_list); } diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h index 31329fc..1da3f7f 100644 --- a/include/mtd/mtd-abi.h +++ b/include/mtd/mtd-abi.h @@ -133,7 +133,7 @@ struct nand_ecclayout { }; /** - * struct mtd_ecc_stats - error correction status + * struct mtd_ecc_stats - error correction stats * * @corrected: number of corrected bits * @failed: number of uncorrectable errors diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 5ba72d9..2fec827 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -67,6 +67,9 @@ struct unix_skb_parms { #define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock) #define unix_state_wlock(s) spin_lock(&unix_sk(s)->lock) +#define unix_state_wlock_nested(s) \ + spin_lock_nested(&unix_sk(s)->lock, \ + SINGLE_DEPTH_NESTING) #define unix_state_wunlock(s) spin_unlock(&unix_sk(s)->lock) #ifdef __KERNEL__ diff --git a/include/net/ax25.h b/include/net/ax25.h index 7cd528e..69374cd 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -182,14 +182,26 @@ typedef struct { typedef struct ax25_route { struct ax25_route *next; - atomic_t ref; + atomic_t refcount; ax25_address callsign; struct net_device *dev; ax25_digi *digipeat; char ip_mode; - struct timer_list timer; } ax25_route; +static inline void ax25_hold_route(ax25_route *ax25_rt) +{ + atomic_inc(&ax25_rt->refcount); +} + +extern void __ax25_put_route(ax25_route *ax25_rt); + +static inline void ax25_put_route(ax25_route *ax25_rt) +{ + if (atomic_dec_and_test(&ax25_rt->refcount)) + __ax25_put_route(ax25_rt); +} + typedef struct { char slave; /* slave_mode? 
*/ struct timer_list slave_timer; /* timeout timer */ @@ -348,17 +360,11 @@ extern int ax25_check_iframes_acked(ax25_cb *, unsigned short); extern void ax25_rt_device_down(struct net_device *); extern int ax25_rt_ioctl(unsigned int, void __user *); extern struct file_operations ax25_route_fops; +extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); extern int ax25_rt_autobind(ax25_cb *, ax25_address *); -extern ax25_route *ax25_rt_find_route(ax25_route *, ax25_address *, - struct net_device *); extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *); extern void ax25_rt_free(void); -static inline void ax25_put_route(ax25_route *ax25_rt) -{ - atomic_dec(&ax25_rt->ref); -} - /* ax25_std_in.c */ extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 911ceb5..771d177 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -175,6 +175,6 @@ extern int hci_sock_cleanup(void); extern int bt_sysfs_init(void); extern void bt_sysfs_cleanup(void); -extern struct class bt_class; +extern struct class *bt_class; #endif /* __BLUETOOTH_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index b06a2d2..b2bdb1a 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -54,7 +54,8 @@ /* HCI device quirks */ enum { HCI_QUIRK_RESET_ON_INIT, - HCI_QUIRK_RAW_DEVICE + HCI_QUIRK_RAW_DEVICE, + HCI_QUIRK_FIXUP_BUFFER_SIZE }; /* HCI device flags */ @@ -100,9 +101,10 @@ enum { #define HCIINQUIRY _IOR('H', 240, int) /* HCI timeouts */ -#define HCI_CONN_TIMEOUT (HZ * 40) -#define HCI_DISCONN_TIMEOUT (HZ * 2) -#define HCI_CONN_IDLE_TIMEOUT (HZ * 60) +#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */ +#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ +#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ +#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ /* HCI Packet types */ #define HCI_COMMAND_PKT 0x01 @@ -144,7 +146,7 @@ enum { #define LMP_TACCURACY 0x10 #define LMP_RSWITCH 0x20 #define LMP_HOLD 0x40 -#define LMP_SNIF 0x80 +#define LMP_SNIFF 0x80 #define LMP_PARK 0x01 #define LMP_RSSI 0x02 @@ -159,13 +161,21 @@ enum { #define LMP_PSCHEME 0x02 #define LMP_PCONTROL 0x04 +#define LMP_SNIFF_SUBR 0x02 + +/* Connection modes */ +#define HCI_CM_ACTIVE 0x0000 +#define HCI_CM_HOLD 0x0001 +#define HCI_CM_SNIFF 0x0002 +#define HCI_CM_PARK 0x0003 + /* Link policies */ #define HCI_LP_RSWITCH 0x0001 #define HCI_LP_HOLD 0x0002 #define HCI_LP_SNIFF 0x0004 #define HCI_LP_PARK 0x0008 -/* Link mode */ +/* Link modes */ #define HCI_LM_ACCEPT 0x8000 #define HCI_LM_MASTER 0x0001 #define HCI_LM_AUTH 0x0002 @@ -191,7 +201,7 @@ struct hci_rp_read_loc_version { } __attribute__ ((packed)); #define OCF_READ_LOCAL_FEATURES 0x0003 -struct hci_rp_read_loc_features { +struct hci_rp_read_local_features { __u8 status; __u8 features[8]; } __attribute__ ((packed)); @@ -375,17 +385,32 @@ struct hci_cp_change_conn_link_key { } __attribute__ ((packed)); #define OCF_READ_REMOTE_FEATURES 0x001B -struct hci_cp_read_rmt_features { +struct hci_cp_read_remote_features { __le16 handle; } __attribute__ ((packed)); #define OCF_READ_REMOTE_VERSION 0x001D -struct hci_cp_read_rmt_version { +struct hci_cp_read_remote_version { __le16 handle; } __attribute__ ((packed)); /* Link Policy */ -#define OGF_LINK_POLICY 0x02 +#define OGF_LINK_POLICY 0x02 + +#define OCF_SNIFF_MODE 0x0003 +struct hci_cp_sniff_mode { + __le16 handle; + 
__le16 max_interval; + __le16 min_interval; + __le16 attempt; + __le16 timeout; +} __attribute__ ((packed)); + +#define OCF_EXIT_SNIFF_MODE 0x0004 +struct hci_cp_exit_sniff_mode { + __le16 handle; +} __attribute__ ((packed)); + #define OCF_ROLE_DISCOVERY 0x0009 struct hci_cp_role_discovery { __le16 handle; @@ -406,7 +431,7 @@ struct hci_rp_read_link_policy { __le16 policy; } __attribute__ ((packed)); -#define OCF_SWITCH_ROLE 0x000B +#define OCF_SWITCH_ROLE 0x000B struct hci_cp_switch_role { bdaddr_t bdaddr; __u8 role; @@ -422,6 +447,14 @@ struct hci_rp_write_link_policy { __le16 handle; } __attribute__ ((packed)); +#define OCF_SNIFF_SUBRATE 0x0011 +struct hci_cp_sniff_subrate { + __le16 handle; + __le16 max_latency; + __le16 min_remote_timeout; + __le16 min_local_timeout; +} __attribute__ ((packed)); + /* Status params */ #define OGF_STATUS_PARAM 0x05 @@ -581,15 +614,15 @@ struct hci_ev_link_key_notify { __u8 key_type; } __attribute__ ((packed)); -#define HCI_EV_RMT_FEATURES 0x0B -struct hci_ev_rmt_features { +#define HCI_EV_REMOTE_FEATURES 0x0B +struct hci_ev_remote_features { __u8 status; __le16 handle; __u8 features[8]; } __attribute__ ((packed)); -#define HCI_EV_RMT_VERSION 0x0C -struct hci_ev_rmt_version { +#define HCI_EV_REMOTE_VERSION 0x0C +struct hci_ev_remote_version { __u8 status; __le16 handle; __u8 lmp_ver; @@ -610,6 +643,16 @@ struct hci_ev_pscan_rep_mode { __u8 pscan_rep_mode; } __attribute__ ((packed)); +#define HCI_EV_SNIFF_SUBRATE 0x2E +struct hci_ev_sniff_subrate { + __u8 status; + __le16 handle; + __le16 max_tx_latency; + __le16 max_rx_latency; + __le16 max_remote_timeout; + __le16 max_local_timeout; +} __attribute__ ((packed)); + /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xFD struct hci_ev_stack_internal { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index bb9f81d..d84855f 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -31,10 +31,7 @@ #define HCI_PROTO_L2CAP 0 #define HCI_PROTO_SCO 1 -#define HCI_INIT_TIMEOUT (HZ * 10) - /* HCI Core structures */ - struct inquiry_data { bdaddr_t bdaddr; __u8 pscan_rep_mode; @@ -81,6 +78,10 @@ struct hci_dev { __u16 link_policy; __u16 link_mode; + __u32 idle_timeout; + __u16 sniff_min_interval; + __u16 sniff_max_interval; + unsigned long quirks; atomic_t cmd_cnt; @@ -123,7 +124,8 @@ struct hci_dev { atomic_t promisc; - struct class_device class_dev; + struct device *parent; + struct device dev; struct module *owner; @@ -145,18 +147,24 @@ struct hci_conn { bdaddr_t dst; __u16 handle; __u16 state; + __u8 mode; __u8 type; __u8 out; __u8 dev_class[3]; + __u8 features[8]; + __u16 interval; + __u16 link_policy; __u32 link_mode; + __u8 power_save; unsigned long pend; - + unsigned int sent; - + struct sk_buff_head data_q; - struct timer_list timer; - + struct timer_list disc_timer; + struct timer_list idle_timer; + struct hci_dev *hdev; void *l2cap_data; void *sco_data; @@ -211,7 +219,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data); enum { HCI_CONN_AUTH_PEND, HCI_CONN_ENCRYPT_PEND, - HCI_CONN_RSWITCH_PEND + HCI_CONN_RSWITCH_PEND, + HCI_CONN_MODE_CHANGE_PEND, }; static inline void hci_conn_hash_init(struct hci_dev *hdev) @@ -286,31 +295,27 @@ int hci_conn_encrypt(struct hci_conn *conn); int hci_conn_change_link_key(struct hci_conn *conn); int hci_conn_switch_role(struct hci_conn *conn, uint8_t role); -static inline void hci_conn_set_timer(struct hci_conn *conn, unsigned long timeout) -{ - 
mod_timer(&conn->timer, jiffies + timeout); -} - -static inline void hci_conn_del_timer(struct hci_conn *conn) -{ - del_timer(&conn->timer); -} +void hci_conn_enter_active_mode(struct hci_conn *conn); +void hci_conn_enter_sniff_mode(struct hci_conn *conn); static inline void hci_conn_hold(struct hci_conn *conn) { atomic_inc(&conn->refcnt); - hci_conn_del_timer(conn); + del_timer(&conn->disc_timer); } static inline void hci_conn_put(struct hci_conn *conn) { if (atomic_dec_and_test(&conn->refcnt)) { + unsigned long timeo; if (conn->type == ACL_LINK) { - unsigned long timeo = (conn->out) ? - HCI_DISCONN_TIMEOUT : HCI_DISCONN_TIMEOUT * 2; - hci_conn_set_timer(conn, timeo); + timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT); + if (!conn->out) + timeo *= 2; + del_timer(&conn->idle_timer); } else - hci_conn_set_timer(conn, HZ / 100); + timeo = msecs_to_jiffies(10); + mod_timer(&conn->disc_timer, jiffies + timeo); } } @@ -408,11 +413,13 @@ static inline int hci_recv_frame(struct sk_buff *skb) int hci_register_sysfs(struct hci_dev *hdev); void hci_unregister_sysfs(struct hci_dev *hdev); -#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev)) +#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev)) /* ----- LMP capabilities ----- */ -#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH) -#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT) +#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH) +#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT) +#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF) +#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR) /* ----- HCI protocols ----- */ struct hci_proto { diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h index 0575c59..bca19ca 100644 --- a/include/net/irda/irda_device.h +++ b/include/net/irda/irda_device.h @@ -160,7 +160,7 @@ typedef struct { int irq, irq2; /* Interrupts used */ int dma, dma2; /* DMA channel(s) used */ int fifo_size; /* FIFO size */ - int irqflags; /* interrupt flags (ie, SA_SHIRQ|SA_INTERRUPT) */ + int irqflags; /* interrupt flags (ie, IRQF_SHARED|IRQF_DISABLED) */ int direction; /* Link direction, used by some FIR drivers */ int enabled; /* Powered on? */ int suspended; /* Suspended by APM */ diff --git a/include/net/sock.h b/include/net/sock.h index 7b3d6b8..324b3ea 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -44,6 +44,7 @@ #include <linux/timer.h> #include <linux/cache.h> #include <linux/module.h> +#include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/skbuff.h> /* struct sk_buff */ #include <linux/security.h> @@ -78,14 +79,17 @@ typedef struct { spinlock_t slock; struct sock_iocb *owner; wait_queue_head_t wq; + /* + * We express the mutex-alike socket_lock semantics + * to the lock validator by explicitly managing + * the slock as a lock variant (in addition to + * the slock itself): + */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } socket_lock_t; -#define sock_lock_init(__sk) \ -do { spin_lock_init(&((__sk)->sk_lock.slock)); \ - (__sk)->sk_lock.owner = NULL; \ - init_waitqueue_head(&((__sk)->sk_lock.wq)); \ -} while(0) - struct sock; struct proto; @@ -747,6 +751,9 @@ extern void FASTCALL(release_sock(struct sock *sk)); /* BH context may only use the following locking interface. 
*/ #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) +#define bh_lock_sock_nested(__sk) \ + spin_lock_nested(&((__sk)->sk_lock.slock), \ + SINGLE_DEPTH_NESTING) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) extern struct sock *sk_alloc(int family, diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 253797c..55ebf03 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -47,10 +47,19 @@ enum iscsi_uevent_e { ISCSI_UEVENT_TRANSPORT_EP_POLL = UEVENT_BASE + 13, ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14, + ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15, + /* up events */ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2, ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3, + ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4, +}; + +enum iscsi_tgt_dscvr { + ISCSI_TGT_DSCVR_SEND_TARGETS = 1, + ISCSI_TGT_DSCVR_ISNS = 2, + ISCSI_TGT_DSCVR_SLP = 3, }; struct iscsi_uevent { @@ -116,6 +125,17 @@ struct iscsi_uevent { struct msg_transport_disconnect { uint64_t ep_handle; } ep_disconnect; + struct msg_tgt_dscvr { + enum iscsi_tgt_dscvr type; + uint32_t host_no; + /* + * enable = 1 to establish a new connection + * with the server. enable = 0 to disconnect + * from the server. Used primarily to switch + * from one iSNS server to another. + */ + uint32_t enable; + } tgt_dscvr; } u; union { /* messages k -> u */ @@ -138,6 +158,10 @@ struct iscsi_uevent { uint32_t cid; uint32_t error; /* enum iscsi_err */ } connerror; + struct msg_session_destroyed { + uint32_t host_no; + uint32_t sid; + } d_session; struct msg_transport_connect_ret { uint64_t handle; } ep_connect_ret; diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index cbf7e58..ba27608 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -157,6 +157,11 @@ struct iscsi_conn { int max_xmit_dlength; /* target_max_recv_dsl */ int hdrdgst_en; int datadgst_en; + int ifmarker_en; + int ofmarker_en; + /* values userspace uses to id a conn */ + int persistent_port; + char *persistent_address; /* MIB-statistics */ uint64_t txdata_octets; @@ -196,8 +201,8 @@ struct iscsi_session { int pdu_inorder_en; int dataseq_inorder_en; int erl; - int ifmarker_en; - int ofmarker_en; + int tpgt; + char *targetname; /* control data */ struct iscsi_transport *tt; @@ -240,6 +245,10 @@ iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *, extern void iscsi_session_teardown(struct iscsi_cls_session *); extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *); extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *); +extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen); +extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session, + enum iscsi_param param, char *buf); #define session_to_cls(_sess) \ hostdata_session(_sess->host->hostdata) @@ -255,6 +264,8 @@ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int); extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, int); extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); +extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf); /* * pdu and task processing diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index e46cd40..371f70d 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -143,7 +143,7 @@ struct scsi_cmnd { extern struct scsi_cmnd 
*scsi_get_command(struct scsi_device *, gfp_t); extern void scsi_put_command(struct scsi_cmnd *); -extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int); +extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); extern void scsi_finish_command(struct scsi_cmnd *cmd); extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index a42efd6..b3dd90f 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -542,6 +542,9 @@ struct Scsi_Host { */ unsigned ordered_tag:1; + /* task mgmt function in progress */ + unsigned tmf_in_progress:1; + /* * Optional work queue to be utilized by the transport */ @@ -619,7 +622,8 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost) { return shost->shost_state == SHOST_RECOVERY || shost->shost_state == SHOST_CANCEL_RECOVERY || - shost->shost_state == SHOST_DEL_RECOVERY; + shost->shost_state == SHOST_DEL_RECOVERY || + shost->tmf_in_progress; } extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index b684426..5a3df1d 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -34,6 +34,7 @@ struct iscsi_cls_conn; struct iscsi_conn; struct iscsi_cmd_task; struct iscsi_mgmt_task; +struct sockaddr; /** * struct iscsi_transport - iSCSI Transport template @@ -46,7 +47,12 @@ struct iscsi_mgmt_task; * @bind_conn: associate this connection with existing iSCSI session * and specified transport descriptor * @destroy_conn: destroy inactive iSCSI connection - * @set_param: set iSCSI Data-Path operational parameter + * @set_param: set iSCSI parameter. Return 0 on success, -ENODATA + * when param is not supported, and a -Exx value on other + * error. + * @get_param get iSCSI parameter. Must return number of bytes + * copied to buffer on success, -ENODATA when param + * is not supported, and a -Exx value on other error * @start_conn: set connection to be operational * @stop_conn: suspend/recover/terminate connection * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text. 
@@ -97,15 +103,11 @@ struct iscsi_transport { void (*stop_conn) (struct iscsi_cls_conn *conn, int flag); void (*destroy_conn) (struct iscsi_cls_conn *conn); int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param, - uint32_t value); + char *buf, int buflen); int (*get_conn_param) (struct iscsi_cls_conn *conn, - enum iscsi_param param, uint32_t *value); + enum iscsi_param param, char *buf); int (*get_session_param) (struct iscsi_cls_session *session, - enum iscsi_param param, uint32_t *value); - int (*get_conn_str_param) (struct iscsi_cls_conn *conn, - enum iscsi_param param, char *buf); - int (*get_session_str_param) (struct iscsi_cls_session *session, - enum iscsi_param param, char *buf); + enum iscsi_param param, char *buf); int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size); void (*get_stats) (struct iscsi_cls_conn *conn, @@ -127,6 +129,8 @@ struct iscsi_transport { uint64_t *ep_handle); int (*ep_poll) (uint64_t ep_handle, int timeout_ms); void (*ep_disconnect) (uint64_t ep_handle); + int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no, + uint32_t enable, struct sockaddr *dst_addr); }; /* @@ -155,13 +159,6 @@ struct iscsi_cls_conn { struct iscsi_transport *transport; uint32_t cid; /* connection id */ - /* portal/group values we got during discovery */ - char *persistent_address; - int persistent_port; - /* portal/group values we are currently using */ - char *address; - int port; - int active; /* must be accessed with the connlock */ struct device dev; /* sysfs transport/container device */ struct mempool_zone *z_error; @@ -185,16 +182,11 @@ struct iscsi_cls_session { struct list_head host_list; struct iscsi_transport *transport; - /* iSCSI values used as unique id by userspace. 
*/ - char *targetname; - int tpgt; - /* recovery fields */ int recovery_tmo; struct work_struct recovery_work; int target_id; - int channel; int sid; /* session id */ void *dd_data; /* LLD private data */ @@ -207,8 +199,10 @@ struct iscsi_cls_session { #define iscsi_session_to_shost(_session) \ dev_to_shost(_session->dev.parent) +#define starget_to_session(_stgt) \ + iscsi_dev_to_session(_stgt->dev.parent) + struct iscsi_host { - int next_target_id; struct list_head sessions; struct mutex mutex; }; @@ -216,8 +210,17 @@ struct iscsi_host { /* * session and connection functions that can be used by HW iSCSI LLDs */ +extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost, + struct iscsi_transport *transport); +extern int iscsi_add_session(struct iscsi_cls_session *session, + unsigned int target_id); +extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn); +extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn); extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost, - struct iscsi_transport *t, int channel); + struct iscsi_transport *t, + unsigned int target_id); +extern void iscsi_remove_session(struct iscsi_cls_session *session); +extern void iscsi_free_session(struct iscsi_cls_session *session); extern int iscsi_destroy_session(struct iscsi_cls_session *session); extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, uint32_t cid); @@ -225,4 +228,5 @@ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); extern void iscsi_unblock_session(struct iscsi_cls_session *session); extern void iscsi_block_session(struct iscsi_cls_session *session); + #endif diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 93cfb4b..e3c503c 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -3,6 +3,7 @@ #include <linux/transport_class.h> #include <linux/types.h> +#include <linux/mutex.h> struct scsi_transport_template; struct sas_rphy; @@ -55,7 +56,6 @@ struct sas_phy { enum sas_linkrate minimum_linkrate; enum sas_linkrate maximum_linkrate_hw; enum sas_linkrate maximum_linkrate; - u8 port_identifier; /* internal state */ unsigned int local_attached : 1; @@ -66,8 +66,8 @@ struct sas_phy { u32 loss_of_dword_sync_count; u32 phy_reset_problem_count; - /* the other end of the link */ - struct sas_rphy *rphy; + /* for the list of phys belonging to a port */ + struct list_head port_siblings; }; #define dev_to_phy(d) \ @@ -124,6 +124,24 @@ struct sas_expander_device { #define rphy_to_expander_device(r) \ container_of((r), struct sas_expander_device, rphy) +struct sas_port { + struct device dev; + + u8 port_identifier; + int num_phys; + + /* the other end of the link */ + struct sas_rphy *rphy; + + struct mutex phy_list_mutex; + struct list_head phy_list; +}; + +#define dev_to_sas_port(d) \ + container_of((d), struct sas_port, dev) +#define transport_class_to_sas_port(cdev) \ + dev_to_sas_port((cdev)->dev) + /* The functions by which the transport class and the driver communicate */ struct sas_function_template { int (*get_linkerrors)(struct sas_phy *); @@ -133,6 +151,7 @@ struct sas_function_template { }; +void sas_remove_children(struct device *); extern void sas_remove_host(struct Scsi_Host *); extern struct sas_phy *sas_phy_alloc(struct device *, int); @@ -141,13 +160,21 @@ extern int sas_phy_add(struct sas_phy *); extern void sas_phy_delete(struct sas_phy *); extern int scsi_is_sas_phy(const struct device *); -extern struct sas_rphy 
*sas_end_device_alloc(struct sas_phy *); -extern struct sas_rphy *sas_expander_alloc(struct sas_phy *, enum sas_device_type); +extern struct sas_rphy *sas_end_device_alloc(struct sas_port *); +extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type); void sas_rphy_free(struct sas_rphy *); extern int sas_rphy_add(struct sas_rphy *); extern void sas_rphy_delete(struct sas_rphy *); extern int scsi_is_sas_rphy(const struct device *); +struct sas_port *sas_port_alloc(struct device *, int); +int sas_port_add(struct sas_port *); +void sas_port_free(struct sas_port *); +void sas_port_delete(struct sas_port *); +void sas_port_add_phy(struct sas_port *, struct sas_phy *); +void sas_port_delete_phy(struct sas_port *, struct sas_phy *); +int scsi_is_sas_port(const struct device *); + extern struct scsi_transport_template * sas_attach_transport(struct sas_function_template *); extern void sas_release_transport(struct scsi_transport_template *); diff --git a/include/sound/initval.h b/include/sound/initval.h index d45170b..2ae76ef 100644 --- a/include/sound/initval.h +++ b/include/sound/initval.h @@ -62,7 +62,7 @@ static int snd_legacy_find_free_irq(int *irq_table) { while (*irq_table != -1) { if (!request_irq(*irq_table, snd_legacy_empty_irq_handler, - SA_INTERRUPT | SA_PROBEIRQ, "ALSA Test IRQ", + IRQF_DISABLED | IRQF_PROBE_SHARED, "ALSA Test IRQ", (void *) irq_table)) { free_irq(*irq_table, (void *) irq_table); return *irq_table; |
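
The recurring change across these headers is the lockdep conversion: lock initializers such as mutex_init(), spin_lock_init() and init_rwsem() now declare a static struct lock_class_key at each call site, and the new _nested() variants (spin_lock_nested, mutex_lock_nested, down_read_nested) let callers annotate deliberate same-class nesting, as bh_lock_sock_nested() and unix_state_wlock_nested() do above. A minimal sketch, not taken from the patch itself, of how code built on these headers would use the new primitives; struct foo, foo_init() and foo_lock_pair() are hypothetical names used only for illustration:

/*
 * Illustrative sketch only; assumes the lockdep-enabled headers
 * introduced by this series (CONFIG_DEBUG_LOCK_ALLOC falls back to the
 * plain primitives otherwise).
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo {
	struct mutex lock;		/* hypothetical example object */
	spinlock_t queue_lock;
};

static void foo_init(struct foo *f)
{
	/*
	 * Each init macro now expands to a static struct lock_class_key
	 * at this call site, so every initialization site becomes a
	 * distinct class for the lock validator.
	 */
	mutex_init(&f->lock);
	spin_lock_init(&f->queue_lock);
}

/*
 * Taking two locks of the same class (e.g. parent before child) would
 * look like a self-deadlock to plain lockdep; the _nested() variant
 * marks the second acquisition as a deliberate subclass, mirroring
 * what bh_lock_sock_nested() and unix_state_wlock_nested() do above.
 */
static void foo_lock_pair(struct foo *parent, struct foo *child)
{
	mutex_lock(&parent->lock);
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

	/* ... operate on both objects ... */

	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}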