author    Tony Luck <tony.luck@intel.com>    2005-07-25 22:46:44 (GMT)
committer Tony Luck <tony.luck@intel.com>    2005-07-25 22:46:44 (GMT)
commit    05cb784c81a0fd1f97732156ea464bd392ce875a (patch)
tree      24122979b411dcec6ff390fc9ae84ad9413128e9 /include
parent    3190186362466658f01b2e354e639378ce07e1a9 (diff)
parent    6b6a93c6876ea1c530d5d3f68e3678093a27fab0 (diff)
download  linux-fsl-qoriq-05cb784c81a0fd1f97732156ea464bd392ce875a.tar.xz
Auto merge with /home/aegl/GIT/linus
Diffstat (limited to 'include')
-rw-r--r--   include/asm-arm/arch-imx/imxfb.h                       1
-rw-r--r--   include/asm-arm/locks.h                                4
-rw-r--r--   include/asm-arm/spinlock.h                            35
-rw-r--r--   include/asm-i386/i387.h                               26
-rw-r--r--   include/asm-sparc64/bitops.h                          56
-rw-r--r--   include/asm-sparc64/ptrace.h                           5
-rw-r--r--   include/asm-sparc64/rwsem.h                           48
-rw-r--r--   include/asm-sparc64/spitfire.h                       130
-rw-r--r--   include/asm-sparc64/system.h                          11
-rw-r--r--   include/asm-sparc64/thread_info.h                     12
-rw-r--r--   include/asm-sparc64/timer.h                           41
-rw-r--r--   include/linux/netfilter_ipv4/ip_conntrack.h            3
-rw-r--r--   include/linux/netfilter_ipv4/ip_conntrack_helper.h     7
-rw-r--r--   include/linux/netlink.h                                2
-rw-r--r--   include/linux/pci_ids.h                                2
-rw-r--r--   include/linux/skbuff.h                                 3
-rw-r--r--   include/linux/tc_ematch/tc_em_meta.h                   5
-rw-r--r--   include/net/sctp/sctp.h                                7
-rw-r--r--   include/net/xfrm.h                                     2
19 files changed, 102 insertions(+), 298 deletions(-)
diff --git a/include/asm-arm/arch-imx/imxfb.h b/include/asm-arm/arch-imx/imxfb.h
index 2346d45..7dbc7bb 100644
--- a/include/asm-arm/arch-imx/imxfb.h
+++ b/include/asm-arm/arch-imx/imxfb.h
@@ -25,6 +25,7 @@ struct imxfb_mach_info {
u_int pcr;
u_int pwmr;
u_int lscr1;
+ u_int dmacr;
u_char * fixed_screen_cpu;
dma_addr_t fixed_screen_dma;
diff --git a/include/asm-arm/locks.h b/include/asm-arm/locks.h
index c26298f..9cb33fc 100644
--- a/include/asm-arm/locks.h
+++ b/include/asm-arm/locks.h
@@ -61,7 +61,7 @@
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
-" teq lr, #0\n" \
+" cmp lr, #0\n" \
" movle ip, %0\n" \
" blle " #wake \
: \
@@ -100,7 +100,7 @@
__asm__ __volatile__( \
"@ up_op_read\n" \
"1: ldrex lr, [%0]\n" \
-" add lr, lr, %1\n" \
+" adds lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 1823236..9705d5e 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -79,7 +79,8 @@ typedef struct {
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(x) do { *(x) + RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)
/*
* Write locks are easy - we just set bit 31. When unlocking, we can
@@ -100,6 +101,21 @@ static inline void _raw_write_lock(rwlock_t *rw)
: "cc", "memory");
}
+static inline int _raw_write_trylock(rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]"
+ : "=&r" (tmp)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc", "memory");
+
+ return tmp == 0;
+}
+
static inline void _raw_write_unlock(rwlock_t *rw)
{
__asm__ __volatile__(
@@ -138,6 +154,8 @@ static inline void _raw_read_lock(rwlock_t *rw)
static inline void _raw_read_unlock(rwlock_t *rw)
{
+ unsigned long tmp, tmp2;
+
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -151,19 +169,4 @@ static inline void _raw_read_unlock(rwlock_t *rw)
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-static inline int _raw_write_trylock(rwlock_t *rw)
-{
- unsigned long tmp;
-
- __asm__ __volatile__(
-"1: ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc", "memory");
-
- return tmp == 0;
-}
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index f6feb98..6747006 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -19,10 +19,21 @@
extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *);
+
/*
* FPU lazy state save handling...
*/
-extern void restore_fpu( struct task_struct *tsk );
+
+/*
+ * The "nop" is needed to make the instructions the same
+ * length.
+ */
+#define restore_fpu(tsk) \
+ alternative_input( \
+ "nop ; frstor %1", \
+ "fxrstor %1", \
+ X86_FEATURE_FXSR, \
+ "m" ((tsk)->thread.i387.fxsave))
extern void kernel_fpu_begin(void);
#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
@@ -32,13 +43,12 @@ extern void kernel_fpu_begin(void);
*/
static inline void __save_init_fpu( struct task_struct *tsk )
{
- if ( cpu_has_fxsr ) {
- asm volatile( "fxsave %0 ; fnclex"
- : "=m" (tsk->thread.i387.fxsave) );
- } else {
- asm volatile( "fnsave %0 ; fwait"
- : "=m" (tsk->thread.i387.fsave) );
- }
+ alternative_input(
+ "fnsave %1 ; fwait ;" GENERIC_NOP2,
+ "fxsave %1 ; fnclex",
+ X86_FEATURE_FXSR,
+ "m" (tsk->thread.i387.fxsave)
+ :"memory");
tsk->thread_info->status &= ~TS_USEDFPU;
}
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 9d722dc..9c5e719 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -20,52 +20,52 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
/* "non-atomic" versions... */
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m |= (1UL << (nr & 63));
}
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m &= ~(1UL << (nr & 63));
}
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m ^= (1UL << (nr & 63));
}
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
- long old = *m;
- long mask = (1UL << (nr & 63));
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *m;
+ unsigned long mask = (1UL << (nr & 63));
*m = (old | mask);
return ((old & mask) != 0);
}
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
- long old = *m;
- long mask = (1UL << (nr & 63));
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *m;
+ unsigned long mask = (1UL << (nr & 63));
*m = (old & ~mask);
return ((old & mask) != 0);
}
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
- volatile unsigned long *m = addr + (nr >> 6);
- long old = *m;
- long mask = (1UL << (nr & 63));
+ unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *m;
+ unsigned long mask = (1UL << (nr & 63));
*m = (old ^ mask);
return ((old & mask) != 0);
@@ -79,13 +79,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
#define smp_mb__after_clear_bit() barrier()
#endif
-static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
+static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
{
- return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL;
+ return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
}
/* The easy/cheese version for now. */
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
@@ -103,7 +103,7 @@ static __inline__ unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
unsigned long result = 0;
@@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static __inline__ int ffs(int x)
+static inline int ffs(int x)
{
if (!x)
return 0;
@@ -158,7 +158,7 @@ static __inline__ int ffs(int x)
#ifdef ULTRA_HAS_POPULATION_COUNT
-static __inline__ unsigned int hweight64(unsigned long w)
+static inline unsigned int hweight64(unsigned long w)
{
unsigned int res;
@@ -166,7 +166,7 @@ static __inline__ unsigned int hweight64(unsigned long w)
return res;
}
-static __inline__ unsigned int hweight32(unsigned int w)
+static inline unsigned int hweight32(unsigned int w)
{
unsigned int res;
@@ -174,7 +174,7 @@ static __inline__ unsigned int hweight32(unsigned int w)
return res;
}
-static __inline__ unsigned int hweight16(unsigned int w)
+static inline unsigned int hweight16(unsigned int w)
{
unsigned int res;
@@ -182,7 +182,7 @@ static __inline__ unsigned int hweight16(unsigned int w)
return res;
}
-static __inline__ unsigned int hweight8(unsigned int w)
+static inline unsigned int hweight8(unsigned int w)
{
unsigned int res;
@@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *,
#define test_and_clear_le_bit(nr,addr) \
test_and_clear_bit((nr) ^ 0x38, (addr))
-static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
+static inline int test_le_bit(int nr, __const__ unsigned long * addr)
{
int mask;
__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
index 2d2b5a1..6194f77 100644
--- a/include/asm-sparc64/ptrace.h
+++ b/include/asm-sparc64/ptrace.h
@@ -94,8 +94,9 @@ struct sparc_trapf {
#define STACKFRAME32_SZ sizeof(struct sparc_stackf32)
#ifdef __KERNEL__
-#define force_successful_syscall_return() \
- set_thread_flag(TIF_SYSCALL_SUCCESS)
+#define force_successful_syscall_return() \
+do { current_thread_info()->syscall_noerror = 1; \
+} while (0)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
#ifdef CONFIG_SMP
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index a1cc94f..4568ee4 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -46,54 +46,14 @@ extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
-static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
- int tmp = delta;
-
- __asm__ __volatile__(
- "1:\tlduw [%2], %%g1\n\t"
- "add %%g1, %1, %%g7\n\t"
- "cas [%2], %%g1, %%g7\n\t"
- "cmp %%g1, %%g7\n\t"
- "membar #StoreLoad | #StoreStore\n\t"
- "bne,pn %%icc, 1b\n\t"
- " nop\n\t"
- "mov %%g7, %0\n\t"
- : "=&r" (tmp)
- : "0" (tmp), "r" (sem)
- : "g1", "g7", "memory", "cc");
-
- return tmp + delta;
-}
-
-#define rwsem_atomic_add rwsem_atomic_update
-
-static __inline__ __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
-{
- u32 old = (sem->count & 0xffff0000) | (u32) __old;
- u32 new = (old & 0xffff0000) | (u32) __new;
- u32 prev;
-
-again:
- __asm__ __volatile__("cas [%2], %3, %0\n\t"
- "membar #StoreLoad | #StoreStore"
- : "=&r" (prev)
- : "0" (new), "r" (sem), "r" (old)
- : "memory");
-
- /* To give the same semantics as x86 cmpxchgw, keep trying
- * if only the upper 16-bits changed.
- */
- if (prev != old &&
- ((prev & 0xffff) == (old & 0xffff)))
- goto again;
-
- return prev & 0xffff;
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
-static __inline__ signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new)
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
- return cmpxchg(&sem->count,old,new);
+ atomic_add(delta, (atomic_t *)(&sem->count));
}
#endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 1aa9327..962638c 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -56,52 +56,6 @@ extern void cheetah_enable_pcache(void);
SPITFIRE_HIGHEST_LOCKED_TLBENT : \
CHEETAH_HIGHEST_LOCKED_TLBENT)
-static __inline__ unsigned long spitfire_get_isfsr(void)
-{
- unsigned long ret;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=r" (ret)
- : "r" (TLB_SFSR), "i" (ASI_IMMU));
- return ret;
-}
-
-static __inline__ unsigned long spitfire_get_dsfsr(void)
-{
- unsigned long ret;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=r" (ret)
- : "r" (TLB_SFSR), "i" (ASI_DMMU));
- return ret;
-}
-
-static __inline__ unsigned long spitfire_get_sfar(void)
-{
- unsigned long ret;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=r" (ret)
- : "r" (DMMU_SFAR), "i" (ASI_DMMU));
- return ret;
-}
-
-static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
-{
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
-}
-
-static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
-{
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "membar #Sync"
- : /* no outputs */
- : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
-}
-
/* The data cache is write through, so this just invalidates the
* specified line.
*/
@@ -193,90 +147,6 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
"i" (ASI_ITLB_DATA_ACCESS));
}
-/* Spitfire hardware assisted TLB flushes. */
-
-/* Context level flushes. */
-static __inline__ void spitfire_flush_dtlb_primary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x40), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_primary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x40), "i" (ASI_IMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_dtlb_secondary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x50), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_secondary_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x50), "i" (ASI_IMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x60), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_nucleus_context(void)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (0x60), "i" (ASI_IMMU_DEMAP));
-}
-
-/* Page level flushes. */
-static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page), "i" (ASI_IMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
-}
-
-static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
-{
- __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
- "membar #Sync"
- : /* No outputs */
- : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
-}
-
static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index f9be2c5..ee4bdfc 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -190,24 +190,23 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"wrpr %%g1, %%cwp\n\t" \
"ldx [%%g6 + %3], %%o6\n\t" \
"ldub [%%g6 + %2], %%o5\n\t" \
- "ldx [%%g6 + %4], %%o7\n\t" \
+ "ldub [%%g6 + %4], %%o7\n\t" \
"mov %%g6, %%l2\n\t" \
"wrpr %%o5, 0x0, %%wstate\n\t" \
"ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
"ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
"wrpr %%g0, 0x94, %%pstate\n\t" \
"mov %%l2, %%g6\n\t" \
- "ldx [%%g6 + %7], %%g4\n\t" \
+ "ldx [%%g6 + %6], %%g4\n\t" \
"wrpr %%g0, 0x96, %%pstate\n\t" \
- "andcc %%o7, %6, %%g0\n\t" \
- "beq,pt %%icc, 1f\n\t" \
+ "brz,pt %%o7, 1f\n\t" \
" mov %%g7, %0\n\t" \
"b,a ret_from_syscall\n\t" \
"1:\n\t" \
: "=&r" (last) \
: "0" (next->thread_info), \
- "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \
- "i" (_TIF_NEWCHILD), "i" (TI_TASK) \
+ "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
+ "i" (TI_CWP), "i" (TI_TASK) \
: "cc", \
"g1", "g2", "g3", "g7", \
"l2", "l3", "l4", "l5", "l6", "l7", \
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index a1d25c0..352d994 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -47,7 +47,9 @@ struct thread_info {
struct pt_regs *kregs;
struct exec_domain *exec_domain;
int preempt_count; /* 0 => preemptable, <0 => BUG */
- int __pad;
+ __u8 new_child;
+ __u8 syscall_noerror;
+ __u16 __pad;
unsigned long *utraps;
@@ -87,6 +89,8 @@ struct thread_info {
#define TI_KREGS 0x00000028
#define TI_EXEC_DOMAIN 0x00000030
#define TI_PRE_COUNT 0x00000038
+#define TI_NEW_CHILD 0x0000003c
+#define TI_SYS_NOERROR 0x0000003d
#define TI_UTRAPS 0x00000040
#define TI_REG_WINDOW 0x00000048
#define TI_RWIN_SPTRS 0x000003c8
@@ -219,10 +223,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
#define TIF_NEWSIGNALS 6 /* wants new-style signals */
#define TIF_32BIT 7 /* 32-bit binary */
-#define TIF_NEWCHILD 8 /* just-spawned child process */
+/* flag bit 8 is available */
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
-#define TIF_SYSCALL_SUCCESS 11
+/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
* an immediate value in instructions such as andcc.
@@ -239,10 +243,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
#define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS)
#define _TIF_32BIT (1<<TIF_32BIT)
-#define _TIF_NEWCHILD (1<<TIF_NEWCHILD)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SYSCALL_SUCCESS (1<<TIF_SYSCALL_SUCCESS)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h
index ba33a2b..edc8e08 100644
--- a/include/asm-sparc64/timer.h
+++ b/include/asm-sparc64/timer.h
@@ -9,49 +9,8 @@
#include <linux/types.h>
-/* How timers work:
- *
- * On uniprocessors we just use counter zero for the system wide
- * ticker, this performs thread scheduling, clock book keeping,
- * and runs timer based events. Previously we used the Ultra
- * %tick interrupt for this purpose.
- *
- * On multiprocessors we pick one cpu as the master level 10 tick
- * processor. Here this counter zero tick handles clock book
- * keeping and timer events only. Each Ultra has it's level
- * 14 %tick interrupt set to fire off as well, even the master
- * tick cpu runs this locally. This ticker performs thread
- * scheduling, system/user tick counting for the current thread,
- * and also profiling if enabled.
- */
-
#include <linux/config.h>
-/* Two timers, traditionally steered to PIL's 10 and 14 respectively.
- * But since INO packets are used on sun5, we could use any PIL level
- * we like, however for now we use the normal ones.
- *
- * The 'reg' and 'interrupts' properties for these live in nodes named
- * 'counter-timer'. The first of three 'reg' properties describe where
- * the sun5_timer registers are. The other two I have no idea. (XXX)
- */
-struct sun5_timer {
- u64 count0;
- u64 limit0;
- u64 count1;
- u64 limit1;
-};
-
-#define SUN5_LIMIT_ENABLE 0x80000000
-#define SUN5_LIMIT_TOZERO 0x40000000
-#define SUN5_LIMIT_ZRESTART 0x20000000
-#define SUN5_LIMIT_CMASK 0x1fffffff
-
-/* Given a HZ value, set the limit register to so that the timer IRQ
- * gets delivered that often.
- */
-#define SUN5_HZ_TO_LIMIT(__hz) (1000000/(__hz))
-
struct sparc64_tick_ops {
void (*init_tick)(unsigned long);
unsigned long (*get_tick)(void);
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 3781192..f8da7dd 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -197,6 +197,9 @@ struct ip_conntrack_expect
/* Timer function; deletes the expectation. */
struct timer_list timeout;
+ /* Usage count. */
+ atomic_t use;
+
#ifdef CONFIG_IP_NF_NAT_NEEDED
/* This is the original per-proto part, used to map the
* expected connection the way the recipient expects. */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper.h b/include/linux/netfilter_ipv4/ip_conntrack_helper.h
index b1bbba0..3692daa 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_helper.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_helper.h
@@ -30,9 +30,10 @@ extern int ip_conntrack_helper_register(struct ip_conntrack_helper *);
extern void ip_conntrack_helper_unregister(struct ip_conntrack_helper *);
/* Allocate space for an expectation: this is mandatory before calling
- ip_conntrack_expect_related. */
-extern struct ip_conntrack_expect *ip_conntrack_expect_alloc(void);
-extern void ip_conntrack_expect_free(struct ip_conntrack_expect *exp);
+ ip_conntrack_expect_related. You will have to call put afterwards. */
+extern struct ip_conntrack_expect *
+ip_conntrack_expect_alloc(struct ip_conntrack *master);
+extern void ip_conntrack_expect_put(struct ip_conntrack_expect *exp);
/* Add an expected connection: can have more than one per connection */
extern int ip_conntrack_expect_related(struct ip_conntrack_expect *exp);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 2f0c085..70c2a9d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -5,7 +5,7 @@
#include <linux/types.h>
#define NETLINK_ROUTE 0 /* Routing/device hook */
-#define NETLINK_SKIP 1 /* Reserved for ENskip */
+#define NETLINK_W1 1 /* 1-wire subsystem */
#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
#define NETLINK_FIREWALL 3 /* Firewalling hook */
#define NETLINK_TCPDIAG 4 /* TCP socket monitoring */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 27348c2..9a28b31 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1872,6 +1872,7 @@
#define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
#define PCI_VENDOR_ID_SIIG 0x131f
+#define PCI_SUBVENDOR_ID_SIIG 0x131f
#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000
#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001
#define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002
@@ -1909,6 +1910,7 @@
#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060
#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061
#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062
+#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
#define PCI_VENDOR_ID_RADISYS 0x1331
#define PCI_DEVICE_ID_RADISYS_ENP2611 0x0030
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5d4a990..0061c94 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -502,7 +502,8 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
*
* %NULL is returned on a memory allocation failure.
*/
-static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
+ unsigned int __nocast pri)
{
might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) {
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index bcb762d..081b1ee 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -41,19 +41,14 @@ enum
TCF_META_ID_LOADAVG_1,
TCF_META_ID_LOADAVG_2,
TCF_META_ID_DEV,
- TCF_META_ID_INDEV,
- TCF_META_ID_REALDEV,
TCF_META_ID_PRIORITY,
TCF_META_ID_PROTOCOL,
- TCF_META_ID_SECURITY, /* obsolete */
TCF_META_ID_PKTTYPE,
TCF_META_ID_PKTLEN,
TCF_META_ID_DATALEN,
TCF_META_ID_MACLEN,
TCF_META_ID_NFMARK,
TCF_META_ID_TCINDEX,
- TCF_META_ID_TCVERDICT,
- TCF_META_ID_TCCLASSID,
TCF_META_ID_RTCLASSID,
TCF_META_ID_RTIIF,
TCF_META_ID_SK_FAMILY,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 4a26adf..e1d5ec1 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -167,15 +167,12 @@ void sctp_unhash_established(struct sctp_association *);
void sctp_hash_endpoint(struct sctp_endpoint *);
void sctp_unhash_endpoint(struct sctp_endpoint *);
struct sock *sctp_err_lookup(int family, struct sk_buff *,
- struct sctphdr *, struct sctp_endpoint **,
- struct sctp_association **,
+ struct sctphdr *, struct sctp_association **,
struct sctp_transport **);
-void sctp_err_finish(struct sock *, struct sctp_endpoint *,
- struct sctp_association *);
+void sctp_err_finish(struct sock *, struct sctp_association *);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu);
void sctp_icmp_proto_unreachable(struct sock *sk,
- struct sctp_endpoint *ep,
struct sctp_association *asoc,
struct sctp_transport *t);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 029522a..868ef88 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -803,7 +803,7 @@ struct xfrm_algo_desc {
/* XFRM tunnel handlers. */
struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
- void (*err_handler)(struct sk_buff *skb, void *info);
+ void (*err_handler)(struct sk_buff *skb, __u32 info);
};
struct xfrm6_tunnel {