author     Herbert Xu <herbert@gondor.apana.org.au>  2010-05-26 01:36:58 (GMT)
committer  Herbert Xu <herbert@gondor.apana.org.au>  2010-05-26 01:36:58 (GMT)
commit     50d1e9302bab7d35dae7146f8c468e0943015616 (patch)
tree       fa05320f4a297bd582686574cf94ba444e264b3f /arch/ia64
parent     7cc2835083aedfde42de02301005a5555e00c4b1 (diff)
parent     dc4ccfd15d4fc7a91ddf222bc5eed5cc4bcf10e6 (diff)
download   linux-fsl-qoriq-50d1e9302bab7d35dae7146f8c468e0943015616.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/crypto-2.6
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                          3
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c            8
-rw-r--r--  arch/ia64/include/asm/atomic.h             4
-rw-r--r--  arch/ia64/include/asm/bitops.h            11
-rw-r--r--  arch/ia64/include/asm/mmzone.h             4
-rw-r--r--  arch/ia64/include/asm/percpu.h             5
-rw-r--r--  arch/ia64/include/asm/thread_info.h        2
-rw-r--r--  arch/ia64/kernel/acpi.c                    8
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c   10
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                9
-rw-r--r--  arch/ia64/kernel/salinfo.c                 5
-rw-r--r--  arch/ia64/kernel/time.c                   12
-rw-r--r--  arch/ia64/kernel/topology.c                4
-rw-r--r--  arch/ia64/kernel/unaligned.c              24
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c                   8
-rw-r--r--  arch/ia64/kvm/vmm.c                        2
-rw-r--r--  arch/ia64/mm/fault.c                      13
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c       13
18 files changed, 61 insertions, 84 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 4d4f418..9676100 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -59,9 +59,6 @@ config NEED_DMA_MAP_STATE
config SWIOTLB
bool
-config IOMMU_HELPER
- bool
-
config GENERIC_LOCKBREAK
def_bool n
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e14c492..4ce8d13 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2046,13 +2046,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
struct ioc *ioc;
acpi_status status;
u64 hpa, length;
- struct acpi_device_info *dev_info;
+ struct acpi_device_info *adi;
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
return 1;
- status = acpi_get_object_info(device->handle, &dev_info);
+ status = acpi_get_object_info(device->handle, &adi);
if (ACPI_FAILURE(status))
return 1;
@@ -2060,13 +2060,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
* root bridges, and its CSR space includes the IOC function.
*/
- if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) {
+ if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
hpa += ZX1_IOC_OFFSET;
/* zx1 based systems default to kernel page size iommu pages */
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- kfree(dev_info);
+ kfree(adi);
/*
* default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 88405cb..4e19484 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,8 +21,8 @@
#define ATOMIC_INIT(i) ((atomic_t) { (i) })
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
-#define atomic_read(v) ((v)->counter)
-#define atomic64_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#define atomic64_set(v,i) (((v)->counter) = (i))
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 6ebc229..9da3df6 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -437,17 +437,18 @@ __fls (unsigned long x)
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-static __inline__ unsigned long
-hweight64 (unsigned long x)
+static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x);
return result;
}
-#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
-#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
-#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
+#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
+#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
+#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful))
+
+#include <asm-generic/bitops/const_hweight.h>
#endif /* __KERNEL__ */
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
index f2ca320..e0de617 100644
--- a/arch/ia64/include/asm/mmzone.h
+++ b/arch/ia64/include/asm/mmzone.h
@@ -19,16 +19,12 @@
static inline int pfn_to_nid(unsigned long pfn)
{
-#ifdef CONFIG_NUMA
extern int paddr_to_nid(unsigned long);
int nid = paddr_to_nid(pfn << PAGE_SHIFT);
if (nid < 0)
return 0;
else
return nid;
-#else
- return 0;
-#endif
}
#ifdef CONFIG_IA64_DIG /* DIG systems are small */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index f7c00a5..1bd4082 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -39,7 +39,10 @@ extern void *per_cpu_init(void);
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient.
*/
-#define __ia64_per_cpu_var(var) var
+#define __ia64_per_cpu_var(var) (*({ \
+ __verify_pcpu_ptr(&(var)); \
+ ((typeof(var) __kernel __force *)&(var)); \
+}))
#include <asm-generic/percpu.h>
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 8ce2e38..b6a5ba2 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -102,7 +102,7 @@ struct thread_info {
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
#define TIF_NOTIFY_RESUME 6 /* resumption notification requested */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 17
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
#define TIF_FREEZE 20 /* is freezing for suspend */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 4d1a7e9..c6c90f3 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -785,6 +785,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
return 0;
}
+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
+{
+ if (isa_irq >= 16)
+ return -1;
+ *gsi = isa_irq;
+ return 0;
+}
+
/*
* ACPI based hotplug CPU support
*/
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index b0b4e6e..22f6152 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -113,7 +113,7 @@ processor_get_freq (
dprintk("processor_get_freq\n");
saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (smp_processor_id() != cpu)
goto migrate_end;
@@ -121,7 +121,7 @@ processor_get_freq (
ret = processor_get_pstate(&value);
if (ret) {
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
printk(KERN_WARNING "get performance failed with error %d\n",
ret);
ret = 0;
@@ -131,7 +131,7 @@ processor_get_freq (
ret = (clock_freq*1000);
migrate_end:
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return ret;
}
@@ -151,7 +151,7 @@ processor_set_freq (
dprintk("processor_set_freq\n");
saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (smp_processor_id() != cpu) {
retval = -EAGAIN;
goto migrate_end;
@@ -208,7 +208,7 @@ processor_set_freq (
retval = 0;
migrate_end:
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return (retval);
}
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 6404793..f14c35f 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -29,6 +29,7 @@
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
+#include <linux/ratelimit.h>
#include <asm/delay.h>
#include <asm/intrinsics.h>
@@ -467,13 +468,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
sp = ia64_getreg(_IA64_REG_SP);
if ((sp - bsp) < 1024) {
- static unsigned char count;
- static long last_time;
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
- if (time_after(jiffies, last_time + 5 * HZ))
- count = 0;
- if (++count < 5) {
- last_time = jiffies;
+ if (__ratelimit(&ratelimit)) {
printk("ia64_handle_irq: DANGER: less than "
"1KB of free stack space!!\n"
"(bsp=0x%lx, sp=%lx)\n", bsp, sp);
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e6676fc..aa8b5fa 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -404,10 +404,9 @@ static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
cpumask_t save_cpus_allowed = current->cpus_allowed;
- cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
- set_cpus_allowed(current, new_cpus_allowed);
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
(*fn)(arg);
- set_cpus_allowed(current, save_cpus_allowed);
+ set_cpus_allowed_ptr(current, &save_cpus_allowed);
}
static void
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 47a1927..653b3c4 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -430,18 +430,16 @@ static int __init rtc_init(void)
}
module_init(rtc_init);
+void read_persistent_clock(struct timespec *ts)
+{
+ efi_gettimeofday(ts);
+}
+
void __init
time_init (void)
{
register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
- efi_gettimeofday(&xtime);
ia64_init_itm();
-
- /*
- * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
- * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
/*
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 28f299d..0baa1bb 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -361,12 +361,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
return 0;
oldmask = current->cpus_allowed;
- retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (unlikely(retval))
return retval;
retval = cpu_cache_sysfs_init(cpu);
- set_cpus_allowed(current, oldmask);
+ set_cpus_allowed_ptr(current, &oldmask);
if (unlikely(retval < 0))
return retval;
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 776dd40..622772b 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tty.h>
+#include <linux/ratelimit.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
@@ -1283,24 +1284,9 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
/*
* Make sure we log the unaligned access, so that user/sysadmin can notice it and
* eventually fix the program. However, we don't want to do that for every access so we
- * pace it with jiffies. This isn't really MP-safe, but it doesn't really have to be
- * either...
+ * pace it with jiffies.
*/
-static int
-within_logging_rate_limit (void)
-{
- static unsigned long count, last_time;
-
- if (time_after(jiffies, last_time + 5 * HZ))
- count = 0;
- if (count < 5) {
- last_time = jiffies;
- count++;
- return 1;
- }
- return 0;
-
-}
+static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5);
void
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
@@ -1337,7 +1323,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (!no_unaligned_warning &&
!(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
- within_logging_rate_limit())
+ __ratelimit(&logging_rate_limit))
{
char buf[200]; /* comm[] is at most 16 bytes... */
size_t len;
@@ -1370,7 +1356,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
}
}
} else {
- if (within_logging_rate_limit()) {
+ if (__ratelimit(&logging_rate_limit)) {
printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
ifa, regs->cr_iip + ipsr->ri);
if (unaligned_dump_stack)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 7f3c0a2..d5f4e91 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -979,11 +979,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
+ r = -ENXIO;
if (irqchip_in_kernel(kvm)) {
__s32 status;
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
if (ioctl == KVM_IRQ_LINE_STATUS) {
+ r = -EFAULT;
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
sizeof irq_event))
@@ -1379,7 +1381,7 @@ static void kvm_release_vm_pages(struct kvm *kvm)
int i, j;
unsigned long base_gfn;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;
@@ -1535,8 +1537,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
goto out;
if (copy_to_user(user_stack, stack,
- sizeof(struct kvm_ia64_vcpu_stack)))
+ sizeof(struct kvm_ia64_vcpu_stack))) {
+ r = -EFAULT;
goto out;
+ }
break;
}
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index 7a62f75..f0b9cac 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -51,7 +51,7 @@ static int __init kvm_vmm_init(void)
vmm_fpswa_interface = fpswa_interface;
/*Register vmm data to kvm side*/
- return kvm_init(&vmm_info, 1024, THIS_MODULE);
+ return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
}
static void __exit kvm_vmm_exit(void)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 19261a9..0799fea 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -148,7 +148,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if ((vma->vm_flags & mask) != mask)
goto bad_area;
- survive:
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
@@ -276,13 +275,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk(KERN_CRIT "VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
}
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 55ac3c4..fa1ecee 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -30,7 +30,6 @@
#include <linux/miscdevice.h>
#include <linux/utsname.h>
#include <linux/cpumask.h>
-#include <linux/smp_lock.h>
#include <linux/nodemask.h>
#include <linux/smp.h>
#include <linux/mutex.h>
@@ -629,9 +628,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
else {
/* migrate the task before calling SAL */
save_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
sn_hwperf_call_sal(op_info);
- set_cpus_allowed(current, save_allowed);
+ set_cpus_allowed_ptr(current, &save_allowed);
}
}
r = op_info->ret;
@@ -682,8 +681,7 @@ static int sn_hwperf_map_err(int hwperf_err)
/*
* ioctl for "sn_hwperf" misc device
*/
-static int
-sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
+static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg)
{
struct sn_hwperf_ioctl_args a;
struct cpuinfo_ia64 *cdata;
@@ -699,8 +697,6 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
int i;
int j;
- unlock_kernel();
-
/* only user requests are allowed here */
if ((op & SN_HWPERF_OP_MASK) < 10) {
r = -EINVAL;
@@ -859,12 +855,11 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
error:
vfree(p);
- lock_kernel();
return r;
}
static const struct file_operations sn_hwperf_fops = {
- .ioctl = sn_hwperf_ioctl,
+ .unlocked_ioctl = sn_hwperf_ioctl,
};
static struct miscdevice sn_hwperf_dev = {