Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/amd_iommu.c        48
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c    8
-rw-r--r--  arch/x86/kernel/apic/apic.c         3
-rw-r--r--  arch/x86/kernel/devicetree.c       11
-rw-r--r--  arch/x86/kernel/process_32.c        1
-rw-r--r--  arch/x86/kernel/process_64.c        1
-rw-r--r--  arch/x86/kernel/smpboot.c          13
-rw-r--r--  arch/x86/kvm/emulate.c             82
-rw-r--r--  arch/x86/oprofile/op_model_amd.c   13
-rw-r--r--  arch/x86/xen/multicalls.c          12
10 files changed, 137 insertions, 55 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb..7c3a95e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
+#include <asm/dma.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
if (pdev)
dev_data->alias = &pdev->dev;
+ else {
+ kfree(dev_data);
+ return -ENOTSUPP;
+ }
atomic_set(&dev_data->bind, 0);
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
return 0;
}
+static void iommu_ignore_device(struct device *dev)
+{
+ u16 devid, alias;
+
+ devid = get_device_id(dev);
+ alias = amd_iommu_alias_table[devid];
+
+ memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+ amd_iommu_rlookup_table[devid] = NULL;
+ amd_iommu_rlookup_table[alias] = NULL;
+}
+
static void iommu_uninit_device(struct device *dev)
{
kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
continue;
ret = iommu_init_device(&pdev->dev);
- if (ret)
+ if (ret == -ENOTSUPP)
+ iommu_ignore_device(&pdev->dev);
+ else if (ret)
goto out_free;
}
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
.dma_supported = amd_iommu_dma_supported,
};
+static unsigned device_dma_ops_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ unsigned unhandled = 0;
+
+ for_each_pci_dev(pdev) {
+ if (!check_device(&pdev->dev)) {
+ unhandled += 1;
+ continue;
+ }
+
+ pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ }
+
+ return unhandled;
+}
+
/*
* The function which clues the AMD IOMMU driver into dma_ops.
*/
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
int __init amd_iommu_init_dma_ops(void)
{
struct amd_iommu *iommu;
- int ret;
+ int ret, unhandled;
/*
* first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
swiotlb = 0;
/* Make the driver finally visible to the drivers */
- dma_ops = &amd_iommu_dma_ops;
+ unhandled = device_dma_ops_init();
+ if (unhandled && max_pfn > MAX_DMA32_PFN) {
+ /* There are unhandled devices - initialize swiotlb for them */
+ swiotlb = 1;
+ }
amd_iommu_stats_init();
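
The net effect of the hunks above: dma_ops are now installed per device instead of through the global dma_ops pointer, and swiotlb stays enabled as a fallback when some devices bypass the IOMMU and RAM extends past the 32-bit boundary. A minimal userspace sketch of that fallback decision (the device list and pfn values are made up for illustration; check_device() and the real dma_ops wiring are not modeled):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the decision in amd_iommu_init_dma_ops(): devices the
 * IOMMU handles get its dma_ops; if any device is left unhandled and
 * RAM extends above 4 GiB, swiotlb bounce buffering stays enabled. */
struct toy_dev { const char *name; bool handled; };

int main(void)
{
	struct toy_dev devs[] = {
		{ "nic", true },
		{ "legacy-dev", false },	/* not translated by the IOMMU */
	};
	const unsigned long MAX_DMA32_PFN = 0x100000;	/* 4 GiB / 4 KiB */
	unsigned long max_pfn = 0x200000;		/* 8 GiB of RAM */
	unsigned unhandled = 0;

	for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		if (!devs[i].handled) {
			unhandled++;
			continue;
		}
		printf("%s: using IOMMU dma_ops\n", devs[i].name);
	}

	if (unhandled && max_pfn > MAX_DMA32_PFN)
		puts("keeping swiotlb enabled for unhandled devices");
	return 0;
}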
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21..bfc8453 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
{
u8 *p = (u8 *)h;
u8 *end = p, flags = 0;
- u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
- u32 ext_flags = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0;
+ u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
- u16 i;
+ u32 i;
for (i = iommu->first_device; i <= iommu->last_device; ++i)
set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
*/
static void init_device_table(void)
{
- u16 devid;
+ u32 devid;
for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
set_dev_entry_bit(devid, DEV_ENTRY_VALID);
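
The u16 -> u32 counter changes above fix a subtle termination bug: amd_iommu_last_bdf can legitimately be 0xffff, and a 16-bit counter can never exceed that value -- the increment wraps back to 0 and the loop spins forever. A standalone illustration of why the widened counter terminates (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t last = 0xffff;	/* amd_iommu_last_bdf can be this large */
	uint32_t i;		/* with uint16_t, i <= last never fails */
	unsigned long visited = 0;

	for (i = 0; i <= last; ++i)
		++visited;	/* i reaches 0x10000 and the loop ends */

	printf("visited %lu device ids\n", visited);	/* 65536 */
	return 0;
}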
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af8..b9338b8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
/*
* If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
+ * enables the vector. See also the BKDGs. Must be called with
+ * preemption disabled.
*/
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc84..9aeb78a 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/of_pci.h>
+#include <linux/initrd.h>
#include <asm/hpet.h>
#include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
void __init add_dtb(u64 data)
{
initial_dtb = data + offsetof(struct setup_data, data);
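
The new hook above records the initrd range handed over by the flattened device tree, converting physical addresses to virtual ones with __va(). Roughly, __va() adds the fixed direct-map offset; a toy model under that assumption (TOY_PAGE_OFFSET uses the 32-bit x86 default, and the real kernel's memory layout is more involved):

#include <stdio.h>

#define TOY_PAGE_OFFSET 0xC0000000UL	/* illustrative only */

static unsigned long toy_va(unsigned long phys)
{
	return phys + TOY_PAGE_OFFSET;	/* direct-map offset addition */
}

int main(void)
{
	unsigned long start = 0x1000000, end = 0x1400000; /* phys 16-20 MiB */
	printf("initrd: virt %#lx - %#lx\n", toy_va(start), toy_va(end));
	return 0;
}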
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d12878..a3d0dc5 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
set_user_gs(regs, 0);
regs->fs = 0;
- set_fs(USER_DS);
regs->ds = __USER_DS;
regs->es = __USER_DS;
regs->ss = __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd92..ca6f7ab 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
regs->cs = _cs;
regs->ss = _ss;
regs->flags = X86_EFLAGS_IF;
- set_fs(USER_DS);
/*
* Free the old FP and other extended state
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11..9fd3137 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
x86_platform.nmi_init();
+ /*
+ * Wait until the cpu which brought this one up has marked it
+ * online before enabling interrupts. If we don't do that then
+ * we can end up waking up the softirq thread before this cpu
+ * has reached the active state, which makes the scheduler
+ * unhappy and schedules the softirq thread on the wrong cpu.
+ * This is only observable with forced threaded interrupts, but
+ * in theory it could also happen without them. It's just much
+ * harder to achieve.
+ */
+ while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+ cpu_relax();
+
/* enable local interrupts */
local_irq_enable();
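
The spin loop added above is a plain flag handshake: the secondary polls a flag that only the boot cpu sets, relaxing in between. A userspace sketch of the same pattern with threads (sched_yield() stands in for cpu_relax(); none of the kernel's cpumask machinery is modeled):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int active;	/* plays the role of cpu_active_mask */

static void *secondary(void *arg)
{
	while (!atomic_load(&active))
		sched_yield();	/* stand-in for cpu_relax() */
	puts("secondary: active, safe to enable interrupts");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);
	/* ... boot-side bringup work happens here ... */
	atomic_store(&active, 1);	/* like set_cpu_active(cpu, true) */
	pthread_join(t, NULL);
	return 0;
}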
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477..6df88c7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
#define DstDI (5<<1) /* Destination is in ES:(E)DI */
#define DstMem64 (6<<1) /* 64bit memory operand */
#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
-#define DstMask (7<<1)
+#define DstDX (8<<1) /* Destination is in DX register */
+#define DstMask (0xf<<1)
/* Source operand type. */
-#define SrcNone (0<<4) /* No source operand. */
-#define SrcReg (1<<4) /* Register operand. */
-#define SrcMem (2<<4) /* Memory operand. */
-#define SrcMem16 (3<<4) /* Memory operand (16-bit). */
-#define SrcMem32 (4<<4) /* Memory operand (32-bit). */
-#define SrcImm (5<<4) /* Immediate operand. */
-#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
-#define SrcOne (7<<4) /* Implied '1' */
-#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
-#define SrcImmU (9<<4) /* Immediate operand, unsigned */
-#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
-#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
-#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
-#define SrcAcc (0xd<<4) /* Source Accumulator */
-#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
-#define SrcMask (0xf<<4)
+#define SrcNone (0<<5) /* No source operand. */
+#define SrcReg (1<<5) /* Register operand. */
+#define SrcMem (2<<5) /* Memory operand. */
+#define SrcMem16 (3<<5) /* Memory operand (16-bit). */
+#define SrcMem32 (4<<5) /* Memory operand (32-bit). */
+#define SrcImm (5<<5) /* Immediate operand. */
+#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */
+#define SrcOne (7<<5) /* Implied '1' */
+#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */
+#define SrcImmU (9<<5) /* Immediate operand, unsigned */
+#define SrcSI (0xa<<5) /* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */
+#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */
+#define SrcAcc (0xd<<5) /* Source Accumulator */
+#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */
+#define SrcDX (0xf<<5) /* Source is in DX register */
+#define SrcMask (0xf<<5)
/* Generic ModRM decode. */
-#define ModRM (1<<8)
+#define ModRM (1<<9)
/* Destination is only written; never read. */
-#define Mov (1<<9)
-#define BitOp (1<<10)
-#define MemAbs (1<<11) /* Memory operand is absolute displacement */
-#define String (1<<12) /* String instruction (rep capable) */
-#define Stack (1<<13) /* Stack instruction (push/pop) */
-#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */
-#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */
-#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */
-#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */
-#define Sse (1<<17) /* SSE Vector instruction */
+#define Mov (1<<10)
+#define BitOp (1<<11)
+#define MemAbs (1<<12) /* Memory operand is absolute displacement */
+#define String (1<<13) /* String instruction (rep capable) */
+#define Stack (1<<14) /* Stack instruction (push/pop) */
+#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
+#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
+#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
+#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse (1<<18) /* SSE Vector instruction */
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
- D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
- D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
+ D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
+ D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte)),
/* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
/* 0xE8 - 0xEF */
D(SrcImm | Stack), D(SrcImm | ImplicitOps),
D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
- D2bvIP(SrcNone | DstAcc, in, check_perm_in),
- D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
+ D2bvIP(SrcDX | DstAcc, in, check_perm_in),
+ D2bvIP(SrcAcc | DstDX, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3613,6 +3615,12 @@ done_prefixes:
memop.bytes = c->op_bytes + 2;
goto srcmem_common;
break;
+ case SrcDX:
+ c->src.type = OP_REG;
+ c->src.bytes = 2;
+ c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&c->src);
+ break;
}
if (rc != X86EMUL_CONTINUE)
@@ -3682,6 +3690,12 @@ done_prefixes:
c->dst.addr.mem.seg = VCPU_SREG_ES;
c->dst.val = 0;
break;
+ case DstDX:
+ c->dst.type = OP_REG;
+ c->dst.bytes = 2;
+ c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&c->dst);
+ break;
case ImplicitOps:
/* Special instructions do their own operand decoding. */
default:
@@ -4027,7 +4041,6 @@ special_insn:
break;
case 0xec: /* in al,dx */
case 0xed: /* in (e/r)ax,dx */
- c->src.val = c->regs[VCPU_REGS_RDX];
do_io_in:
if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
&c->dst.val))
@@ -4035,7 +4048,6 @@ special_insn:
break;
case 0xee: /* out dx,al */
case 0xef: /* out dx,(e/r)ax */
- c->dst.val = c->regs[VCPU_REGS_RDX];
do_io_out:
ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
&c->src.val, 1);
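
The renumbering above widens the destination field from three bits to four so that DstDX fits, which pushes every Src encoding from <<4 to <<5 and each later flag up by one bit. A small standalone demo of how such packed operand-type fields are encoded and extracted (the shift values mirror the post-patch layout; the macro names reuse the patch's for readability only):

#include <stdio.h>

/* Post-patch layout: Dst occupies bits 1-4, Src bits 5-8. */
#define DST_SHIFT 1
#define SRC_SHIFT 5
#define DstDX	(8 << DST_SHIFT)
#define DstMask	(0xf << DST_SHIFT)
#define SrcAcc	(0xd << SRC_SHIFT)
#define SrcMask	(0xf << SRC_SHIFT)

int main(void)
{
	unsigned flags = DstDX | SrcAcc;	/* the "out dx,al" encoding */
	unsigned dst = (flags & DstMask) >> DST_SHIFT;
	unsigned src = (flags & SrcMask) >> SRC_SHIFT;

	printf("dst field = %u, src field = %u\n", dst, src);	/* 8, 13 */
	return 0;
}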
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a56..9cbb710 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
return 0;
}
+/*
+ * This runs only on the current cpu. We try to find an LVT offset and
+ * set up the local APIC. For this we must disable preemption. On
+ * success we initialize all nodes with this offset; this then updates
+ * the offset in the per-node IBS_CTL msr. The per-core APIC setup of
+ * the IBS interrupt vector is done from op_amd_setup_ctrs() /
+ * op_amd_cpu_shutdown() using the new offset.
+ */
static int force_ibs_eilvt_setup(void)
{
int offset;
int ret;
- /*
- * find the next free available EILVT entry, skip offset 0,
- * pin search to this cpu
- */
preempt_disable();
+ /* find the next free available EILVT entry, skip offset 0 */
for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
if (get_eilvt(offset))
break;
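
For context, get_eilvt() tries to reserve the given EILVT offset on the current cpu, so the loop above is a linear scan for the first free slot starting at offset 1. A toy standalone version of that scan (toy_get_eilvt() and its canned availability table are invented for the demo; APIC_EILVT_NR_MAX is 4, as on real AMD hardware):

#include <stdbool.h>
#include <stdio.h>

#define APIC_EILVT_NR_MAX 4

/* Hypothetical stand-in for get_eilvt(): reserves the offset if free. */
static bool toy_get_eilvt(int offset)
{
	static bool taken[APIC_EILVT_NR_MAX] = { true, true, false, false };

	if (taken[offset])
		return false;
	taken[offset] = true;
	return true;
}

int main(void)
{
	int offset;

	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++)
		if (toy_get_eilvt(offset))
			break;

	if (offset == APIC_EILVT_NR_MAX)
		puts("no free EILVT entry");
	else
		printf("reserved EILVT offset %d\n", offset);	/* 2 */
	return 0;
}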
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 8bff7e7..1b2b73f 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
unsigned argidx = roundup(b->argidx, sizeof(u64));
BUG_ON(preemptible());
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
if (b->mcidx == MC_BATCH ||
- (argidx + args) > MC_ARGS) {
+ (argidx + args) >= MC_ARGS) {
mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
xen_mc_flush();
argidx = roundup(b->argidx, sizeof(u64));
@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
ret.args = &b->args[argidx];
b->argidx = argidx + args;
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
return ret;
}
@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
struct multicall_space ret = { NULL, NULL };
BUG_ON(preemptible());
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
if (b->mcidx == 0)
return ret;
@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
if (b->entries[b->mcidx - 1].op != op)
return ret;
- if ((b->argidx + size) > MC_ARGS)
+ if ((b->argidx + size) >= MC_ARGS)
return ret;
ret.mc = &b->entries[b->mcidx - 1];
ret.args = &b->args[b->argidx];
b->argidx += size;
- BUG_ON(b->argidx > MC_ARGS);
+ BUG_ON(b->argidx >= MC_ARGS);
return ret;
}
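
The > -> >= changes above are off-by-one fixes: __xen_mc_entry() hands out &b->args[b->argidx] for the caller to write through, so argidx == MC_ARGS already points one byte past the array and must be treated as full. A toy model of the allocation path under the stricter checks (MC_ARGS shrunk to 8; the real code recomputes argidx after flushing rather than zeroing it):

#include <assert.h>
#include <stdio.h>

#define MC_ARGS 8	/* toy size; the kernel buffer is much larger */

static unsigned char args[MC_ARGS];
static unsigned argidx;

/* Hands out space for n bytes, mirroring __xen_mc_entry(): because
 * the caller writes through the returned pointer, argidx must stay
 * strictly below MC_ARGS -- which is what the >= checks enforce. */
static unsigned char *mc_entry(unsigned n)
{
	assert(argidx < MC_ARGS);	/* the patch's BUG_ON(argidx >= MC_ARGS) */
	if (argidx + n >= MC_ARGS) {	/* "flush" before overflowing */
		printf("flush at argidx=%u\n", argidx);
		argidx = 0;
	}
	unsigned char *p = &args[argidx];
	argidx += n;
	return p;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		mc_entry(3);
	return 0;
}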