author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2010-11-08 19:13:35 (GMT)
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-04-14 15:16:54 (GMT)
commit    c55fa78b13b32d3f19e19cd0c8b9378fdc09e521 (patch)
tree      8595a0697b1177763ba42b28908e276a03a8725a /arch/x86/pci
parent    a6360dd37e1a144ed11e6548371bade559a1e4df (diff)
download  linux-c55fa78b13b32d3f19e19cd0c8b9378fdc09e521.tar.xz
xen/pci: Add xen_[find|register|unregister]_device_domain_owner functions.
When the Xen PCI backend is told to enable or disable MSI/MSI-X functions, the initial domain performs these operations. The initial domain needs to know which domain (guest) is going to use the PCI device, so that when it makes the appropriate hypercall to retrieve the MSI/MSI-X vector it can also assign the PCI device to the appropriate domain (guest). This boils down to needing a mechanism to find, set, and unset the domain id that will be using the device.

[v2: EXPORT_SYMBOL -> EXPORT_SYMBOL_GPL.]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86/pci')
-rw-r--r--  arch/x86/pci/xen.c  73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index e37b407..6075f2d 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -461,3 +461,76 @@ void __init xen_setup_pirqs(void)
}
}
#endif
+
+struct xen_device_domain_owner {
+ domid_t domain;
+ struct pci_dev *dev;
+ struct list_head list;
+};
+
+static DEFINE_SPINLOCK(dev_domain_list_spinlock);
+static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
+
+static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
+{
+ struct xen_device_domain_owner *owner;
+
+ list_for_each_entry(owner, &dev_domain_list, list) {
+ if (owner->dev == dev)
+ return owner;
+ }
+ return NULL;
+}
+
+int xen_find_device_domain_owner(struct pci_dev *dev)
+{
+ struct xen_device_domain_owner *owner;
+ int domain = -ENODEV;
+
+ spin_lock(&dev_domain_list_spinlock);
+ owner = find_device(dev);
+ if (owner)
+ domain = owner->domain;
+ spin_unlock(&dev_domain_list_spinlock);
+ return domain;
+}
+EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
+
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
+{
+ struct xen_device_domain_owner *owner;
+
+ owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
+ if (!owner)
+ return -ENODEV;
+
+ spin_lock(&dev_domain_list_spinlock);
+ if (find_device(dev)) {
+ spin_unlock(&dev_domain_list_spinlock);
+ kfree(owner);
+ return -EEXIST;
+ }
+ owner->domain = domain;
+ owner->dev = dev;
+ list_add_tail(&owner->list, &dev_domain_list);
+ spin_unlock(&dev_domain_list_spinlock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
+
+int xen_unregister_device_domain_owner(struct pci_dev *dev)
+{
+ struct xen_device_domain_owner *owner;
+
+ spin_lock(&dev_domain_list_spinlock);
+ owner = find_device(dev);
+ if (!owner) {
+ spin_unlock(&dev_domain_list_spinlock);
+ return -ENODEV;
+ }
+ list_del(&owner->list);
+ spin_unlock(&dev_domain_list_spinlock);
+ kfree(owner);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
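For context, here is a minimal sketch of how a caller such as the Xen PCI backend might use the three helpers added by this patch when a device is handed to, and later reclaimed from, a guest. The helper names assign_device_to_guest() and release_device_from_guest(), the forward declarations, and the included headers are illustrative assumptions only; they are not part of this commit.

/*
 * Hypothetical usage sketch (not part of this patch). Assumes the
 * declarations below are exported through an appropriate Xen PCI header.
 */
#include <linux/pci.h>
#include <linux/errno.h>
#include <xen/interface/xen.h>	/* domid_t */

int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_unregister_device_domain_owner(struct pci_dev *dev);

static int assign_device_to_guest(struct pci_dev *dev, domid_t domid)
{
	int err;

	/* Record which guest will own the device; -EEXIST if already set. */
	err = xen_register_device_domain_owner(dev, domid);
	if (err)
		return err;

	/*
	 * Later, e.g. when enabling MSI/MSI-X on behalf of the guest, the
	 * initial domain looks up the owner so the hypercall can assign the
	 * device (and its vectors) to the right domain.
	 */
	err = xen_find_device_domain_owner(dev);
	if (err < 0)
		return err;	/* -ENODEV: no owner recorded */

	return 0;
}

static void release_device_from_guest(struct pci_dev *dev)
{
	/* Drop the device-to-domain mapping once the guest is done with it. */
	xen_unregister_device_domain_owner(dev);
}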