author     Rusty Russell <rusty@rustcorp.com.au>   2007-11-05 10:55:57 (GMT)
committer  Rusty Russell <rusty@rustcorp.com.au>   2007-11-05 10:55:57 (GMT)
commit     633872b980f55f40a5e7de374f26970e41e2137b (patch)
tree       676e604142f0a536fd54d47da03a67d15bedbf45 /arch/x86/lguest/boot.c
parent     fad23fc78b959dae89768e523c3a6f5edb83bbe9 (diff)
download   linux-633872b980f55f40a5e7de374f26970e41e2137b.tar.xz
lguest: tidy up documentation
After Adrian Bunk's "make async_hcall static" moved things around, update comments to match (aka "make Guest").

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'arch/x86/lguest/boot.c')
-rw-r--r--  arch/x86/lguest/boot.c  43
1 file changed, 21 insertions, 22 deletions
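
The comment blocks this patch reorders describe lguest's hypercall batching: while lazy mode is on, hypercalls are stored in a ring buffer and flushed with a single LHCALL_FLUSH_ASYNC when lazy mode is turned off, so a burst of page-table updates costs one trap into the Host instead of dozens. The stand-alone C sketch below illustrates only that queue-then-flush pattern; every name in it (deferred_call, do_hcall, lazy_call, flush_deferred, leave_lazy_mode) is a simplified stand-in rather than the kernel's own API, and the flush here replays the queue from the caller's side, whereas the real Host consumes the ring itself after the flush hypercall.

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 64

struct deferred_call {
	unsigned long call, arg1, arg2, arg3;
};

static struct deferred_call ring[RING_SIZE];
static unsigned int next_slot;
static bool lazy_mode;

/* Stand-in for a real hypercall: in the Guest this would trap to the Host. */
static void do_hcall(unsigned long call, unsigned long a1,
		     unsigned long a2, unsigned long a3)
{
	printf("hypercall %lu(%lu, %lu, %lu)\n", call, a1, a2, a3);
}

/* Run every queued call, then empty the queue.  In the real code the Host
 * drains the ring itself after one LHCALL_FLUSH_ASYNC. */
static void flush_deferred(void)
{
	for (unsigned int i = 0; i < next_slot; i++)
		do_hcall(ring[i].call, ring[i].arg1, ring[i].arg2, ring[i].arg3);
	next_slot = 0;
}

/* Like the patch's lazy_hcall(): issue immediately outside lazy mode,
 * otherwise store the call for later. */
static void lazy_call(unsigned long call, unsigned long a1,
		      unsigned long a2, unsigned long a3)
{
	if (!lazy_mode) {
		do_hcall(call, a1, a2, a3);
		return;
	}
	/* Full ring: flush early.  The real code instead waits for the Host
	 * to mark a slot free via its per-slot "hcall_status" word. */
	if (next_slot == RING_SIZE)
		flush_deferred();
	ring[next_slot++] = (struct deferred_call){ call, a1, a2, a3 };
}

/* Like lguest_leave_lazy_mode(): turn batching off and flush the backlog. */
static void leave_lazy_mode(void)
{
	lazy_mode = false;
	flush_deferred();
}

int main(void)
{
	lazy_mode = true;			/* e.g. paravirt_enter_lazy_mmu() */
	for (unsigned long pte = 0; pte < 3; pte++)
		lazy_call(14 /* hypothetical call number */, pte, 0, 0);
	leave_lazy_mode();			/* all three updates go out together */
	return 0;
}

Built as ordinary user-space C, this prints the three deferred calls only when leave_lazy_mode() runs, which is the batching behaviour the relocated G:035 comment is describing.
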
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e6023b8..92c5611 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -93,27 +93,7 @@ struct lguest_data lguest_data = {
};
static cycle_t clock_base;
-/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
- * real optimization trick!
- *
- * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
- * them as a batch when lazy_mode is eventually turned off. Because hypercalls
- * are reasonably expensive, batching them up makes sense. For example, a
- * large munmap might update dozens of page table entries: that code calls
- * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
- * lguest_leave_lazy_mode().
- *
- * So, when we're in lazy mode, we call async_hypercall() to store the call for
- * future processing. When lazy mode is turned off we issue a hypercall to
- * flush the stored calls.
- */
-static void lguest_leave_lazy_mode(void)
-{
- paravirt_leave_lazy(paravirt_get_lazy_mode());
- hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-}
-
-/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
+/*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
* ring buffer of stored hypercalls which the Host will run though next time we
* do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
* arguments, and a "hcall_status" word which is 0 if the call is ready to go,
@@ -151,6 +131,18 @@ static void async_hcall(unsigned long call, unsigned long arg1,
local_irq_restore(flags);
}
+/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off. Because hypercalls
+ * are reasonably expensive, batching them up makes sense. For example, a
+ * large munmap might update dozens of page table entries: that code calls
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
+ *
+ * So, when we're in lazy mode, we call async_hcall() to store the call for
+ * future processing. */
static void lazy_hcall(unsigned long call,
unsigned long arg1,
unsigned long arg2,
@@ -161,7 +153,14 @@ static void lazy_hcall(unsigned long call,
else
async_hcall(call, arg1, arg2, arg3);
}
-/*:*/
+
+/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
+ * issue a hypercall to flush any stored calls. */
+static void lguest_leave_lazy_mode(void)
+{
+ paravirt_leave_lazy(paravirt_get_lazy_mode());
+ hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+}
/*G:033
* After that diversion we return to our first native-instruction