Apply patch proposed in PR port-xen/45975 (this does not solve the exact problem reported here but is part of the solution)
author bouyer <bouyer@NetBSD.org>
Fri, 17 Feb 2012 18:40:18 +0000
branch trunk
changeset 209269 abb0b3ee178e
parent 209268 366ac9866d70
child 209270 146bac00019c
Apply patch proposed in PR port-xen/45975 (this does not solve the exact problem reported here but is part of the solution): xen_kpm_sync() is not working as expected, leading to races between CPUs. 1 the check (xpq_cpu != &x86_curcpu) is always false because we have different x86_curcpu symbols with different addresses in the kernel. Fortunately, all addresses disassemble to the same code. Because of this we always use the code intended for bootstrap, which doesn't use cross-calls or lock. 2 once 1 above is fixed, xen_kpm_sync() will use xcalls to sync other CPUs, which cause it to sleep and pmap.c doesn't like that. It triggers this KASSERT() in pmap_unmap_ptes(): KASSERT(pmap->pm_ncsw == curlwp->l_ncsw); 3 pmap->pm_cpus is not safe for the purpose of xen_kpm_sync(), which needs to know on which CPU a pmap is loaded *now*: pmap->pm_cpus is cleared before cpu_load_pmap() is called to switch to a new pmap, leaving a window where a pmap is still in a CPU's ci_kpm_pdir but not in pm_cpus. As a virtual CPU may be preempted by the hypervisor at any time, it can be large enough to let another CPU free the PTP and reuse it as a normal page. To fix 2), avoid cross-calls and IPIs completely, and instead use a mutex to update all CPU's ci_kpm_pdir from the local CPU. It's safe because we just need to update the table page, a tlbflush IPI will happen later. As a side effect, we don't need a different code for bootstrap, fixing 1). The mutex added to struct cpu needs a small headers reorganisation. To fix 3), introduce a pm_xen_ptp_cpus which is updated from cpu_pmap_load(), with the ci_kpm_mtx mutex held. Checking it with ci_kpm_mtx held will avoid overwriting the wrong pmap's ci_kpm_pdir. While there I removed the unused pmap_is_active() function; and added some more details to DIAGNOSTIC panics.
sys/arch/x86/include/cpu.h
sys/arch/x86/include/pmap.h
sys/arch/x86/x86/cpu.c
sys/arch/x86/x86/pmap.c
sys/arch/xen/include/hypervisor.h
sys/arch/xen/include/intr.h
sys/arch/xen/x86/cpu.c
sys/arch/xen/x86/x86_xpmap.c
sys/arch/xen/x86/xen_ipi.c
sys/arch/xen/x86/xen_pmap.c
--- a/sys/arch/x86/include/cpu.h	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/x86/include/cpu.h	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.47 2012/02/12 14:38:18 jym Exp $	*/
+/*	$NetBSD: cpu.h,v 1.48 2012/02/17 18:40:18 bouyer Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -70,6 +70,7 @@
 #ifdef XEN
 #include <xen/xen-public/xen.h>
 #include <xen/xen-public/event_channel.h>
+#include <sys/mutex.h>
 #endif /* XEN */
 
 struct intrsource;
@@ -185,6 +186,7 @@
 	/* Currently active user PGD (can't use rcr3() with Xen) */
 	pd_entry_t *	ci_kpm_pdir;	/* per-cpu PMD (va) */
 	paddr_t		ci_kpm_pdirpa;  /* per-cpu PMD (pa) */
+	kmutex_t	ci_kpm_mtx;
 #if defined(__x86_64__)
 	/* per-cpu version of normal_pdes */
 	pd_entry_t *	ci_normal_pdes[3]; /* Ok to hardcode. only for x86_64 && XEN */
@@ -317,7 +319,7 @@
 void cpu_boot_secondary_processors(void);
 void cpu_init_idle_lwps(void);
 void cpu_init_msrs(struct cpu_info *, bool);
-void cpu_load_pmap(struct pmap *);
+void cpu_load_pmap(struct pmap *, struct pmap *);
 void cpu_broadcast_halt(void);
 void cpu_kick(struct cpu_info *);
 
--- a/sys/arch/x86/include/pmap.h	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/x86/include/pmap.h	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.49 2011/12/04 16:24:13 chs Exp $	*/
+/*	$NetBSD: pmap.h,v 1.50 2012/02/17 18:40:18 bouyer Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -165,6 +165,8 @@
 	uint32_t pm_cpus;		/* mask of CPUs using pmap */
 	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
 					 of pmap */
+	uint32_t pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
+					 ptp mapped */
 	uint64_t pm_ncsw;		/* for assertions */
 	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
 };
--- a/sys/arch/x86/x86/cpu.c	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/x86/x86/cpu.c	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.96 2011/10/18 05:16:02 jruoho Exp $	*/
+/*	$NetBSD: cpu.c,v 1.97 2012/02/17 18:40:19 bouyer Exp $	*/
 
 /*-
  * Copyright (c) 2000, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -62,7 +62,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.96 2011/10/18 05:16:02 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.97 2012/02/17 18:40:19 bouyer Exp $");
 
 #include "opt_ddb.h"
 #include "opt_mpbios.h"		/* for MPDEBUG */
@@ -1228,7 +1228,7 @@
  * Loads pmap for the current CPU.
  */
 void
-cpu_load_pmap(struct pmap *pmap)
+cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
 {
 #ifdef PAE
 	int i, s;
--- a/sys/arch/x86/x86/pmap.c	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/x86/x86/pmap.c	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.164 2012/02/11 18:59:41 chs Exp $	*/
+/*	$NetBSD: pmap.c,v 1.165 2012/02/17 18:40:19 bouyer Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.164 2012/02/11 18:59:41 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.165 2012/02/17 18:40:19 bouyer Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -561,7 +561,6 @@
 static void		 pmap_free_ptp(struct pmap *, struct vm_page *,
 				       vaddr_t, pt_entry_t *,
 				       pd_entry_t * const *);
-static bool		 pmap_is_active(struct pmap *, struct cpu_info *, bool);
 static bool		 pmap_remove_pte(struct pmap *, struct vm_page *,
 					 pt_entry_t *, vaddr_t,
 					 struct pv_entry **);
@@ -680,19 +679,6 @@
 }
 
 /*
- * pmap_is_active: is this pmap loaded into the specified processor's %cr3?
- */
-
-inline static bool
-pmap_is_active(struct pmap *pmap, struct cpu_info *ci, bool kernel)
-{
-
-	return (pmap == pmap_kernel() ||
-	    (pmap->pm_cpus & ci->ci_cpumask) != 0 ||
-	    (kernel && (pmap->pm_kernel_cpus & ci->ci_cpumask) != 0));
-}
-
-/*
  *	Add a reference to the specified pmap.
  */
 
@@ -781,7 +767,7 @@
 		ci->ci_tlbstate = TLBSTATE_VALID;
 		atomic_or_32(&pmap->pm_cpus, cpumask);
 		atomic_or_32(&pmap->pm_kernel_cpus, cpumask);
-		cpu_load_pmap(pmap);
+		cpu_load_pmap(pmap, curpmap);
 	}
 	pmap->pm_ncsw = l->l_ncsw;
 	*pmap2 = curpmap;
@@ -2239,6 +2225,7 @@
 	pmap->pm_flags = 0;
 	pmap->pm_cpus = 0;
 	pmap->pm_kernel_cpus = 0;
+	pmap->pm_xen_ptp_cpus = 0;
 	pmap->pm_gc_ptp = NULL;
 
 	/* init the LDT */
@@ -2329,9 +2316,26 @@
 	}
 
 #ifdef DIAGNOSTIC
-	for (CPU_INFO_FOREACH(cii, ci))
+	for (CPU_INFO_FOREACH(cii, ci)) {
 		if (ci->ci_pmap == pmap)
 			panic("destroying pmap being used");
+#if defined(XEN) && defined(__x86_64__)
+		for (i = 0; i < PDIR_SLOT_PTE; i++) {
+			if (pmap->pm_pdir[i] != 0 &&
+			    ci->ci_kpm_pdir[i] == pmap->pm_pdir[i]) {
+				printf("pmap_destroy(%p) pmap_kernel %p "
+				    "curcpu %d cpu %d ci_pmap %p "
+				    "ci->ci_kpm_pdir[%d]=%" PRIx64
+				    " pmap->pm_pdir[%d]=%" PRIx64 "\n",
+				    pmap, pmap_kernel(), curcpu()->ci_index,
+				    ci->ci_index, ci->ci_pmap,
+				    i, ci->ci_kpm_pdir[i],
+				    i, pmap->pm_pdir[i]);
+				panic("pmap_destroy: used pmap");
+			}
+		}
+#endif
+	}
 #endif /* DIAGNOSTIC */
 
 	/*
@@ -2760,7 +2764,7 @@
 	lldt(pmap->pm_ldt_sel);
 
 	u_int gen = uvm_emap_gen_return();
-	cpu_load_pmap(pmap);
+	cpu_load_pmap(pmap, oldpmap);
 	uvm_emap_update(gen);
 
 	ci->ci_want_pmapload = 0;
--- a/sys/arch/xen/include/hypervisor.h	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/xen/include/hypervisor.h	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypervisor.h,v 1.36 2011/12/07 15:47:42 cegger Exp $	*/
+/*	$NetBSD: hypervisor.h,v 1.37 2012/02/17 18:40:19 bouyer Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -91,7 +91,6 @@
 #include <xen/xen-public/io/netif.h>
 #include <xen/xen-public/io/blkif.h>
 
-#include <machine/cpu.h>
 #include <machine/hypercalls.h>
 
 #undef u8
--- a/sys/arch/xen/include/intr.h	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/xen/include/intr.h	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: intr.h,v 1.33 2011/08/11 17:58:59 cherry Exp $	*/
+/*	$NetBSD: intr.h,v 1.34 2012/02/17 18:40:19 bouyer Exp $	*/
 /*	NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp	*/
 
 /*-
@@ -39,12 +39,13 @@
 #include <xen/xen.h>
 #include <xen/hypervisor.h>
 #include <xen/evtchn.h>
-#include <machine/cpu.h>
 #include <machine/pic.h>
 #include <sys/evcnt.h>
 
 #include "opt_xen.h"
 
+
+struct cpu_info;
 /*
  * Struct describing an event channel. 
  */
@@ -152,8 +153,6 @@
  * Stub declarations.
  */
 
-struct cpu_info;
-
 struct pcibus_attach_args;
 
 #ifdef MULTIPROCESSOR
--- a/sys/arch/xen/x86/cpu.c	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/xen/x86/cpu.c	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.80 2012/02/13 23:54:58 jym Exp $	*/
+/*	$NetBSD: cpu.c,v 1.81 2012/02/17 18:40:20 bouyer Exp $	*/
 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
 
 /*-
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.80 2012/02/13 23:54:58 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.81 2012/02/17 18:40:20 bouyer Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -595,6 +595,9 @@
 	/* No user PGD mapped for this CPU yet */
 	ci->ci_xen_current_user_pgd = 0;
 #endif
+#if defined(__x86_64__) || defined(PAE)
+	mutex_init(&ci->ci_kpm_mtx, MUTEX_DEFAULT, IPL_VM);
+#endif
 
 	atomic_or_32(&cpus_running, ci->ci_cpumask);
 	atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
@@ -1172,62 +1175,76 @@
  * Loads pmap for the current CPU.
  */
 void
-cpu_load_pmap(struct pmap *pmap)
+cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
 {
+#if defined(__x86_64__) || defined(PAE)
+	struct cpu_info *ci = curcpu();
+	uint32_t cpumask = ci->ci_cpumask;
+
+	mutex_enter(&ci->ci_kpm_mtx);
+	/* make new pmap visible to pmap_kpm_sync_xcall() */
+	atomic_or_32(&pmap->pm_xen_ptp_cpus, cpumask);
+#endif
 #ifdef i386
 #ifdef PAE
-	int i, s;
-	struct cpu_info *ci;
-
-	s = splvm(); /* just to be safe */
-	ci = curcpu();
-	paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
-	/* don't update the kernel L3 slot */
-	for (i = 0 ; i < PDP_SIZE - 1; i++) {
-		xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
-		    xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
+	{
+		int i;
+		paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
+		/* don't update the kernel L3 slot */
+		for (i = 0 ; i < PDP_SIZE - 1; i++) {
+			xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t),
+			    xpmap_ptom(pmap->pm_pdirpa[i]) | PG_V);
+		}
+		tlbflush();
 	}
-	splx(s);
-	tlbflush();
 #else /* PAE */
 	lcr3(pmap_pdirpa(pmap, 0));
 #endif /* PAE */
 #endif /* i386 */
 
 #ifdef __x86_64__
-	int i, s;
-	pd_entry_t *new_pgd;
-	struct cpu_info *ci;
-	paddr_t l4_pd_ma;
+	{
+		int i;
+		pd_entry_t *new_pgd;
+		paddr_t l4_pd_ma;
+
+		l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa);
 
-	ci = curcpu();
-	l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa);
+		/*
+		 * Map user space address in kernel space and load
+		 * user cr3
+		 */
+		new_pgd = pmap->pm_pdir;
+		KASSERT(pmap == ci->ci_pmap);
 
-	/*
-	 * Map user space address in kernel space and load
-	 * user cr3
-	 */
-	s = splvm();
-	new_pgd = pmap->pm_pdir;
+		/* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
+		for (i = 0; i < PDIR_SLOT_PTE; i++) {
+			KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0);
+			if (ci->ci_kpm_pdir[i] != new_pgd[i]) {
+				xpq_queue_pte_update(
+				   l4_pd_ma + i * sizeof(pd_entry_t),
+				    new_pgd[i]);
+			}
+		}
 
-	/* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */
-	for (i = 0; i < PDIR_SLOT_PTE; i++) {
-		xpq_queue_pte_update(l4_pd_ma + i * sizeof(pd_entry_t), new_pgd[i]);
+		if (__predict_true(pmap != pmap_kernel())) {
+			xen_set_user_pgd(pmap_pdirpa(pmap, 0));
+			ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);
+		}
+		else {
+			xpq_queue_pt_switch(l4_pd_ma);
+			ci->ci_xen_current_user_pgd = 0;
+		}
+
+		tlbflush();
 	}
 
-	if (__predict_true(pmap != pmap_kernel())) {
-		xen_set_user_pgd(pmap_pdirpa(pmap, 0));
-		ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);
-	}
-	else {
-		xpq_queue_pt_switch(l4_pd_ma);
-		ci->ci_xen_current_user_pgd = 0;
-	}
-
-	tlbflush();
-	splx(s);
-
 #endif /* __x86_64__ */
+#if defined(__x86_64__) || defined(PAE)
+	/* old pmap no longer visible to pmap_kpm_sync_xcall() */
+	atomic_and_32(&oldpmap->pm_xen_ptp_cpus, ~cpumask);
+	mutex_exit(&ci->ci_kpm_mtx);
+#endif
 }
 
  /*
--- a/sys/arch/xen/x86/x86_xpmap.c	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/xen/x86/x86_xpmap.c	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: x86_xpmap.c,v 1.38 2012/01/12 19:49:37 cherry Exp $	*/
+/*	$NetBSD: x86_xpmap.c,v 1.39 2012/02/17 18:40:20 bouyer Exp $	*/
 
 /*
  * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
@@ -69,7 +69,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.38 2012/01/12 19:49:37 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.39 2012/02/17 18:40:20 bouyer Exp $");
 
 #include "opt_xen.h"
 #include "opt_ddb.h"
@@ -185,8 +185,12 @@
 	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);
 
 	if (xpq_idx != 0 && ret < 0) {
-		printf("xpq_flush_queue: %d entries (%d successful)\n",
-		    xpq_idx, ok);
+		struct cpu_info *ci;
+		CPU_INFO_ITERATOR cii;
+
+		printf("xpq_flush_queue: %d entries (%d successful) on "
+		    "cpu%d (%ld)\n",
+		    xpq_idx, ok, xpq_cpu()->ci_index, xpq_cpu()->ci_cpuid);
 
 		if (ok != 0) {
 			xpq_queue += ok;
@@ -195,9 +199,23 @@
 			goto retry;
 		}
 
-		for (i = 0; i < xpq_idx; i++)
-			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
-			   xpq_queue[i].ptr, xpq_queue[i].val);
+		for (CPU_INFO_FOREACH(cii, ci)) {
+			xpq_queue = xpq_queue_array[ci->ci_cpuid];
+			xpq_idx = xpq_idx_array[ci->ci_cpuid];
+			printf("cpu%d (%ld):\n", ci->ci_index, ci->ci_cpuid);
+			for (i = 0; i < xpq_idx; i++) {
+				printf("  0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
+				   xpq_queue[i].ptr, xpq_queue[i].val);
+			}
+#ifdef __x86_64__
+			for (i = 0; i < PDIR_SLOT_PTE; i++) {
+				if (ci->ci_kpm_pdir[i] == 0)
+					continue;
+				printf(" kpm_pdir[%d]: 0x%" PRIx64 "\n",
+				    i, ci->ci_kpm_pdir[i]);
+			}
+#endif
+		}
 		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
 	}
 	xpq_idx_array[xpq_cpu()->ci_cpuid] = 0;
--- a/sys/arch/xen/x86/xen_ipi.c	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/xen/x86/xen_ipi.c	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xen_ipi.c,v 1.9 2011/12/30 12:16:19 cherry Exp $ */
+/* $NetBSD: xen_ipi.c,v 1.10 2012/02/17 18:40:20 bouyer Exp $ */
 
 /*-
  * Copyright (c) 2011 The NetBSD Foundation, Inc.
@@ -33,22 +33,21 @@
 
 /* 
  * Based on: x86/ipi.c
- * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.9 2011/12/30 12:16:19 cherry Exp $"); 
+ * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.10 2012/02/17 18:40:20 bouyer Exp $"); 
  */
 
-__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.9 2011/12/30 12:16:19 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.10 2012/02/17 18:40:20 bouyer Exp $");
 
 #include <sys/types.h>
 
 #include <sys/atomic.h>
+#include <sys/cpu.h>
 #include <sys/mutex.h>
-#include <sys/cpu.h>
 #include <sys/device.h>
 #include <sys/xcall.h>
 #include <sys/errno.h>
 #include <sys/systm.h>
 
-#include <machine/cpu.h>
 #ifdef __x86_64__
 #include <machine/fpu.h>
 #else
--- a/sys/arch/xen/x86/xen_pmap.c	Fri Feb 17 16:57:57 2012 +0000
+++ b/sys/arch/xen/x86/xen_pmap.c	Fri Feb 17 18:40:18 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_pmap.c,v 1.16 2012/01/28 07:19:17 cherry Exp $	*/
+/*	$NetBSD: xen_pmap.c,v 1.17 2012/02/17 18:40:20 bouyer Exp $	*/
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -102,7 +102,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.16 2012/01/28 07:19:17 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.17 2012/02/17 18:40:20 bouyer Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -350,34 +350,7 @@
 		xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
 		pmap->pm_pdir[index]);
 #endif /* PAE */
-}
-
-static void
-pmap_kpm_sync_xcall(void *arg1, void *arg2)
-{
-	KASSERT(arg1 != NULL);
-	KASSERT(arg2 != NULL);
-
-	struct pmap *pmap = arg1;
-	int index = *(int *)arg2;
-	KASSERT(pmap == pmap_kernel() || index < PDIR_SLOT_PTE);
-	
-	struct cpu_info *ci = xpq_cpu();
-
-#ifdef PAE
-	KASSERTMSG(pmap == pmap_kernel(), "%s not allowed for PAE user pmaps", __func__);
-#endif /* PAE */
-
-	if (__predict_true(pmap != pmap_kernel()) &&
-	    pmap != ci->ci_pmap) {
-		/* User pmap changed. Nothing to do. */
-		return;
-	}
-
-	/* Update per-cpu kpm */
-	pmap_kpm_setpte(ci, pmap, index);
-	pmap_pte_flush();
-	return;
+	xpq_flush_queue();
 }
 
 /*
@@ -387,68 +360,30 @@
 void
 xen_kpm_sync(struct pmap *pmap, int index)
 {
-	uint64_t where;
+	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
 	
 	KASSERT(pmap != NULL);
 
 	pmap_pte_flush();
 
-	if (__predict_false(xpq_cpu != &x86_curcpu)) { /* Too early to xcall */
-		CPU_INFO_ITERATOR cii;
-		struct cpu_info *ci;
-		int s = splvm();
-		for (CPU_INFO_FOREACH(cii, ci)) {
-			if (ci == NULL) {
-				continue;
-			}
-			if (pmap == pmap_kernel() ||
-			    ci->ci_cpumask & pmap->pm_cpus) {
-				pmap_kpm_setpte(ci, pmap, index);
-			}
+	for (CPU_INFO_FOREACH(cii, ci)) {
+		if (ci == NULL) {
+			continue;
 		}
-		pmap_pte_flush();
-		splx(s);
-		return;
-	}
-
-	if (pmap == pmap_kernel()) {
-		where = xc_broadcast(XC_HIGHPRI,
-		    pmap_kpm_sync_xcall, pmap, &index);
-		xc_wait(where);
-	} else {
-		KASSERT(mutex_owned(pmap->pm_lock));
-		KASSERT(kpreempt_disabled());
+		if (pmap != pmap_kernel() &&
+		    (ci->ci_cpumask & pmap->pm_xen_ptp_cpus) == 0)
+			continue;
 
-		CPU_INFO_ITERATOR cii;
-		struct cpu_info *ci;
-		for (CPU_INFO_FOREACH(cii, ci)) {
-			if (ci == NULL) {
-				continue;
-			}
-			while (ci->ci_cpumask & pmap->pm_cpus) {
-#ifdef MULTIPROCESSOR
-#define CPU_IS_CURCPU(ci) __predict_false((ci) == curcpu())
-#else /* MULTIPROCESSOR */
-#define CPU_IS_CURCPU(ci) __predict_true((ci) == curcpu())
-#endif /* MULTIPROCESSOR */
-#if 0 /* XXX: Race with remote pmap_load() */
-				if (ci->ci_want_pmapload &&
-				    !CPU_IS_CURCPU(ci)) {
-					/*
-					 * XXX: make this more cpu
-					 *  cycle friendly/co-operate
-					 *  with pmap_load()
-					 */
-					continue;
-				    }
-#endif /* 0 */
-				where = xc_unicast(XC_HIGHPRI, pmap_kpm_sync_xcall,
-				    pmap, &index, ci);
-				xc_wait(where);
-				break;
-			}
+		/* take the lock and check again */
+		mutex_enter(&ci->ci_kpm_mtx);
+		if (pmap == pmap_kernel() ||
+		    (ci->ci_cpumask & pmap->pm_xen_ptp_cpus) != 0) {
+			pmap_kpm_setpte(ci, pmap, index);
 		}
+		mutex_exit(&ci->ci_kpm_mtx);
 	}
+	return;
 }
 
 #endif /* PAE || __x86_64__ */