Pull up following revision(s) (requested by bouyer in ticket #31)
author    riz <riz@NetBSD.org>
date      Wed, 22 Feb 2012 18:59:05 +0000
branch    netbsd-6
changeset 255434 2ecf83854b19
parent    255433 1bcb8fb3e499
child     255435 5b0a3aab012c
Pull up following revision(s) (requested by bouyer in ticket #31):
    sys/arch/x86/x86/pmap.c: revision 1.166
    sys/arch/xen/x86/cpu.c: revision 1.83
- Make pmap_write_protect() work with pmap_kernel() too
  ((va & L2_FRAME) strips the high bits of an LP64 address; see the
  sketch just below this message)
- use pmap_protect() in pmap_pdp_ctor() to remap the PDP read-only
  instead of (ab)using pmap_kenter_pa(). No more "mapping already
  present" on console with DIAGNOSTIC kernels
- make sure to zero the whole PDP (NTOPLEVEL_PDES doesn't include
  high-level entries on i386 and i386PAE, reserved by Xen). Not sure
  how it has worked before
- remove an always-true test (&& pmap != pmap_kernel(); we KASSERT
  that at the function entry)
use pmap_protect() instead of pmap_kenter_pa() to remap R/O an
existing page (this pattern is sketched just before the diff). This
gets rid of the last "mapping already present" warnings.
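
For illustration, a minimal standalone sketch of the first fix: on amd64,
kernel virtual addresses are sign-extended, so masking with L2_FRAME
(which only covers VA bits 21..47) strips the high bits and produces a
bogus block end, while x86_round_pdr(va + 1) rounds up to the next 2 MB
boundary without touching the high bits. The constants below are
hand-copied approximations of the amd64 definitions for the demo only;
nothing in this sketch comes from the patch itself.

    #include <stdint.h>
    #include <stdio.h>

    #define NBPD_L2  (1ULL << 21)           /* bytes mapped by one L2 entry (2 MB) */
    #define L2_FRAME 0x0000ffffffe00000ULL  /* frame mask: VA bits 21..47 only */
    #define PDOFSET  (NBPD_L2 - 1)

    int
    main(void)
    {
            uint64_t va = 0xffffffff80201000ULL;  /* sign-extended kernel VA */

            /* old code: the mask drops bits 48..63, so the kernel
             * address decays into user-range garbage */
            uint64_t old_end = (va & L2_FRAME) + NBPD_L2;

            /* new code, roughly what x86_round_pdr(va + 1) expands to:
             * round up to the next L2 boundary, high bits preserved */
            uint64_t new_end = ((va + 1) + PDOFSET) & ~PDOFSET;

            printf("old blockend: %#llx\n", (unsigned long long)old_end);
            printf("new blockend: %#llx\n", (unsigned long long)new_end);
            return 0;
    }

Running this prints 0x0000ffff80400000 for the old computation against
0xffffffff80400000 for the new one, which is why the old form only ever
worked for non-kernel pmaps.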
sys/arch/x86/x86/pmap.c
sys/arch/xen/x86/cpu.c
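
The recurring pattern in the diff below: both pmap.c and cpu.c stop
re-entering an already-mapped VA with pmap_kenter_pa() (which trips the
DIAGNOSTIC "mapping already present" check) and instead downgrade the
live mapping with pmap_protect() followed by pmap_update(). A hedged
sketch of that pattern, using hypothetical names (remap_readonly, obj,
npages) that do not appear in the patch:

    #include <sys/param.h>
    #include <uvm/uvm_extern.h>

    /*
     * Downgrade an existing, already-entered kernel mapping to
     * read-only in place: pmap_protect() rewrites the protection on
     * the live PTEs, pmap_update() flushes the deferred operations.
     */
    static void
    remap_readonly(vaddr_t obj, int npages)
    {
            pmap_protect(pmap_kernel(), obj,
                obj + (vaddr_t)npages * PAGE_SIZE, VM_PROT_READ);
            pmap_update(pmap_kernel());
    }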
--- a/sys/arch/x86/x86/pmap.c	Wed Feb 22 18:56:45 2012 +0000
+++ b/sys/arch/x86/x86/pmap.c	Wed Feb 22 18:59:05 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.164.2.1 2012/02/22 18:56:47 riz Exp $	*/
+/*	$NetBSD: pmap.c,v 1.164.2.2 2012/02/22 18:59:05 riz Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.164.2.1 2012/02/22 18:56:47 riz Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.164.2.2 2012/02/22 18:59:05 riz Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -1979,7 +1979,7 @@
 		pmap_pte_set(&pva[index], (pd_entry_t)
 		        (pmap_pa2pte(pa) | PG_u | PG_RW | PG_V));
 #if defined(XEN) && defined(__x86_64__)
-		if(i == PTP_LEVELS && pmap != pmap_kernel()) {
+		if(i == PTP_LEVELS) {
 			/*
 			 * Update the per-cpu PD on all cpus the current
 			 * pmap is active on 
@@ -2090,8 +2090,8 @@
 	    npde * sizeof(pd_entry_t));
 
 	/* zero the rest */
-	memset(&pdir[PDIR_SLOT_KERN + npde], 0,
-	    (NTOPLEVEL_PDES - (PDIR_SLOT_KERN + npde)) * sizeof(pd_entry_t));
+	memset(&pdir[PDIR_SLOT_KERN + npde], 0, (PAGE_SIZE * PDP_SIZE) -
+	    (PDIR_SLOT_KERN + npde) * sizeof(pd_entry_t));
 
 	if (VM_MIN_KERNEL_ADDRESS != KERNBASE) {
 		int idx = pl_i(KERNBASE, PTP_LEVELS);
@@ -2107,11 +2107,10 @@
 #ifdef XEN
 	s = splvm();
 	object = (vaddr_t)v;
+	pmap_protect(pmap_kernel(), object, object + (PAGE_SIZE * PDP_SIZE),
+	    VM_PROT_READ);
+	pmap_update(pmap_kernel());
 	for (i = 0; i < PDP_SIZE; i++, object += PAGE_SIZE) {
-		(void) pmap_extract(pmap_kernel(), object, &pdirpa);
-		/* FIXME: This should use pmap_protect() .. */
-		pmap_kenter_pa(object, pdirpa, VM_PROT_READ, 0);
-		pmap_update(pmap_kernel());
 		/*
 		 * pin as L2/L4 page, we have to do the page with the
 		 * PDIR_SLOT_PTE entries last
@@ -2121,6 +2120,7 @@
 			continue;
 #endif
 
+		(void) pmap_extract(pmap_kernel(), object, &pdirpa);
 #ifdef __x86_64__
 		xpq_queue_pin_l4_table(xpmap_ptom_masked(pdirpa));
 #else
@@ -3779,7 +3779,7 @@
 		pt_entry_t *spte, *epte;
 		int i;
 
-		blockend = (va & L2_FRAME) + NBPD_L2;
+		blockend = x86_round_pdr(va + 1);
 		if (blockend > eva)
 			blockend = eva;
 
--- a/sys/arch/xen/x86/cpu.c	Wed Feb 22 18:56:45 2012 +0000
+++ b/sys/arch/xen/x86/cpu.c	Wed Feb 22 18:59:05 2012 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.80.2.1 2012/02/22 18:56:45 riz Exp $	*/
+/*	$NetBSD: cpu.c,v 1.80.2.2 2012/02/22 18:59:05 riz Exp $	*/
 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
 
 /*-
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.80.2.1 2012/02/22 18:56:45 riz Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.80.2.2 2012/02/22 18:59:05 riz Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -1324,9 +1324,9 @@
 #endif /* __x86_64__ else PAE */
 
 	/* Xen wants R/O */
-	pmap_kenter_pa((vaddr_t)ci->ci_kpm_pdir, ci->ci_kpm_pdirpa,
-	    VM_PROT_READ, 0);
-
+	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_kpm_pdir,
+	    (vaddr_t)ci->ci_kpm_pdir + PAGE_SIZE, VM_PROT_READ);
+	pmap_update(pmap_kernel());
 #if defined(PAE)
 	/* Initialise L3 entry 3. This mapping is shared across all
 	 * pmaps and is static, ie; loading a new pmap will not update
@@ -1336,8 +1336,9 @@
 	ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;
 
 	/* Mark L3 R/O (Xen wants this) */
-	pmap_kenter_pa((vaddr_t)ci->ci_pae_l3_pdir, ci->ci_pae_l3_pdirpa,
-		VM_PROT_READ, 0);
+	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_pae_l3_pdir,
+	    (vaddr_t)ci->ci_pae_l3_pdir + PAGE_SIZE, VM_PROT_READ);
+	pmap_update(pmap_kernel());
 
 	xpq_queue_pin_l3_table(xpmap_ptom_masked(ci->ci_pae_l3_pdirpa));