changeset:   249508:fd215332d2ca
branch:      trunk
parent:      249507:23259eaa7cf7
child:       249509:8995c7e65fbc
author:      maxv <maxv@NetBSD.org>
date:        Thu, 23 Mar 2017 18:08:06 +0000
Remove PG_k completely.
files:
sys/arch/i386/i386/machdep.c
sys/arch/x86/include/pmap.h
sys/arch/x86/x86/pmap.c
sys/arch/xen/x86/cpu.c
sys/arch/xen/x86/xen_pmap.c
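
Note: PG_k was defined as 0 in sys/arch/x86/include/pmap.h (see the pmap.h hunk below), so dropping it from the PTE flag expressions cannot change any generated page-table entry; the commit is a dead-flag cleanup plus some nearby style fixes in xen_pmap.c. A minimal standalone sketch of why OR-ing the flag away is a no-op follows; the PG_V/PG_RW values are hypothetical stand-ins, only PG_k's value (0) comes from the removed #define:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins for the x86 PTE bits; only PG_k's value (0) is
 * taken from the #define removed in pmap.h, the others are illustrative.
 */
#define PG_V	0x001u	/* valid */
#define PG_RW	0x002u	/* writable */
#define PG_k	0x000u	/* "kernel" flag, defined as 0, i.e. a no-op */

int
main(void)
{
	uint64_t pa = 0x1000u;		/* arbitrary page-aligned address */
	uint64_t with_k    = pa | PG_RW | PG_k | PG_V;
	uint64_t without_k = pa | PG_RW | PG_V;

	/* OR-ing in a zero mask cannot change the entry: prints "identical". */
	printf("%s\n", (with_k == without_k) ? "identical" : "different");
	return 0;
}
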
--- a/sys/arch/i386/i386/machdep.c	Thu Mar 23 17:25:51 2017 +0000
+++ b/sys/arch/i386/i386/machdep.c	Thu Mar 23 18:08:06 2017 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.780 2017/02/23 03:34:22 kamil Exp $	*/
+/*	$NetBSD: machdep.c,v 1.781 2017/03/23 18:08:06 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.780 2017/02/23 03:34:22 kamil Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.781 2017/03/23 18:08:06 maxv Exp $");
 
 #include "opt_beep.h"
 #include "opt_compat_ibcs2.h"
@@ -1019,7 +1019,7 @@
 		pt_entry_t pte;
 
 		pte = pmap_pa2pte((vaddr_t)gdtstore - KERNBASE);
-		pte |= PG_k | PG_RO | xpmap_pg_nx | PG_V;
+		pte |= PG_RO | xpmap_pg_nx | PG_V;
 
 		if (HYPERVISOR_update_va_mapping((vaddr_t)gdtstore, pte,
 		    UVMF_INVLPG) < 0) {
@@ -1223,7 +1223,7 @@
 		pt_entry_t pte;
 
 		pte = pmap_pa2pte((vaddr_t)tmpgdt - KERNBASE);
-		pte |= PG_k | PG_RW | xpmap_pg_nx | PG_V;
+		pte |= PG_RW | xpmap_pg_nx | PG_V;
 
 		if (HYPERVISOR_update_va_mapping((vaddr_t)tmpgdt, pte, UVMF_INVLPG) < 0) {
 			panic("tmpgdt page relaim RW update failed.\n");
--- a/sys/arch/x86/include/pmap.h	Thu Mar 23 17:25:51 2017 +0000
+++ b/sys/arch/x86/include/pmap.h	Thu Mar 23 18:08:06 2017 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.63 2017/03/05 09:08:18 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.64 2017/03/23 18:08:06 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -180,8 +180,6 @@
 	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
 #endif
 
-#define PG_k 0
-
 /*
  * MD flags that we use for pmap_enter and pmap_kenter_pa:
  */
--- a/sys/arch/x86/x86/pmap.c	Thu Mar 23 17:25:51 2017 +0000
+++ b/sys/arch/x86/x86/pmap.c	Thu Mar 23 18:08:06 2017 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.243 2017/03/15 16:42:18 maxv Exp $	*/
+/*	$NetBSD: pmap.c,v 1.244 2017/03/23 18:08:06 maxv Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.243 2017/03/15 16:42:18 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.244 2017/03/23 18:08:06 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -981,7 +981,7 @@
 	} else
 #endif /* DOM0OPS */
 		npte = pmap_pa2pte(pa);
-	npte |= protection_codes[prot] | PG_k | PG_V | pmap_pg_g;
+	npte |= protection_codes[prot] | PG_V | pmap_pg_g;
 	npte |= pmap_pat_flags(flags);
 	opte = pmap_pte_testset(pte, npte); /* zap! */
 #if defined(DIAGNOSTIC)
@@ -1018,7 +1018,7 @@
 		npte = pmap_pa2pte(pa);
 
 	npte = pmap_pa2pte(pa);
-	npte |= protection_codes[prot] | PG_k | PG_V;
+	npte |= protection_codes[prot] | PG_V;
 	pmap_pte_set(pte, npte);
 }
 
@@ -1354,7 +1354,7 @@
 	memset((void *)(xen_dummy_user_pgd + KERNBASE), 0, PAGE_SIZE);
 	/* Mark read-only */
 	HYPERVISOR_update_va_mapping(xen_dummy_user_pgd + KERNBASE,
-	    pmap_pa2pte(xen_dummy_user_pgd) | PG_k | PG_V | pmap_pg_nx,
+	    pmap_pa2pte(xen_dummy_user_pgd) | PG_V | pmap_pg_nx,
 	    UVMF_INVLPG);
 	/* Pin as L4 */
 	xpq_queue_pin_l4_table(xpmap_ptom_masked(xen_dummy_user_pgd));
@@ -2109,7 +2109,7 @@
 	 * This pdir will NEVER be active in kernel mode, so mark
 	 * recursive entry invalid.
 	 */
-	pdir[PDIR_SLOT_PTE] = pmap_pa2pte(pdirpa) | PG_k;
+	pdir[PDIR_SLOT_PTE] = pmap_pa2pte(pdirpa);
 
 	/*
 	 * PDP constructed this way won't be for the kernel, hence we
@@ -3076,8 +3076,7 @@
 	pt_entry_t *zpte;
 	vaddr_t zerova;
 
-	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_M | PG_U |
-	    PG_k;
+	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_M | PG_U;
 
 	kpreempt_disable();
 
@@ -3123,8 +3122,7 @@
 	vaddr_t zerova;
 	bool rv;
 
-	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_M | PG_U |
-	    PG_k;
+	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_M | PG_U;
 
 	ci = curcpu();
 	zerova = ci->vpage[VPAGE_ZER];
@@ -3171,7 +3169,7 @@
 	pt_entry_t *srcpte, *dstpte;
 	vaddr_t srcva, dstva;
 
-	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_U | PG_k;
+	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_U;
 
 	kpreempt_disable();
 
@@ -3213,10 +3211,9 @@
 	KASSERT(kpreempt_disabled());
 
 #ifndef XEN
-	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_U | PG_M |
-	    PG_k;
+	const pd_entry_t pteflags = PG_V | PG_RW | pmap_pg_nx | PG_U | PG_M;
 #else
-	const pd_entry_t pteflags = PG_V | pmap_pg_nx | PG_U | PG_M | PG_k;
+	const pd_entry_t pteflags = PG_V | pmap_pg_nx | PG_U | PG_M;
 #endif
 
 	ci = curcpu();
@@ -4074,8 +4071,7 @@
 		npte |= PG_u;
 	else if (va < VM_MAX_ADDRESS)
 		panic("PTE space accessed");	/* XXXmaxv: no longer needed? */
-	else
-		npte |= PG_k;
+
 	if (pmap == pmap_kernel())
 		npte |= pmap_pg_g;
 	if (flags & VM_PROT_ALL) {
@@ -4271,7 +4267,7 @@
 #endif
 		kpreempt_disable();
 		pmap_pte_set(early_zero_pte, pmap_pa2pte(pa) | PG_V |
-		    PG_RW | pmap_pg_nx | PG_k);
+		    PG_RW | pmap_pg_nx);
 		pmap_pte_flush();
 		pmap_update_pg((vaddr_t)early_zerop);
 		memset(early_zerop, 0, PAGE_SIZE);
@@ -4329,7 +4325,7 @@
 
 			KASSERT(!pmap_valid_entry(pdep[i]));
 			pa = pmap_get_physpage();
-			pte = pmap_pa2pte(pa) | PG_k | PG_V | PG_RW;
+			pte = pmap_pa2pte(pa) | PG_V | PG_RW;
 			pmap_pte_set(&pdep[i], pte);
 
 #if defined(XEN) && (defined(PAE) || defined(__x86_64__))
--- a/sys/arch/xen/x86/cpu.c	Thu Mar 23 17:25:51 2017 +0000
+++ b/sys/arch/xen/x86/cpu.c	Thu Mar 23 18:08:06 2017 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.109 2017/02/11 14:11:24 maxv Exp $	*/
+/*	$NetBSD: cpu.c,v 1.110 2017/03/23 18:08:06 maxv Exp $	*/
 
 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.109 2017/02/11 14:11:24 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.110 2017/03/23 18:08:06 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -1228,7 +1228,7 @@
 
 	/* Recursive kernel mapping */
 	ci->ci_kpm_pdir[PDIR_SLOT_PTE] = xpmap_ptom_masked(ci->ci_kpm_pdirpa)
-	    | PG_k | PG_V | xpmap_pg_nx;
+	    | PG_V | xpmap_pg_nx;
 #elif defined(PAE)
 	/* Copy over the pmap_kernel() shadow L2 entries */
 	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir + PDIR_SLOT_KERN,
@@ -1244,7 +1244,7 @@
 	 * Initialize L3 entry 3. This mapping is shared across all pmaps and is
 	 * static, ie: loading a new pmap will not update this entry.
 	 */
-	ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;
+	ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_V;
 
 	/* Xen wants a RO L3. */
 	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_pae_l3_pdir,
--- a/sys/arch/xen/x86/xen_pmap.c	Thu Mar 23 17:25:51 2017 +0000
+++ b/sys/arch/xen/x86/xen_pmap.c	Thu Mar 23 18:08:06 2017 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_pmap.c,v 1.25 2016/12/26 08:53:11 cherry Exp $	*/
+/*	$NetBSD: xen_pmap.c,v 1.26 2017/03/23 18:08:06 maxv Exp $	*/
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -22,7 +22,6 @@
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
  */
 
 /*
@@ -102,7 +101,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.25 2016/12/26 08:53:11 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.26 2017/03/23 18:08:06 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -147,7 +146,7 @@
 int
 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
-        paddr_t ma;
+	paddr_t ma;
 
 	if (__predict_false(pa < pmap_pa_start || pmap_pa_end <= pa)) {
 		ma = pa; /* XXX hack */
@@ -176,15 +175,14 @@
 	else
 		pte = kvtopte(va);
 
-	npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
-	     PG_V | PG_k;
+	npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) | PG_V;
 	if (flags & PMAP_NOCACHE)
 		npte |= PG_N;
 
 	if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
 		npte |= PG_NX;
 
-	opte = pmap_pte_testset (pte, npte); /* zap! */
+	opte = pmap_pte_testset(pte, npte); /* zap! */
 
 	if (pmap_valid_entry(opte)) {
 #if defined(MULTIPROCESSOR)
@@ -192,7 +190,8 @@
 			pmap_update_pg(va);
 		} else {
 			kpreempt_disable();
-			pmap_tlb_shootdown(pmap_kernel(), va, opte, TLBSHOOT_KENTER);
+			pmap_tlb_shootdown(pmap_kernel(), va, opte,
+			    TLBSHOOT_KENTER);
 			kpreempt_enable();
 		}
 #else
@@ -335,13 +334,13 @@
 	}
 #ifdef PAE
 	xpq_queue_pte_update(
-		xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(index)]),
-		pmap->pm_pdir[index]);
+	    xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(index)]),
+	    pmap->pm_pdir[index]);
 #elif defined(__x86_64__)
 	xpq_queue_pte_update(
-		xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
-		pmap->pm_pdir[index]);
-#endif /* PAE */
+	    xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
+	    pmap->pm_pdir[index]);
+#endif
 	xpq_flush_queue();
 }