changeset:   280329:13ba88b4af2f
branch:      yamt-pagecache
user:        yamt <yamt@NetBSD.org>
date:        Fri, 02 Dec 2011 16:33:09 +0000
parent:      280328:c6d2c78a51ff
child:       280330:9c2feed8ba58
files:       sys/arch/sparc64/include/pmap.h
             sys/arch/sparc64/sparc64/pmap.c
description:
adapt sparc64. compile tested.
--- a/sys/arch/sparc64/include/pmap.h	Wed Nov 30 14:36:36 2011 +0000
+++ b/sys/arch/sparc64/include/pmap.h	Fri Dec 02 16:33:09 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.55 2011/10/06 06:55:34 mrg Exp $	*/
+/*	$NetBSD: pmap.h,v 1.55.2.1 2011/12/02 16:33:09 yamt Exp $	*/
 
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -118,10 +118,8 @@
 #endif
 
 struct pmap {
-	struct uvm_object pm_obj;
-	kmutex_t pm_obj_lock;
-#define pm_lock pm_obj.vmobjlock
-#define pm_refs pm_obj.uo_refs
+	unsigned int pm_refs;
+	TAILQ_HEAD(, vm_page) pm_ptps;
 	LIST_ENTRY(pmap) pm_list[PMAP_LIST_MAXNUMCPU];	/* per cpu ctx used list */
 
 	struct pmap_statistics pm_stats;
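
The header change drops the embedded uvm_object, whose memq list and vmobjlock previously tracked the pmap's page-table pages, in favor of a bare reference count and an intrusive tail queue. Below is a minimal standalone sketch of the <sys/queue.h> TAILQ idiom that pm_ptps relies on; "struct fake_page" and its "pageq" member are illustrative stand-ins for struct vm_page and its pageq.queue linkage, not the real UVM definitions.

/*
 * Illustrative sketch only: fake_page/pageq stand in for
 * struct vm_page and its pageq.queue link.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_page {
	unsigned long pa;			/* pretend physical address */
	TAILQ_ENTRY(fake_page) pageq;		/* intrusive list linkage */
};

static TAILQ_HEAD(, fake_page) ptps = TAILQ_HEAD_INITIALIZER(ptps);

int
main(void)
{
	struct fake_page *pg;

	/* pmap_create side: the list starts empty (TAILQ_INIT in the diff). */
	for (unsigned long pa = 0x2000; pa <= 0x6000; pa += 0x2000) {
		if ((pg = malloc(sizeof(*pg))) == NULL)
			abort();
		pg->pa = pa;
		TAILQ_INSERT_TAIL(&ptps, pg, pageq);	/* as in pmap_enter */
	}

	/* pmap_destroy side: pop from the head until the queue drains. */
	while ((pg = TAILQ_FIRST(&ptps)) != NULL) {
		TAILQ_REMOVE(&ptps, pg, pageq);
		printf("freeing ptp at pa 0x%lx\n", pg->pa);
		free(pg);
	}
	return 0;
}
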
--- a/sys/arch/sparc64/sparc64/pmap.c	Wed Nov 30 14:36:36 2011 +0000
+++ b/sys/arch/sparc64/sparc64/pmap.c	Fri Dec 02 16:33:09 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.275 2011/07/12 07:51:34 mrg Exp $	*/
+/*	$NetBSD: pmap.c,v 1.275.2.1 2011/12/02 16:33:09 yamt Exp $	*/
 /*
  *
  * Copyright (C) 1996-1999 Eduardo Horvath.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.275 2011/07/12 07:51:34 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.275.2.1 2011/12/02 16:33:09 yamt Exp $");
 
 #undef	NO_VCACHE /* Don't forget the locked TLB in dostart */
 #define	HWREF
@@ -1379,10 +1379,8 @@
 	memset(pm, 0, sizeof *pm);
 	DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm));
 
-	mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
-	uvm_obj_init(&pm->pm_obj, NULL, false, 1);
-	uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
-
+	pm->pm_refs = 1;
+	TAILQ_INIT(&pm->pm_ptps);
 	if (pm != pmap_kernel()) {
 		while (!pmap_get_page(&pm->pm_physaddr)) {
 			uvm_wait("pmap_create");
@@ -1416,7 +1414,7 @@
 #else
 #define pmap_cpus_active 0
 #endif
-	struct vm_page *pg, *nextpg;
+	struct vm_page *pg;
 
 	if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) {
 		return;
@@ -1444,22 +1442,18 @@
 #endif
 
 	/* we could be a little smarter and leave pages zeroed */
-	for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) {
+	while ((pg = TAILQ_FIRST(&pm->pm_ptps)) != NULL) {
 #ifdef DIAGNOSTIC
 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 #endif
 
-		KASSERT((pg->flags & PG_MARKER) == 0);
-		nextpg = TAILQ_NEXT(pg, listq.queue);
-		TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
+		TAILQ_REMOVE(&pm->pm_ptps, pg, pageq.queue);
 		KASSERT(md->mdpg_pvh.pv_pmap == NULL);
 		dcache_flush_page_cpuset(VM_PAGE_TO_PHYS(pg), pmap_cpus_active);
 		uvm_pagefree(pg);
 	}
 	pmap_free_page((paddr_t)(u_long)pm->pm_segs, pmap_cpus_active);
 
-	uvm_obj_destroy(&pm->pm_obj, false);
-	mutex_destroy(&pm->pm_obj_lock);
 	pool_cache_put(&pmap_cache, pm);
 }
 
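The removed loop hand-rolled a safe traversal of pm_obj.memq, saving nextpg before each unlink and asserting against PG_MARKER, which guards against dummy marker pages other memq walkers may insert; a private pm_ptps list never contains those. Because every iteration removes the element it visits, a drain-from-the-head loop is equivalent and simpler. The two styles side by side, on the same illustrative types as above (TAILQ_FOREACH_SAFE ships with the BSD <sys/queue.h>; some libc copies omit it):

#include <sys/queue.h>

struct fake_page {
	TAILQ_ENTRY(fake_page) pageq;
};
TAILQ_HEAD(fake_head, fake_page);

/* Removed style: remember the successor before unlinking, the manual
 * equivalent of TAILQ_FOREACH_SAFE. */
static void
drain_safe(struct fake_head *h)
{
	struct fake_page *pg, *nextpg;

	TAILQ_FOREACH_SAFE(pg, h, pageq, nextpg) {
		TAILQ_REMOVE(h, pg, pageq);
		/* ... free pg ... */
	}
}

/* New style: keep taking the head until the queue is empty; equivalent
 * whenever each iteration removes the element it looks at. */
static void
drain_head(struct fake_head *h)
{
	struct fake_page *pg;

	while ((pg = TAILQ_FIRST(h)) != NULL) {
		TAILQ_REMOVE(h, pg, pageq);
		/* ... free pg ... */
	}
}
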
@@ -1809,7 +1803,7 @@
 		ptpg = PHYS_TO_VM_PAGE(ptp);
 		if (ptpg) {
 			ptpg->offset = (uint64_t)va & (0xfffffLL << 23);
-			TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
+			TAILQ_INSERT_TAIL(&pm->pm_ptps, ptpg, pageq.queue);
 		} else {
 			KASSERT(pm == pmap_kernel());
 		}
@@ -1821,7 +1815,7 @@
 		ptpg = PHYS_TO_VM_PAGE(ptp);
 		if (ptpg) {
 			ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13;
-			TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
+			TAILQ_INSERT_TAIL(&pm->pm_ptps, ptpg, pageq.queue);
 		} else {
 			KASSERT(pm == pmap_kernel());
 		}
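
The last two hunks only retarget the PTP bookkeeping from pm_obj.memq to pm_ptps; the offset tags themselves are unchanged. As a worked example of those tags (my reading, not stated in the diff: with sparc64's 8 KB pages, the first expression keeps VA bits 23..42 in place for a leaf page-table page, while the second repacks VA bits 43..52 at bit 13 for a directory page, so both tags stay page-aligned and occupy disjoint bit ranges):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t va = 0x0000123456789000ULL;	/* arbitrary sample VA */

	/* Leaf PTP tag: VA bits 23..42, kept in place. */
	uint64_t leaf_off = va & (0xfffffULL << 23);

	/* Directory PTP tag: VA bits 43..52, repacked at bit 13 so the
	 * tag remains 8 KB-aligned. */
	uint64_t dir_off = ((va >> 43) & 0x3ffULL) << 13;

	printf("va       = 0x%016llx\n", (unsigned long long)va);
	printf("leaf tag = 0x%016llx\n", (unsigned long long)leaf_off);
	printf("dir tag  = 0x%016llx\n", (unsigned long long)dir_off);
	return 0;
}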