- fix page loaning XXX make O->A loaning further yamt-pagecache
author: yamt <yamt@NetBSD.org>
Sun, 20 Nov 2011 10:52:33 +0000
branch: yamt-pagecache
changeset 280321 45795c28b299
parent 280320 0049bf52b8d8
child 280322 42edd38096c1
- fix page loaning XXX make O->A loaning further - add some statistics
sys/kern/init_main.c
sys/uvm/uvm.h
sys/uvm/uvm_extern.h
sys/uvm/uvm_init.c
sys/uvm/uvm_loan.c
sys/uvm/uvm_meter.c
sys/uvm/uvm_page.c
sys/uvm/uvm_page.h
sys/uvm/uvm_page_status.c
--- a/sys/kern/init_main.c	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/kern/init_main.c	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: init_main.c,v 1.436.2.1 2011/11/02 21:53:59 yamt Exp $	*/
+/*	$NetBSD: init_main.c,v 1.436.2.2 2011/11/20 10:52:33 yamt Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.436.2.1 2011/11/02 21:53:59 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.436.2.2 2011/11/20 10:52:33 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_ipsec.h"
@@ -324,10 +324,6 @@
 
 	percpu_init();
 
-	/* Initialize lock caches. */
-	mutex_obj_init();
-	rw_obj_init();
-
 	/* Passive serialization. */
 	pserialize_init();
 
--- a/sys/uvm/uvm.h	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm.h	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm.h,v 1.62.4.2 2011/11/12 02:54:04 yamt Exp $	*/
+/*	$NetBSD: uvm.h,v 1.62.4.3 2011/11/20 10:52:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -88,6 +88,19 @@
 	 * 	[1] anonymous (PQ_SWAPBACKED)
 	 */
 	int64_t pagestate[2][UVM_PAGE_NUM_STATUS];
+
+	int64_t loan_obj;	/* O->K loan */
+	int64_t unloan_obj;	/* O->K unloan */
+	int64_t loanbreak_obj;	/* O->K loan resolved on write */
+	int64_t loanfree_obj;	/* O->K loan resolved on free */
+
+	int64_t loan_anon;	/* A->K loan */
+	int64_t unloan_anon;	/* A->K unloan */
+	int64_t loanbreak_anon;	/* A->K loan resolved on write */
+	int64_t loanfree_anon;	/* A->K loan resolved on free */
+
+	int64_t loan_zero;	/* O->K loan (zero) */
+	int64_t unloan_zero;	/* O->K unloan (zero) */
 };
 
 /*
--- a/sys/uvm/uvm_extern.h	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm_extern.h	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_extern.h,v 1.176.2.3 2011/11/14 14:24:54 yamt Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.176.2.4 2011/11/20 10:52:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -456,12 +456,26 @@
 	int64_t colorhit;
 	int64_t colormiss;
 	int64_t ncolors;
+
 	int64_t possiblydirtypages;
 	int64_t cleanpages;
 	int64_t dirtypages;
 	int64_t possiblydirtyanonpages;
 	int64_t cleananonpages;
 	int64_t dirtyanonpages;
+
+	int64_t loan_obj;	/* O->K loan */
+	int64_t unloan_obj;	/* O->K unloan */
+	int64_t loanbreak_obj;	/* O->K loan resolved on write */
+	int64_t loanfree_obj;	/* O->K loan resolved on free */
+
+	int64_t loan_anon;	/* A->K loan */
+	int64_t unloan_anon;	/* A->K unloan */
+	int64_t loanbreak_anon;	/* A->K loan resolved on write */
+	int64_t loanfree_anon;	/* A->K loan resolved on free */
+
+	int64_t loan_zero;	/* O->K loan (zero) */
+	int64_t unloan_zero;	/* O->K unloan (zero) */
 };
 
 #ifdef _KERNEL
--- a/sys/uvm/uvm_init.c	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm_init.c	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_init.c,v 1.41 2011/04/24 03:56:50 rmind Exp $	*/
+/*	$NetBSD: uvm_init.c,v 1.41.4.1 2011/11/20 10:52:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.41 2011/04/24 03:56:50 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.41.4.1 2011/11/20 10:52:33 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -42,6 +42,8 @@
 #include <sys/resourcevar.h>
 #include <sys/kmem.h>
 #include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
 #include <sys/vnode.h>
 
 #include <uvm/uvm.h>
@@ -157,7 +159,15 @@
 	kmem_init();
 
 	/*
+	 * Initialize lock caches.
+	 */
+
+	mutex_obj_init();
+	rw_obj_init();
+
+	/*
 	 * Initialize the uvm_loan() facility.
+	 * REQUIRE: mutex_obj_init
 	 */
 
 	uvm_loan_init();
--- a/sys/uvm/uvm_loan.c	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm_loan.c	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_loan.c,v 1.81.2.3 2011/11/18 00:57:33 yamt Exp $	*/
+/*	$NetBSD: uvm_loan.c,v 1.81.2.4 2011/11/20 10:52:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.3 2011/11/18 00:57:33 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.4 2011/11/20 10:52:33 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -340,6 +340,7 @@
 uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
     struct vm_anon *anon)
 {
+	struct uvm_cpu *ucpu;
 	struct vm_page *pg;
 	int error;
 
@@ -428,6 +429,11 @@
 	/* unlock and return success */
 	if (pg->uobject)
 		mutex_exit(pg->uobject->vmobjlock);
+
+	ucpu = uvm_cpu_get();
+	ucpu->loan_anon++;
+	uvm_cpu_put(ucpu);
+
 	UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
 	return (1);
 }
@@ -444,6 +450,7 @@
 static int
 uvm_loanpage(struct vm_page **pgpp, int npages)
 {
+	struct uvm_cpu *ucpu;
 	int i;
 	int error = 0;
 
@@ -475,15 +482,20 @@
 
 	uvm_page_unbusy(pgpp, npages);
 
-	if (error) {
-		/*
-		 * backout what we've done
-		 */
-		kmutex_t *slock = pgpp[0]->uobject->vmobjlock;
+	if (i > 0) {
+		ucpu = uvm_cpu_get();
+		ucpu->loan_obj += i;
+		uvm_cpu_put(ucpu);
+		if (error) {
+			/*
+			 * backout what we've done
+			 */
+			kmutex_t *slock = pgpp[0]->uobject->vmobjlock;
 
-		mutex_exit(slock);
-		uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
-		mutex_enter(slock);
+			mutex_exit(slock);
+			uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
+			mutex_enter(slock);
+		}
 	}
 
 	UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
@@ -829,7 +841,6 @@
  */
 
 static struct uvm_object uvm_loanzero_object;
-static kmutex_t uvm_loanzero_lock;
 
 static int
 uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
@@ -871,12 +882,17 @@
 	}
 
 	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */
+		struct uvm_cpu *ucpu;
+
 		mutex_enter(&uvm_pageqlock);
 		pg->loan_count++;
 		mutex_exit(&uvm_pageqlock);
 		mutex_exit(uvm_loanzero_object.vmobjlock);
 		**output = pg;
 		(*output)++;
+		ucpu = uvm_cpu_get();
+		ucpu->loan_zero++;
+		uvm_cpu_put(ucpu);
 		return (1);
 	}
 
@@ -965,6 +981,10 @@
 
 	mutex_enter(&uvm_pageqlock);
 	while (npages-- > 0) {
+		struct uvm_object *obj;
+		struct vm_anon *anon;
+		struct uvm_cpu *ucpu;
+
 		pg = *ploans++;
 
 		/*
@@ -998,22 +1018,35 @@
 		 * an anon) or free it (if the page is now unowned).
 		 */
 
+		obj = pg->uobject;
+		anon = pg->uanon;
 		KASSERT(pg->loan_count > 0);
 		pg->loan_count--;
-		if (pg->uobject == NULL && pg->uanon != NULL &&
+		if (obj == NULL && anon != NULL &&
 		    (pg->pqflags & PQ_ANON) == 0) {
 			KASSERT(pg->loan_count > 0);
 			pg->loan_count--;
 			pg->pqflags |= PQ_ANON;
 		}
-		if (pg->loan_count == 0 && pg->uobject == NULL &&
-		    pg->uanon == NULL) {
+		if (pg->loan_count == 0 && obj == NULL && anon == NULL) {
 			KASSERT((pg->flags & PG_BUSY) == 0);
 			uvm_pagefree(pg);
 		}
 		if (slock != NULL) {
 			mutex_exit(slock);
 		}
+		ucpu = uvm_cpu_get();
+		if (obj != NULL) {
+			KASSERT(anon == NULL); /* XXX no O->A loan */
+			if (obj == &uvm_loanzero_object) {
+				ucpu->unloan_zero++;
+			} else {
+				ucpu->unloan_obj++;
+			}
+		} else if (anon != NULL) {
+			ucpu->unloan_anon++;
+		}
+		uvm_cpu_put(ucpu);
 	}
 	mutex_exit(&uvm_pageqlock);
 }
@@ -1087,10 +1120,7 @@
 uvm_loan_init(void)
 {
 
-	mutex_init(&uvm_loanzero_lock, MUTEX_DEFAULT, IPL_NONE);
-	uvm_obj_init(&uvm_loanzero_object, &ulz_pager, false, 0);
-	uvm_obj_setlock(&uvm_loanzero_object, &uvm_loanzero_lock);
-
+	uvm_obj_init(&uvm_loanzero_object, &ulz_pager, true, 0);
 	UVMHIST_INIT(loanhist, 300);
 }
 
@@ -1105,14 +1135,17 @@
 struct vm_page *
 uvm_loanbreak(struct vm_page *uobjpage)
 {
+	struct uvm_cpu *ucpu;
 	struct vm_page *pg;
 #ifdef DIAGNOSTIC
 	struct uvm_object *uobj = uobjpage->uobject;
 #endif
+	const unsigned int count = uobjpage->loan_count;
 
 	KASSERT(uobj != NULL);
 	KASSERT(mutex_owned(uobj->vmobjlock));
 	KASSERT(uobjpage->flags & PG_BUSY);
+	KASSERT(count > 0);
 
 	/* alloc new un-owned page */
 	pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -1124,39 +1157,30 @@
 	 * one and clear the fake flags on the new page (keep it busy).
 	 * force a reload of the old page by clearing it from all
 	 * pmaps.
-	 * transfer dirtiness of the old page to the new page.
 	 * then lock the page queues to rename the pages.
 	 */
 
 	uvm_pagecopy(uobjpage, pg);	/* old -> new */
 	pg->flags &= ~PG_FAKE;
+	KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
 	pmap_page_protect(uobjpage, VM_PROT_NONE);
-	if (uvm_pagegetdirty(uobjpage) == UVM_PAGE_STATUS_UNKNOWN &&
-	    !pmap_clear_modify(uobjpage)) {
-		uvm_pagemarkdirty(uobjpage, UVM_PAGE_STATUS_CLEAN);
-	}
-	if (uvm_pagegetdirty(uobjpage) == UVM_PAGE_STATUS_CLEAN) {
-		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_CLEAN);
-	} else {
-		/* uvm_pagecopy marked it dirty */
-		KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
-		/* a object with a dirty page should be dirty. */
-		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
-	}
 	if (uobjpage->flags & PG_WANTED)
 		wakeup(uobjpage);
 	/* uobj still locked */
 	uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
 	UVM_PAGE_OWN(uobjpage, NULL);
 
-	mutex_enter(&uvm_pageqlock);
-
 	/*
 	 * replace uobjpage with new page.
+	 *
+	 * this will update the page dirtiness statistics.
 	 */
 
 	uvm_pagereplace(uobjpage, pg);
 
+	mutex_enter(&uvm_pageqlock);
+	KASSERT(uobjpage->uanon == NULL); /* XXX no O->A loan */
+
 	/*
 	 * if the page is no longer referenced by
 	 * an anon (i.e. we are breaking an O->K
@@ -1179,16 +1203,23 @@
 	 * PG_BUSY.   it can now replace uobjpage.
 	 */
 
+	ucpu = uvm_cpu_get();
+	ucpu->loanbreak_obj += count;
+	uvm_cpu_put(ucpu);
 	return pg;
 }
 
 int
 uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
 {
+	struct uvm_cpu *ucpu;
 	struct vm_page *pg;
+	unsigned int oldstatus;
+	const unsigned int count = anon->an_page->loan_count;
 
 	KASSERT(mutex_owned(anon->an_lock));
 	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
+	KASSERT(count > 0);
 
 	/* get new un-owned replacement page */
 	pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -1198,15 +1229,18 @@
 
 	/* copy old -> new */
 	uvm_pagecopy(anon->an_page, pg);
+	KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
 
 	/* force reload */
 	pmap_page_protect(anon->an_page, VM_PROT_NONE);
+	oldstatus = uvm_pagegetdirty(anon->an_page);
 	mutex_enter(&uvm_pageqlock);	  /* KILL loan */
 
 	anon->an_page->uanon = NULL;
 	/* in case we owned */
 	anon->an_page->pqflags &= ~PQ_ANON;
 
+	KASSERT(uobj == NULL); /* XXX O->A loan is currently broken */
 	if (uobj) {
 		/* if we were receiver of loan */
 		anon->an_page->loan_count--;
@@ -1234,6 +1268,12 @@
 	UVM_PAGE_OWN(pg, NULL);
 
 	/* done! */
-
+	if (uobj == NULL) {
+		ucpu = uvm_cpu_get();
+		ucpu->loanbreak_anon += count;
+		ucpu->pagestate[1][oldstatus]--;
+		ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY]++;
+		uvm_cpu_put(ucpu);
+	}
 	return 0;
 }
--- a/sys/uvm/uvm_meter.c	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm_meter.c	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_meter.c,v 1.56.4.4 2011/11/14 14:24:54 yamt Exp $	*/
+/*	$NetBSD: uvm_meter.c,v 1.56.4.5 2011/11/20 10:52:34 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.4 2011/11/14 14:24:54 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.5 2011/11/20 10:52:34 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -176,6 +176,10 @@
 	u.colormiss = uvmexp.colormiss;
 	u.cpuhit = uvmexp.cpuhit;
 	u.cpumiss = uvmexp.cpumiss;
+	/*
+	 * XXX should use xcall
+	 * XXX should be an array
+	 */
 	for (CPU_INFO_FOREACH(cii, ci)) {
 		struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;
 
@@ -187,6 +191,19 @@
 		    ucpu->pagestate[1][UVM_PAGE_STATUS_UNKNOWN];
 		u.cleananonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN];
 		u.dirtyanonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY];
+
+		u.loan_obj += ucpu->loan_obj;
+		u.unloan_obj += ucpu->unloan_obj;
+		u.loanbreak_obj += ucpu->loanbreak_obj;
+		u.loanfree_obj += ucpu->loanfree_obj;
+
+		u.loan_anon += ucpu->loan_anon;
+		u.unloan_anon += ucpu->unloan_anon;
+		u.loanbreak_anon += ucpu->loanbreak_anon;
+		u.loanfree_anon += ucpu->loanfree_anon;
+
+		u.loan_zero += ucpu->loan_zero;
+		u.unloan_zero += ucpu->unloan_zero;
 	}
 	node = *rnode;
 	node.sysctl_data = &u;
--- a/sys/uvm/uvm_page.c	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm_page.c	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.178.2.6 2011/11/18 00:57:33 yamt Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.178.2.7 2011/11/20 10:52:34 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.6 2011/11/18 00:57:33 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.7 2011/11/20 10:52:34 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -166,25 +166,21 @@
  */
 
 static inline void
-uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
-    struct vm_page *where)
+uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg)
 {
 
 	KASSERT(uobj == pg->uobject);
 	KASSERT(mutex_owned(uobj->vmobjlock));
 	KASSERT((pg->flags & PG_TABLED) == 0);
-	KASSERT(where == NULL || (where->flags & PG_TABLED));
-	KASSERT(where == NULL || (where->uobject == uobj));
 
 	if ((pg->pqflags & PQ_STAT) != 0) {
 		struct uvm_cpu *ucpu;
 		const unsigned int status = uvm_pagegetdirty(pg);
 		const bool isaobj = (pg->pqflags & PQ_AOBJ) != 0;
 
-		kpreempt_disable();
-		ucpu = curcpu()->ci_data.cpu_uvm;
+		ucpu = uvm_cpu_get();
 		ucpu->pagestate[isaobj][status]++;
-		kpreempt_enable();
+		uvm_cpu_put(ucpu);
 		if (!isaobj) {
 			KASSERT((pg->pqflags & PQ_FILE) != 0);
 			if (uobj->uo_npages == 0) {
@@ -235,7 +231,7 @@
 		KASSERT(error == ENOMEM);
 		return error;
 	}
-	uvm_pageinsert_list(uobj, pg, NULL);
+	uvm_pageinsert_list(uobj, pg);
 	return 0;
 }
 
@@ -258,10 +254,9 @@
 		const unsigned int status = uvm_pagegetdirty(pg);
 		const bool isaobj = (pg->pqflags & PQ_AOBJ) != 0;
 
-		kpreempt_disable();
-		ucpu = curcpu()->ci_data.cpu_uvm;
+		ucpu = uvm_cpu_get();
 		ucpu->pagestate[isaobj][status]--;
-		kpreempt_enable();
+		uvm_cpu_put(ucpu);
 		if (!isaobj) {
 			KASSERT((pg->pqflags & PQ_FILE) != 0);
 			if (uobj->uo_npages == 1) {
@@ -1329,10 +1324,9 @@
 		anon->an_page = pg;
 		pg->pqflags = PQ_ANON;
 		atomic_inc_uint(&uvmexp.anonpages);
-		kpreempt_disable();
-		ucpu = curcpu()->ci_data.cpu_uvm;
+		ucpu = uvm_cpu_get();
 		ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN]++;
-		kpreempt_enable();
+		uvm_cpu_put(ucpu);
 	} else {
 		if (obj) {
 			int error;
@@ -1414,7 +1408,12 @@
 			    UVM_PAGE_DIRTY_TAG);
 		}
 	}
-	uvm_pageinsert_list(uobj, newpg, oldpg);
+	/*
+	 * oldpg->pqflags is stable.  newpg is not reachable by others yet.
+	 */
+	newpg->pqflags =
+	    (newpg->pqflags & ~PQ_STAT) | (oldpg->pqflags & PQ_STAT);
+	uvm_pageinsert_list(uobj, newpg);
 	uvm_pageremove_list(uobj, oldpg);
 }
 
@@ -1522,6 +1521,8 @@
 	 */
 
 	if (pg->loan_count) {
+		struct uvm_object * const obj = pg->uobject;
+
 		KASSERT(pg->wire_count == 0);
 
 		/*
@@ -1535,8 +1536,8 @@
 		 * unbusy the page, and we're done.
 		 */
 
-		if (pg->uobject != NULL) {
-			uvm_pageremove(pg->uobject, pg);
+		if (obj != NULL) {
+			uvm_pageremove(obj, pg);
 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
 		} else if (pg->uanon != NULL) {
 			if ((pg->pqflags & PQ_ANON) == 0) {
@@ -1545,10 +1546,9 @@
 				pg->pqflags &= ~PQ_ANON;
 				atomic_dec_uint(&uvmexp.anonpages);
 				status = uvm_pagegetdirty(pg);
-				kpreempt_disable();
-				ucpu = curcpu()->ci_data.cpu_uvm;
+				ucpu = uvm_cpu_get();
 				ucpu->pagestate[1][status]--;
-				kpreempt_enable();
+				uvm_cpu_put(ucpu);
 			}
 			pg->uanon->an_page = NULL;
 			pg->uanon = NULL;
@@ -1565,6 +1565,13 @@
 			if (pg->uanon == NULL) {
 				uvm_pagedequeue(pg);
 			}
+			ucpu = uvm_cpu_get();
+			if (obj != NULL) {
+				ucpu->loanfree_obj += pg->loan_count;
+			} else {
+				ucpu->loanfree_anon += pg->loan_count;
+			}
+			uvm_cpu_put(ucpu);
 			return;
 		}
 	}
@@ -1579,10 +1586,9 @@
 		pg->uanon->an_page = NULL;
 		atomic_dec_uint(&uvmexp.anonpages);
 		status = uvm_pagegetdirty(pg);
-		kpreempt_disable();
-		ucpu = curcpu()->ci_data.cpu_uvm;
+		ucpu = uvm_cpu_get();
 		ucpu->pagestate[1][status]--;
-		kpreempt_enable();
+		uvm_cpu_put(ucpu);
 	}
 
 	/*
--- a/sys/uvm/uvm_page.h	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm_page.h	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.73.2.6 2011/11/18 00:57:34 yamt Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.73.2.7 2011/11/20 10:52:34 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -300,6 +300,10 @@
 kmutex_t *uvm_page_getlock(struct vm_page *);
 bool uvm_page_samelock_p(struct vm_page *, struct vm_page *);
 
+struct uvm_cpu;
+struct uvm_cpu *uvm_cpu_get(void);
+void uvm_cpu_put(struct uvm_cpu *);
+
 /*
  * page dirtiness status for uvm_pagegetdirty and uvm_pagemarkdirty
  *
--- a/sys/uvm/uvm_page_status.c	Sun Nov 20 10:49:20 2011 +0000
+++ b/sys/uvm/uvm_page_status.c	Sun Nov 20 10:52:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page_status.c,v 1.1.2.4 2011/11/13 01:18:02 yamt Exp $	*/
+/*	$NetBSD: uvm_page_status.c,v 1.1.2.5 2011/11/20 10:52:35 yamt Exp $	*/
 
 /*-
  * Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.4 2011/11/13 01:18:02 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.5 2011/11/20 10:52:35 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -73,11 +73,10 @@
 	struct uvm_cpu *ucpu;
 
 	KASSERT(oldstatus != newstatus);
-	kpreempt_disable();
-	ucpu = curcpu()->ci_data.cpu_uvm;
+	ucpu = uvm_cpu_get();
 	ucpu->pagestate[isanon][oldstatus]--;
 	ucpu->pagestate[isanon][newstatus]++;
-	kpreempt_enable();
+	uvm_cpu_put(ucpu);
 }
 
 /*
@@ -92,7 +91,6 @@
 	const uint64_t idx = pg->offset >> PAGE_SHIFT;
 	const unsigned int oldstatus = uvm_pagegetdirty(pg);
 
-	KASSERT(uobj != NULL || pg->uanon != NULL);
 	KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
 	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
 	KASSERT(uvm_page_locked_p(pg));
@@ -181,3 +179,20 @@
 	}
 	return modified;
 }
+
+struct uvm_cpu *
+uvm_cpu_get(void)
+{
+
+	kpreempt_disable();
+	return curcpu()->ci_data.cpu_uvm;
+}
+
+void
+uvm_cpu_put(struct uvm_cpu *ucpu)
+{
+
+	KASSERT(kpreempt_disabled());
+	KASSERT(curcpu()->ci_data.cpu_uvm == ucpu);
+	kpreempt_enable();
+}