- track the number of clean/dirty/unknown pages in the system. yamt-pagecache
authoryamt <yamt@NetBSD.org>
Fri, 11 Nov 2011 10:34:24 +0000
branchyamt-pagecache
changeset 280309 da732f520a27
parent 280308 d95c2faee230
child 280310 e463764bb7c9
- track the number of clean/dirty/unknown pages in the system. - g/c PG_MARKER
sys/uvm/uvm.h
sys/uvm/uvm_extern.h
sys/uvm/uvm_meter.c
sys/uvm/uvm_page.c
sys/uvm/uvm_page.h
sys/uvm/uvm_page_status.c
--- a/sys/uvm/uvm.h	Thu Nov 10 23:51:17 2011 +0000
+++ b/sys/uvm/uvm.h	Fri Nov 11 10:34:24 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm.h,v 1.62 2011/05/17 04:18:07 mrg Exp $	*/
+/*	$NetBSD: uvm.h,v 1.62.4.1 2011/11/11 10:34:24 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -81,6 +81,8 @@
 					   pages in the idle loop */
 	int pages[PGFL_NQUEUES];	/* total of pages in page_free */
 	u_int emap_gen;			/* emap generation number */
+
+	int64_t pagestate[UVM_PAGE_NUM_STATUS];
 };
 
 /*
--- a/sys/uvm/uvm_extern.h	Thu Nov 10 23:51:17 2011 +0000
+++ b/sys/uvm/uvm_extern.h	Fri Nov 11 10:34:24 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_extern.h,v 1.176 2011/09/01 06:40:28 matt Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.176.2.1 2011/11/11 10:34:24 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -456,6 +456,9 @@
 	int64_t colorhit;
 	int64_t colormiss;
 	int64_t ncolors;
+	int64_t mightdirtypages;
+	int64_t cleanpages;
+	int64_t dirtypages;
 };
 
 #ifdef _KERNEL
--- a/sys/uvm/uvm_meter.c	Thu Nov 10 23:51:17 2011 +0000
+++ b/sys/uvm/uvm_meter.c	Fri Nov 11 10:34:24 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_meter.c,v 1.56 2011/02/02 15:25:27 chuck Exp $	*/
+/*	$NetBSD: uvm_meter.c,v 1.56.4.1 2011/11/11 10:34:24 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56 2011/02/02 15:25:27 chuck Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.1 2011/11/11 10:34:24 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -176,7 +176,13 @@
 	u.colormiss = uvmexp.colormiss;
 	u.cpuhit = uvmexp.cpuhit;
 	u.cpumiss = uvmexp.cpumiss;
+	for (CPU_INFO_FOREACH(cii, ci)) {
+		struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;
 
+		u.mightdirtypages += ucpu->pagestate[UVM_PAGE_STATUS_UNKNOWN];
+		u.cleanpages += ucpu->pagestate[UVM_PAGE_STATUS_CLEAN];
+		u.dirtypages += ucpu->pagestate[UVM_PAGE_STATUS_DIRTY];
+	}
 	node = *rnode;
 	node.sysctl_data = &u;
 	node.sysctl_size = sizeof(u);
--- a/sys/uvm/uvm_page.c	Thu Nov 10 23:51:17 2011 +0000
+++ b/sys/uvm/uvm_page.c	Fri Nov 11 10:34:24 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.178.2.2 2011/11/06 22:05:00 yamt Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.178.2.3 2011/11/11 10:34:24 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.2 2011/11/06 22:05:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.3 2011/11/11 10:34:24 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -423,7 +423,15 @@
 			if (atop(paddr) >= seg->avail_start &&
 			    atop(paddr) < seg->avail_end) {
 				uvmexp.npages++;
-				/* add page to free pool */
+				/*
+				 * add page to free pool
+				 *
+				 * adjust pagestate[] so that it won't go
+				 * negative.
+				 */
+				KASSERT(uvm_pagegetdirty(&seg->pgs[i])
+				    == UVM_PAGE_STATUS_UNKNOWN);
+				boot_cpu.pagestate[UVM_PAGE_STATUS_UNKNOWN]++;
 				uvm_pagefree(&seg->pgs[i]);
 			}
 		}
@@ -1308,6 +1316,7 @@
 	 * otherwise we race with uvm_pglistalloc.
 	 */
 	pg->pqflags = 0;
+	ucpu->pagestate[UVM_PAGE_STATUS_CLEAN]++;
 	mutex_spin_exit(&uvm_fpageqlock);
 	if (anon) {
 		anon->an_page = pg;
@@ -1470,6 +1479,7 @@
 	struct pgflist *pgfl;
 	struct uvm_cpu *ucpu;
 	int index, color, queue;
+	unsigned int status;
 	bool iszero;
 
 #ifdef DEBUG
@@ -1568,6 +1578,7 @@
 	color = VM_PGCOLOR_BUCKET(pg);
 	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
 
+	status = uvm_pagegetdirty(pg);
 #ifdef DEBUG
 	pg->uobject = (void *)0xdeadbeef;
 	pg->uanon = (void *)0xdeadbeef;
@@ -1599,6 +1610,7 @@
 	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
 		ucpu->page_idle_zero = vm_page_zero_enable;
 	}
+	ucpu->pagestate[status]--;
 
 	mutex_spin_exit(&uvm_fpageqlock);
 }
--- a/sys/uvm/uvm_page.h	Thu Nov 10 23:51:17 2011 +0000
+++ b/sys/uvm/uvm_page.h	Fri Nov 11 10:34:24 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.73.2.2 2011/11/06 22:05:00 yamt Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.73.2.3 2011/11/11 10:34:24 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -163,26 +163,29 @@
  * PG_RDONLY and PG_HOLE acts like a "read-only count".  ie. either of
  * them is set, the page should not be mapped writably.  typically
  * they are set by pgo_get to inform the fault handler.
+ *
+ * if you want to renumber PG_CLEAN and PG_DIRTY, check __CTASSERTs in
+ * uvm_page_status.c first.
  */
 
-#define	PG_BUSY		0x0001		/* page is locked */
-#define	PG_WANTED	0x0002		/* someone is waiting for page */
-#define	PG_TABLED	0x0004		/* page is in VP table  */
-#define	PG_CLEAN	0x0008		/* page is known clean */
+#define	PG_CLEAN	0x0001		/* page is known clean */
+#define	PG_DIRTY	0x0002		/* page is known dirty */
+#define	PG_BUSY		0x0004		/* page is locked */
+#define	PG_WANTED	0x0008		/* someone is waiting for page */
 #define	PG_PAGEOUT	0x0010		/* page to be freed for pagedaemon */
 #define PG_RELEASED	0x0020		/* page to be freed when unbusied */
 #define	PG_FAKE		0x0040		/* page is not yet initialized */
 #define	PG_RDONLY	0x0080		/* page must be mapped read-only */
 #define	PG_ZERO		0x0100		/* page is pre-zero'd */
-#define	PG_MARKER	0x0200		/* dummy marker page */
-#define	PG_DIRTY	0x0400		/* page is known dirty */
-#define	PG_HOLE		0x0800		/* XXX */
+#define	PG_TABLED	0x0200		/* page is in VP table  */
+#define	PG_HOLE		0x0400		/* XXX */
 
 #define PG_PAGER1	0x1000		/* pager-specific flag */
 
 #define	UVM_PGFLAGBITS \
-	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
-	"\11ZERO\12MARKER\13DIRTY\15PAGER1"
+	"\20\1CLEAN\2DIRTY\3BUSY\4WANTED" \
+	"\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
+	"\11ZERO\12TABLED\13HOLE\15PAGER1"
 
 #define PQ_FREE		0x0001		/* page is on free list */
 #define PQ_ANON		0x0002		/* page is part of an anon, rather
@@ -301,10 +304,14 @@
  * dirty or not.
  * basically, UVM_PAGE_STATUS_CLEAN implies that the page has no writable
  * mapping.
+ *
+ * if you want to renumber these, check __CTASSERTs in
+ * uvm_page_status.c first.
  */
-#define	UVM_PAGE_STATUS_DIRTY	(PG_DIRTY)
 #define	UVM_PAGE_STATUS_UNKNOWN	0
-#define	UVM_PAGE_STATUS_CLEAN	(PG_CLEAN)
+#define	UVM_PAGE_STATUS_CLEAN	1
+#define	UVM_PAGE_STATUS_DIRTY	2
+#define	UVM_PAGE_NUM_STATUS	3
 
 int uvm_page_lookup_freelist(struct vm_page *);
 
--- a/sys/uvm/uvm_page_status.c	Thu Nov 10 23:51:17 2011 +0000
+++ b/sys/uvm/uvm_page_status.c	Fri Nov 11 10:34:24 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page_status.c,v 1.1.2.1 2011/11/02 21:55:39 yamt Exp $	*/
+/*	$NetBSD: uvm_page_status.c,v 1.1.2.2 2011/11/11 10:34:24 yamt Exp $	*/
 
 /*-
  * Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.1 2011/11/02 21:55:39 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.1.2.2 2011/11/11 10:34:24 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -41,6 +41,15 @@
  */
 
 /*
+ * these constants are chosen to match so that we can convert between
+ * them quickly.
+ */
+
+__CTASSERT(UVM_PAGE_STATUS_UNKNOWN == 0);
+__CTASSERT(UVM_PAGE_STATUS_DIRTY == PG_DIRTY);
+__CTASSERT(UVM_PAGE_STATUS_CLEAN == PG_CLEAN);
+
+/*
  * uvm_pagegetdirty: return the dirtiness status (one of UVM_PAGE_STATUS_
  * values) of the page.
  */
@@ -58,24 +67,38 @@
 	return pg->flags & (PG_CLEAN|PG_DIRTY);
 }
 
+static void
+stat_update(unsigned int oldstatus, unsigned int newstatus)
+{
+	struct uvm_cpu *ucpu;
+
+	KASSERT(oldstatus != newstatus);
+	kpreempt_disable();
+	ucpu = curcpu()->ci_data.cpu_uvm;
+	ucpu->pagestate[oldstatus]--;
+	ucpu->pagestate[newstatus]++;
+	kpreempt_enable();
+}
+
 /*
  * uvm_pagemarkdirty: set the dirtiness status (one of UVM_PAGE_STATUS_ values)
  * of the page.
  */
 
 void
-uvm_pagemarkdirty(struct vm_page *pg, unsigned int status)
+uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
 {
 	struct uvm_object * const uobj = pg->uobject;
 	const uint64_t idx = pg->offset >> PAGE_SHIFT;
+	const unsigned int oldstatus = uvm_pagegetdirty(pg);
 
-	KASSERT((~status & (PG_CLEAN|PG_DIRTY)) != 0);
-	KASSERT((status & ~(PG_CLEAN|PG_DIRTY)) == 0);
+	KASSERT((~newstatus & (PG_CLEAN|PG_DIRTY)) != 0);
+	KASSERT((newstatus & ~(PG_CLEAN|PG_DIRTY)) == 0);
 	KASSERT(uvm_page_locked_p(pg));
 	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
 	    radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
 
-	if (uvm_pagegetdirty(pg) == status) {
+	if (oldstatus == newstatus) {
 		return;
 	}
 	/*
@@ -83,7 +106,7 @@
 	 * find possibly-dirty pages quickly.
 	 */
 	if (uobj != NULL) {
-		if (status == UVM_PAGE_STATUS_CLEAN) {
+		if (newstatus == UVM_PAGE_STATUS_CLEAN) {
 			radix_tree_clear_tag(&uobj->uo_pages, idx,
 			    UVM_PAGE_DIRTY_TAG);
 		} else {
@@ -91,16 +114,17 @@
 			    UVM_PAGE_DIRTY_TAG);
 		}
 	}
-	if (status == UVM_PAGE_STATUS_UNKNOWN) {
+	if (newstatus == UVM_PAGE_STATUS_UNKNOWN) {
 		/*
 		 * start relying on pmap-level dirtiness tracking.
 		 */
 		pmap_clear_modify(pg);
 	}
 	pg->flags &= ~(PG_CLEAN|PG_DIRTY);
-	pg->flags |= status;
+	pg->flags |= newstatus;
 	KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
 	    radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
+	stat_update(oldstatus, newstatus);
 }
 
 /*