- use mutex obj for pageable object yamt-pagecache
author yamt <yamt@NetBSD.org>
Fri, 18 Nov 2011 00:57:33 +0000
branch yamt-pagecache
changeset 280319 a1d3b9bde8fd
parent 280318 b5710c7b89f0
child 280320 0049bf52b8d8
- use mutex obj for pageable object - add a function to wait for a mutex obj being available - replace some "livelock" kpauses with it
sys/kern/kern_mutex_obj.c
sys/sys/mutex.h
sys/uvm/uvm_aobj.c
sys/uvm/uvm_loan.c
sys/uvm/uvm_page.c
sys/uvm/uvm_page.h
sys/uvm/uvm_pdaemon.c
--- a/sys/kern/kern_mutex_obj.c	Fri Nov 18 00:51:28 2011 +0000
+++ b/sys/kern/kern_mutex_obj.c	Fri Nov 18 00:57:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_mutex_obj.c,v 1.5 2011/09/27 01:02:38 jym Exp $	*/
+/*	$NetBSD: kern_mutex_obj.c,v 1.5.2.1 2011/11/18 00:57:33 yamt Exp $	*/
 
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.5 2011/09/27 01:02:38 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.5.2.1 2011/11/18 00:57:33 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -134,10 +134,56 @@
 	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
 	     __func__, mo, mo->mo_refcnt);
 
-	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
+	/*
+	 * if mo_refcnt is 1, no one except us has a reference to it and
+	 * thus it's stable.
+	 */
+	if (mo->mo_refcnt != 1 && atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
 		return false;
 	}
 	mutex_destroy(&mo->mo_lock);
 	pool_cache_put(mutex_obj_cache, mo);
 	return true;
 }
+
+/*
+ * mutex_obj_pause:
+ *
+ *	Pause until lock1 is available.
+ *	Temporarily release and reacquire lock2.
+ *
+ *	Typically used when we need to acquire locks in a reversed order
+ *	and trylock failed.
+ */
+void
+mutex_obj_pause(kmutex_t *lock1, kmutex_t *lock2)
+{
+
+	KASSERT(mutex_owned(lock2));
+	mutex_obj_hold(lock1);
+	mutex_exit(lock2);
+	mutex_enter(lock1);
+	mutex_exit(lock1);
+	mutex_obj_free(lock1);
+	mutex_enter(lock2);
+}
+
+/*
+ * mutex_obj_alloc_kernel_obj_lock:
+ *
+ *	mutex_obj_alloc for kernel object lock.
+ *	used for bootstrap.
+ */
+kmutex_t *
+mutex_obj_alloc_kernel_obj_lock(kmutex_type_t type, int ipl)
+{
+	static struct kmutexobj kernel_obj_lock;
+	struct kmutexobj *mo = &kernel_obj_lock;
+
+	KASSERT(mo->mo_refcnt == 0);
+	mutex_obj_ctor(NULL, mo, 0);
+	mutex_init(&mo->mo_lock, type, ipl);
+	mo->mo_refcnt = 1;
+	return (kmutex_t *)mo;
+}
+
--- a/sys/sys/mutex.h	Fri Nov 18 00:51:28 2011 +0000
+++ b/sys/sys/mutex.h	Fri Nov 18 00:57:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: mutex.h,v 1.20 2010/02/08 09:54:27 skrll Exp $	*/
+/*	$NetBSD: mutex.h,v 1.20.10.1 2011/11/18 00:57:33 yamt Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -210,6 +210,8 @@
 kmutex_t *mutex_obj_alloc(kmutex_type_t, int);
 void	mutex_obj_hold(kmutex_t *);
 bool	mutex_obj_free(kmutex_t *);
+void	mutex_obj_pause(kmutex_t *, kmutex_t *);
+kmutex_t *mutex_obj_alloc_kernel_obj_lock(kmutex_type_t, int);
 
 #endif /* _KERNEL */
 
--- a/sys/uvm/uvm_aobj.c	Fri Nov 18 00:51:28 2011 +0000
+++ b/sys/uvm/uvm_aobj.c	Fri Nov 18 00:57:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_aobj.c,v 1.116.2.4 2011/11/13 01:18:02 yamt Exp $	*/
+/*	$NetBSD: uvm_aobj.c,v 1.116.2.5 2011/11/18 00:57:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.4 2011/11/13 01:18:02 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.5 2011/11/18 00:57:33 yamt Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -437,7 +437,6 @@
 uao_create(vsize_t size, int flags)
 {
 	static struct uvm_aobj kernel_object_store;
-	static kmutex_t kernel_object_lock;
 	static int kobj_alloced = 0;
 	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
 	struct uvm_aobj *aobj;
@@ -506,8 +505,8 @@
 	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
 	if (__predict_false(kernobj)) {
 		/* Initialisation only once, for UAO_FLAG_KERNOBJ. */
-		mutex_init(&kernel_object_lock, MUTEX_DEFAULT, IPL_NONE);
-		uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
+		uvm_obj_setlock(&aobj->u_obj,
+		    mutex_obj_alloc_kernel_obj_lock(MUTEX_DEFAULT, IPL_NONE));
 	}
 
 	/*
@@ -1195,8 +1194,8 @@
 	 * walk the list of all aobjs.
 	 */
 
+	mutex_enter(&uao_list_lock);
 restart:
-	mutex_enter(&uao_list_lock);
 	for (aobj = LIST_FIRST(&uao_list);
 	     aobj != NULL;
 	     aobj = nextaobj) {
@@ -1208,9 +1207,7 @@
 		 */
 
 		if (!mutex_tryenter(aobj->u_obj.vmobjlock)) {
-			mutex_exit(&uao_list_lock);
-			/* XXX Better than yielding but inadequate. */
-			kpause("livelock", false, 1, NULL);
+			mutex_obj_pause(aobj->u_obj.vmobjlock, &uao_list_lock);
 			goto restart;
 		}
 
--- a/sys/uvm/uvm_loan.c	Fri Nov 18 00:51:28 2011 +0000
+++ b/sys/uvm/uvm_loan.c	Fri Nov 18 00:57:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_loan.c,v 1.81.2.2 2011/11/06 22:05:00 yamt Exp $	*/
+/*	$NetBSD: uvm_loan.c,v 1.81.2.3 2011/11/18 00:57:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.2 2011/11/06 22:05:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.3 2011/11/18 00:57:33 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -983,8 +983,7 @@
 			if (mutex_tryenter(slock)) {
 				break;
 			}
-			/* XXX Better than yielding but inadequate. */
-			kpause("livelock", false, 1, &uvm_pageqlock);
+			mutex_obj_pause(slock, &uvm_pageqlock);
 			slock = NULL;
 		}
 
--- a/sys/uvm/uvm_page.c	Fri Nov 18 00:51:28 2011 +0000
+++ b/sys/uvm/uvm_page.c	Fri Nov 18 00:57:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.178.2.5 2011/11/13 01:18:02 yamt Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.178.2.6 2011/11/18 00:57:33 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.5 2011/11/13 01:18:02 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.6 2011/11/18 00:57:33 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -2057,17 +2057,12 @@
 bool
 uvm_page_locked_p(struct vm_page *pg)
 {
+	kmutex_t * const lock = uvm_page_getlock(pg);
 
-	if (pg->uobject != NULL) {
-		return mutex_owned(pg->uobject->vmobjlock);
-	}
-	if (pg->uanon != NULL) {
-		return mutex_owned(pg->uanon->an_lock);
-	}
-	return true;
+	return lock == NULL || mutex_owned(lock);
 }
 
-static kmutex_t *
+kmutex_t *
 uvm_page_getlock(struct vm_page *pg)
 {
 
--- a/sys/uvm/uvm_page.h	Fri Nov 18 00:51:28 2011 +0000
+++ b/sys/uvm/uvm_page.h	Fri Nov 18 00:57:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.73.2.5 2011/11/14 14:21:41 yamt Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.73.2.6 2011/11/18 00:57:34 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -297,6 +297,7 @@
 bool uvm_pagecheckdirty(struct vm_page *, bool);
 bool uvm_pagereadonly_p(struct vm_page *);
 bool uvm_page_locked_p(struct vm_page *);
+kmutex_t *uvm_page_getlock(struct vm_page *);
 bool uvm_page_samelock_p(struct vm_page *, struct vm_page *);
 
 /*
--- a/sys/uvm/uvm_pdaemon.c	Fri Nov 18 00:51:28 2011 +0000
+++ b/sys/uvm/uvm_pdaemon.c	Fri Nov 18 00:57:33 2011 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pdaemon.c,v 1.103.2.1 2011/11/02 21:54:01 yamt Exp $	*/
+/*	$NetBSD: uvm_pdaemon.c,v 1.103.2.2 2011/11/18 00:57:34 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.103.2.1 2011/11/02 21:54:01 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.103.2.2 2011/11/18 00:57:34 yamt Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -416,23 +416,14 @@
 uvmpd_trylockowner(struct vm_page *pg)
 {
 	struct uvm_object *uobj = pg->uobject;
-	kmutex_t *slock;
+	kmutex_t *lock;
 
 	KASSERT(mutex_owned(&uvm_pageqlock));
-
-	if (uobj != NULL) {
-		slock = uobj->vmobjlock;
-	} else {
-		struct vm_anon *anon = pg->uanon;
-
-		KASSERT(anon != NULL);
-		slock = anon->an_lock;
-	}
-
-	if (!mutex_tryenter(slock)) {
+	lock = uvm_page_getlock(pg);
+	KASSERT(lock != NULL);
+	if (!mutex_tryenter(lock)) {
 		return NULL;
 	}
-
 	if (uobj == NULL) {
 
 		/*
@@ -447,7 +438,7 @@
 		}
 	}
 
-	return slock;
+	return lock;
 }
 
 #if defined(VMSWAP)
@@ -739,10 +730,8 @@
 			 */
 			lockownerfail++;
 			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
-				mutex_exit(&uvm_pageqlock);
-				/* XXX Better than yielding but inadequate. */
-				kpause("livelock", false, 1, NULL);
-				mutex_enter(&uvm_pageqlock);
+				mutex_obj_pause(uvm_page_getlock(p),
+				    &uvm_pageqlock);
 				lockownerfail = 0;
 			}
 			continue;