
libgo patch committed: Update to weekly.2012-03-04 release


I have committed a patch to update libgo to the weekly.2012-03-04
release.  As usual, this e-mail message only includes the diffs to files
that are specific to gccgo.  Bootstrapped and ran Go testsuite on
x86_64-unknown-linux-gnu.  Committed to mainline.
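
One change worth calling out: the runtime's own global variables are now
registered as garbage collector roots at startup.  main now calls the new
runtime_check function, which hands a statically initialized struct
root_list (covering syscall_Envs and os_Args) to __go_register_gc_roots
before installing the signal handlers, and the struct root_list
declaration moves from mgc0.c to runtime.h so other files can use it.
Purely as an illustration (a minimal sketch based on that declaration; the
globals below are hypothetical and not part of the patch), registering
additional roots looks like this:

  /* Layout matches the struct root_list declaration added to runtime.h
     below: a singly linked list of { address, size } pairs, terminated
     by a { NULL, 0 } entry.  Static initialization of the flexible
     array member is a GNU C extension, which the runtime itself also
     relies on in runtime.c.  */
  #include <stddef.h>

  struct root_list {
  	struct root_list *next;
  	struct root {
  		void *decl;
  		size_t size;
  	} roots[];
  };

  extern void __go_register_gc_roots (struct root_list *);

  /* Hypothetical globals that the collector must scan.  */
  static void *my_table[16];
  static char *my_name;

  static struct root_list my_roots =
  { NULL,
    { { &my_table, sizeof my_table },
      { &my_name, sizeof my_name },
      { NULL, 0 } },
  };

  /* Register once at startup, in the same spirit as runtime_check.  */
  void
  register_my_roots (void)
  {
  	__go_register_gc_roots (&my_roots);
  }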

Ian

diff -r 99cd08e36ace libgo/MERGE
--- a/libgo/MERGE	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/MERGE	Tue Mar 06 09:42:08 2012 -0800
@@ -1,4 +1,4 @@
-96bd78e7d35e
+f4470a54e6db
 
 The first line of this file holds the Mercurial revision number of the
 last merge done from the master library sources.
diff -r 99cd08e36ace libgo/Makefile.am
--- a/libgo/Makefile.am	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/Makefile.am	Tue Mar 06 09:42:08 2012 -0800
@@ -658,10 +658,17 @@
 go_net_sockopt_file = go/net/sockopt_linux.go
 go_net_sockoptip_file = go/net/sockoptip_linux.go
 else
+if LIBGO_IS_FREEBSD
 go_net_cgo_file = go/net/cgo_bsd.go
 go_net_sock_file = go/net/sock_bsd.go
 go_net_sockopt_file = go/net/sockopt_bsd.go
-go_net_sockoptip_file = go/net/sockoptip_bsd.go
+go_net_sockoptip_file = go/net/sockoptip_bsd.go go/net/sockoptip_freebsd.go
+else
+go_net_cgo_file = go/net/cgo_bsd.go
+go_net_sock_file = go/net/sock_bsd.go
+go_net_sockopt_file = go/net/sockopt_bsd.go
+go_net_sockoptip_file = go/net/sockoptip_bsd.go go/net/sockoptip_netbsd.go
+endif
 endif
 endif
 endif
@@ -704,6 +711,7 @@
 	go/net/ipsock.go \
 	go/net/ipsock_posix.go \
 	go/net/lookup_unix.go \
+	go/net/mac.go \
 	go/net/net.go \
 	go/net/parse.go \
 	go/net/pipe.go \
@@ -1126,8 +1134,7 @@
 	go/go/ast/walk.go
 go_go_build_files = \
 	go/go/build/build.go \
-	go/go/build/dir.go \
-	go/go/build/path.go \
+	go/go/build/doc.go \
 	syslist.go
 go_go_doc_files = \
 	go/go/doc/comment.go \
diff -r 99cd08e36ace libgo/runtime/go-main.c
--- a/libgo/runtime/go-main.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/go-main.c	Tue Mar 06 09:42:08 2012 -0800
@@ -40,7 +40,7 @@
 int
 main (int argc, char **argv)
 {
-  runtime_initsig ();
+  runtime_check ();
   runtime_args (argc, (byte **) argv);
   runtime_osinit ();
   runtime_schedinit ();
diff -r 99cd08e36ace libgo/runtime/lock_futex.c
--- a/libgo/runtime/lock_futex.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/lock_futex.c	Tue Mar 06 09:42:08 2012 -0800
@@ -118,8 +118,12 @@
 void
 runtime_notesleep(Note *n)
 {
+	if(runtime_m()->profilehz > 0)
+		runtime_setprof(false);
 	while(runtime_atomicload(&n->key) == 0)
 		runtime_futexsleep(&n->key, 0, -1);
+	if(runtime_m()->profilehz > 0)
+		runtime_setprof(true);
 }
 
 void
@@ -135,14 +139,18 @@
 	if(runtime_atomicload(&n->key) != 0)
 		return;
 
+	if(runtime_m()->profilehz > 0)
+		runtime_setprof(false);
 	deadline = runtime_nanotime() + ns;
 	for(;;) {
 		runtime_futexsleep(&n->key, 0, ns);
 		if(runtime_atomicload(&n->key) != 0)
-			return;
+			break;
 		now = runtime_nanotime();
 		if(now >= deadline)
-			return;
+			break;
 		ns = deadline - now;
 	}
+	if(runtime_m()->profilehz > 0)
+		runtime_setprof(true);
 }
diff -r 99cd08e36ace libgo/runtime/lock_sema.c
--- a/libgo/runtime/lock_sema.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/lock_sema.c	Tue Mar 06 09:42:08 2012 -0800
@@ -159,7 +159,11 @@
 		return;
 	}
 	// Queued.  Sleep.
+	if(m->profilehz > 0)
+		runtime_setprof(false);
 	runtime_semasleep(-1);
+	if(m->profilehz > 0)
+		runtime_setprof(true);
 }
 
 void
@@ -185,12 +189,16 @@
 		return;
 	}
 
+	if(m->profilehz > 0)
+		runtime_setprof(false);
 	deadline = runtime_nanotime() + ns;
 	for(;;) {
 		// Registered.  Sleep.
 		if(runtime_semasleep(ns) >= 0) {
 			// Acquired semaphore, semawakeup unregistered us.
 			// Done.
+			if(m->profilehz > 0)
+				runtime_setprof(true);
 			return;
 		}
 
@@ -203,6 +211,9 @@
 		ns = deadline - now;
 	}
 
+	if(m->profilehz > 0)
+		runtime_setprof(true);
+
 	// Deadline arrived.  Still registered.  Semaphore not acquired.
 	// Want to give up and return, but have to unregister first,
 	// so that any notewakeup racing with the return does not
diff -r 99cd08e36ace libgo/runtime/malloc.goc
--- a/libgo/runtime/malloc.goc	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/malloc.goc	Tue Mar 06 09:42:08 2012 -0800
@@ -277,6 +277,7 @@
 	uintptr arena_size, bitmap_size;
 	extern byte end[];
 	byte *want;
+	uintptr limit;
 
 	runtime_sizeof_C_MStats = sizeof(MStats);
 
@@ -291,10 +292,12 @@
 
 	runtime_InitSizes();
 
+	limit = runtime_memlimit();
+
 	// Set up the allocation arena, a contiguous area of memory where
 	// allocated data will be found.  The arena begins with a bitmap large
 	// enough to hold 4 bits per allocated word.
-	if(sizeof(void*) == 8) {
+	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
 		// On a 64-bit machine, allocate from a single contiguous reservation.
 		// 16 GB should be big enough for now.
 		//
@@ -343,6 +346,10 @@
 		// of address space, which is probably too much in a 32-bit world.
 		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
 		arena_size = 512<<20;
+		if(limit > 0 && arena_size+bitmap_size > limit) {
+			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
+			arena_size = bitmap_size * 8;
+		}
 		
 		// SysReserve treats the address we ask for, end, as a hint,
 		// not as an absolute requirement.  If we ask for the end
@@ -359,6 +366,8 @@
 		p = runtime_SysReserve(want, bitmap_size + arena_size);
 		if(p == nil)
 			runtime_throw("runtime: cannot reserve arena virtual address space");
+		if((uintptr)p & (((uintptr)1<<PageShift)-1))
+			runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, (void*)(bitmap_size+arena_size));
 	}
 	if((uintptr)p & (((uintptr)1<<PageShift)-1))
 		runtime_throw("runtime: SysReserve returned unaligned address");
diff -r 99cd08e36ace libgo/runtime/mgc0.c
--- a/libgo/runtime/mgc0.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/mgc0.c	Tue Mar 06 09:42:08 2012 -0800
@@ -654,14 +654,6 @@
 	scanblock(v, size);
 }
 
-struct root_list {
-	struct root_list *next;
-	struct root {
-		void *decl;
-		size_t size;
-	} roots[];
-};
-
 static struct root_list* roots;
 
 void
diff -r 99cd08e36ace libgo/runtime/mheap.c
--- a/libgo/runtime/mheap.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/mheap.c	Tue Mar 06 09:42:08 2012 -0800
@@ -326,7 +326,7 @@
 }
 
 // Release (part of) unused memory to OS.
-// Goroutine created in runtime_schedinit.
+// Goroutine created at startup.
 // Loop forever.
 void
 runtime_MHeap_Scavenger(void* dummy)
diff -r 99cd08e36ace libgo/runtime/proc.c
--- a/libgo/runtime/proc.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/proc.c	Tue Mar 06 09:42:08 2012 -0800
@@ -416,8 +416,6 @@
 	// Can not enable GC until all roots are registered.
 	// mstats.enablegc = 1;
 	m->nomemprof--;
-
-	scvg = __go_go(runtime_MHeap_Scavenger, nil);
 }
 
 extern void main_init(void) __asm__ ("__go_init_main");
@@ -435,6 +433,7 @@
 	// to preserve the lock.
 	runtime_LockOSThread();
 	runtime_sched.init = true;
+	scvg = __go_go(runtime_MHeap_Scavenger, nil);
 	main_init();
 	runtime_sched.init = false;
 	if(!runtime_sched.lockmain)
@@ -548,7 +547,7 @@
 		m->mcache = runtime_allocmcache();
 
 	runtime_callers(1, m->createstack, nelem(m->createstack));
-	
+
 	// Add to runtime_allm so garbage collector doesn't free m
 	// when it is just in a register or thread-local storage.
 	m->alllink = runtime_allm;
@@ -791,10 +790,11 @@
 		mput(m);
 	}
 
-	// Look for deadlock situation: one single active g which happens to be scvg.
-	if(runtime_sched.grunning == 1 && runtime_sched.gwait == 0) {
-		if(scvg->status == Grunning || scvg->status == Gsyscall)
-			runtime_throw("all goroutines are asleep - deadlock!");
+	// Look for deadlock situation.
+	if((scvg == nil && runtime_sched.grunning == 0) ||
+	   (scvg != nil && runtime_sched.grunning == 1 && runtime_sched.gwait == 0 &&
+	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
+		runtime_throw("all goroutines are asleep - deadlock!");
 	}
 
 	m->nextg = nil;
@@ -1135,6 +1135,9 @@
 {
 	uint32 v;
 
+	if(m->profilehz > 0)
+		runtime_setprof(false);
+
 	// Leave SP around for gc and traceback.
 #ifdef USING_SPLIT_STACK
 	g->gcstack = __splitstack_find(NULL, NULL, &g->gcstack_size,
@@ -1205,6 +1208,9 @@
 #endif
 		gp->gcnext_sp = nil;
 		runtime_memclr(gp->gcregs, sizeof gp->gcregs);
+
+		if(m->profilehz > 0)
+			runtime_setprof(true);
 		return;
 	}
 
diff -r 99cd08e36ace libgo/runtime/runtime.c
--- a/libgo/runtime/runtime.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/runtime.c	Tue Mar 06 09:42:08 2012 -0800
@@ -184,6 +184,21 @@
 	return x;
 }
 
+static struct root_list runtime_roots =
+{ NULL,
+  { { &syscall_Envs, sizeof syscall_Envs },
+    { &os_Args, sizeof os_Args },
+    { NULL, 0 } },
+};
+
+void
+runtime_check(void)
+{
+	__go_register_gc_roots(&runtime_roots);
+
+	runtime_initsig ();
+}
+
 int64
 runtime_cputicks(void)
 {
diff -r 99cd08e36ace libgo/runtime/runtime.h
--- a/libgo/runtime/runtime.h	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/runtime.h	Tue Mar 06 09:42:08 2012 -0800
@@ -427,6 +427,8 @@
 void	runtime_LockOSThread(void) __asm__("libgo_runtime.runtime.LockOSThread");
 void	runtime_UnlockOSThread(void) __asm__("libgo_runtime.runtime.UnlockOSThread");
 
+uintptr	runtime_memlimit(void);
+
 // If appropriate, ask the operating system to control whether this
 // thread should receive profiling signals.  This is only necessary on OS X.
 // An operating system should not deliver a profiling signal to a
@@ -441,3 +443,16 @@
 
 void	runtime_setsig(int32, bool, bool);
 #define runtime_setitimer setitimer
+
+void	runtime_check(void);
+
+// A list of global variables that the garbage collector must scan.
+struct root_list {
+	struct root_list *next;
+	struct root {
+		void *decl;
+		size_t size;
+	} roots[];
+};
+
+void	__go_register_gc_roots(struct root_list*);
diff -r 99cd08e36ace libgo/runtime/thread-linux.c
--- a/libgo/runtime/thread-linux.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/thread-linux.c	Tue Mar 06 09:42:08 2012 -0800
@@ -3,6 +3,16 @@
 // license that can be found in the LICENSE file.
 
 #include "runtime.h"
+#include "defs.h"
+
+// Linux futex.
+//
+//	futexsleep(uint32 *addr, uint32 val)
+//	futexwakeup(uint32 *addr)
+//
+// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
+// Futexwakeup wakes up threads sleeping on addr.
+// Futexsleep is allowed to wake up spuriously.
 
 #include <errno.h>
 #include <string.h>
diff -r 99cd08e36ace libgo/runtime/thread.c
--- a/libgo/runtime/thread.c	Mon Mar 05 09:07:10 2012 -0800
+++ b/libgo/runtime/thread.c	Tue Mar 06 09:42:08 2012 -0800
@@ -4,6 +4,8 @@
 
 #include <errno.h>
 #include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include "runtime.h"
 #include "go-assert.h"
@@ -138,6 +140,7 @@
 	byte* stack;
 	size_t stacksize;
 	stack_t ss;
+	sigset_t sigs;
 
 	// Initialize signal handling.
 	runtime_m()->gsignal = runtime_malg(32*1024, &stack, &stacksize);	// OS X wants >=8K, Linux >=2K
@@ -146,4 +149,34 @@
 	ss.ss_size = stacksize;
 	if(sigaltstack(&ss, nil) < 0)
 		*(int *)0xf1 = 0xf1;
+	if (sigemptyset(&sigs) != 0)
+		runtime_throw("sigemptyset");
+	sigprocmask(SIG_SETMASK, &sigs, nil);
 }
+
+uintptr
+runtime_memlimit(void)
+{
+	struct rlimit rl;
+	uintptr used;
+
+	if(getrlimit(RLIMIT_AS, &rl) != 0)
+		return 0;
+	if(rl.rlim_cur >= 0x7fffffff)
+		return 0;
+
+	// Estimate our VM footprint excluding the heap.
+	// Not an exact science: use size of binary plus
+	// some room for thread stacks.
+	used = (64<<20);
+	if(used >= rl.rlim_cur)
+		return 0;
+
+	// If there's not at least 16 MB left, we're probably
+	// not going to be able to do much.  Treat as no limit.
+	rl.rlim_cur -= used;
+	if(rl.rlim_cur < (16<<20))
+		return 0;
+
+	return rl.rlim_cur - used;
+}
