libgo patch committed: Omit nil-channel cases from selectgo order slices

Ian Lance Taylor <iant@golang.org>
Tue Dec 22 19:57:03 GMT 2020


This patch to libgo omits the nil-channel cases from the order slices
used in the selectgo function.  It is a simplification in preparation
for future work, and is being brought in now as part of updating to
the Go 1.16beta1 release.  Bootstrapped and ran Go testsuite on
x86_64-pc-linux-gnu.  Committed to mainline.
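
For context (this sketch is not part of the patch, and the channel
names are made up for illustration): a send or receive on a nil
channel blocks forever, so a select case whose channel is nil can
never fire.  That is why such cases can be left out of the poll and
lock orders entirely, with only a default case (now tracked
separately via dfli) still needing special handling:

package main

import (
	"fmt"
	"time"
)

func main() {
	var nilCh chan int // nil channel: send and receive block forever
	readyCh := make(chan int, 1)
	readyCh <- 42

	// The nil-channel case below can never proceed, so the runtime
	// only ever needs to poll and lock the non-nil cases.
	select {
	case v := <-nilCh:
		fmt.Println("unreachable:", v)
	case v := <-readyCh:
		fmt.Println("got", v) // prints "got 42"
	case <-time.After(time.Second):
		fmt.Println("timeout")
	}
}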

Ian
-------------- next part --------------
48357ce4f22c8298ea5fb01d6873bc7bf56180e5
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index bfc7856cce5..a7df8433403 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-c8456995b0118a92820c6c1d8f996d4b1adf55c2
+d0e56e82bb298268ec0f306fef99a715c892d4a7
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go
index 81e00ec5a4e..d5087fbc045 100644
--- a/libgo/go/runtime/select.go
+++ b/libgo/go/runtime/select.go
@@ -41,7 +41,7 @@ func sellock(scases []scase, lockorder []uint16) {
 	var c *hchan
 	for _, o := range lockorder {
 		c0 := scases[o].c
-		if c0 != nil && c0 != c {
+		if c0 != c {
 			c = c0
 			lock(&c.lock)
 		}
@@ -57,11 +57,8 @@ func selunlock(scases []scase, lockorder []uint16) {
 	// the G that calls select runnable again and schedules it for execution.
 	// When the G runs on another M, it locks all the locks and frees sel.
 	// Now if the first M touches sel, it will access freed memory.
-	for i := len(scases) - 1; i >= 0; i-- {
+	for i := len(lockorder) - 1; i >= 0; i-- {
 		c := scases[lockorder[i]].c
-		if c == nil {
-			break
-		}
 		if i > 0 && c == scases[lockorder[i-1]].c {
 			continue // will unlock it on the next iteration
 		}
@@ -138,15 +135,6 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 	pollorder := order1[:ncases:ncases]
 	lockorder := order1[ncases:][:ncases:ncases]
 
-	// Replace send/receive cases involving nil channels with
-	// caseNil so logic below can assume non-nil channel.
-	for i := range scases {
-		cas := &scases[i]
-		if cas.c == nil && cas.kind != caseDefault {
-			*cas = scase{}
-		}
-	}
-
 	var t0 int64
 	if blockprofilerate > 0 {
 		t0 = cputicks()
@@ -166,15 +154,31 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 	}
 
 	// generate permuted order
-	for i := 1; i < ncases; i++ {
-		j := fastrandn(uint32(i + 1))
-		pollorder[i] = pollorder[j]
+	dfli := -1
+	norder := 0
+	for i := range scases {
+		cas := &scases[i]
+
+		// Omit cases without channels from the poll and lock orders.
+		if cas.c == nil {
+			if cas.kind == caseDefault {
+				dfli = i
+			}
+			cas.elem = nil // allow GC
+			continue
+		}
+
+		j := fastrandn(uint32(norder + 1))
+		pollorder[norder] = pollorder[j]
 		pollorder[j] = uint16(i)
+		norder++
 	}
+	pollorder = pollorder[:norder]
+	lockorder = lockorder[:norder]
 
 	// sort the cases by Hchan address to get the locking order.
 	// simple heap sort, to guarantee n log n time and constant stack footprint.
-	for i := 0; i < ncases; i++ {
+	for i := range lockorder {
 		j := i
 		// Start with the pollorder to permute cases on the same channel.
 		c := scases[pollorder[i]].c
@@ -185,7 +189,7 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 		}
 		lockorder[j] = pollorder[i]
 	}
-	for i := ncases - 1; i >= 0; i-- {
+	for i := len(lockorder) - 1; i >= 0; i-- {
 		o := lockorder[i]
 		c := scases[o].c
 		lockorder[i] = lockorder[0]
@@ -209,7 +213,7 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 	}
 
 	if debugSelect {
-		for i := 0; i+1 < ncases; i++ {
+		for i := 0; i+1 < len(lockorder); i++ {
 			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
 				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
 				throw("select: broken sort")
@@ -233,21 +237,16 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 
 loop:
 	// pass 1 - look for something already waiting
-	var dfli int
-	var dfl *scase
 	var casi int
 	var cas *scase
 	var caseReleaseTime int64 = -1
 	var recvOK bool
-	for i := 0; i < ncases; i++ {
-		casi = int(pollorder[i])
+	for _, casei := range pollorder {
+		casi = int(casei)
 		cas = &scases[casi]
 		c = cas.c
 
 		switch cas.kind {
-		case caseNil:
-			continue
-
 		case caseRecv:
 			sg = c.sendq.dequeue()
 			if sg != nil {
@@ -271,17 +270,12 @@ loop:
 			if c.qcount < c.dataqsiz {
 				goto bufsend
 			}
-
-		case caseDefault:
-			dfli = casi
-			dfl = cas
 		}
 	}
 
-	if dfl != nil {
+	if dfli >= 0 {
 		selunlock(scases, lockorder)
 		casi = dfli
-		cas = dfl
 		goto retc
 	}
 
@@ -294,9 +288,6 @@ loop:
 	for _, casei := range lockorder {
 		casi = int(casei)
 		cas = &scases[casi]
-		if cas.kind == caseNil {
-			continue
-		}
 		c = cas.c
 		sg := acquireSudog()
 		sg.g = gp
@@ -355,9 +346,6 @@ loop:
 
 	for _, casei := range lockorder {
 		k = &scases[casei]
-		if k.kind == caseNil {
-			continue
-		}
 		if sg == sglist {
 			// sg has already been dequeued by the G that woke us up.
 			casi = int(casei)
@@ -468,7 +456,7 @@ retc:
 
 	// Check preemption, since unlike gc we don't check on every call.
 	// A test case for this one is BenchmarkPingPongHog in proc_test.go.
-	if dfl != nil && getg().preempt {
+	if dfli >= 0 && getg().preempt {
 		checkPreempt()
 	}
 

