	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	// Set the maximum stack size.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Allow newproc to start new Ms.
	mainStarted = true

	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	doInit(&runtime_inittask) // must be before defer
	if nanotime() == 0 {
		throw("nanotime returning zero")
	}

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Record when the world started.
	runtimeInitTime = nanotime()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	// Run the main package's main function.
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini() // finalize the race detector before exiting
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	if atomic.Load(&panicking) != 0 {
		// Another goroutine is panicking; park this one forever so the
		// panicking goroutine can finish printing its trace and exit.
		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
	}
// m represents an OS thread (a "machine") and its scheduling state.
type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64       // for debuggers, but offset not hard-coded
	gsignal       *g           // signal-handling g
	goSigStack    gsignalStack // Go-allocated signal handling stack
	sigmask       sigset       // storage for saved signal mask
	tls           [6]uintptr   // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // the currently executing G
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	oldp          puintptr // the p that was attached before executing a syscall
	id            int64
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32  // reference count for this m
	dying         int32
	profilehz     int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	newSigstack   bool // minit on C thread called sigaltstack
	printlock     int8
	incgo         bool      // m is executing a cgo call
	freeWait      uint32    // if == 0, safe to free g0 and delete m (atomic)
	fastrand      [2]uint32 // two random values; must not both be zero
	needextram    bool
	traceback     uint8
	ncgocall      uint64      // number of cgo calls in total
	ncgo          int32       // number of cgo calls currently in progress
	cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
	cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call
	park          note
	alllink       *m          // on allm
	schedlink     muintptr    // next m in a singly linked list of Ms
	lockedg       guintptr    // the G locked to this M, if any
	createstack   [32]uintptr // stack that created this thread.
	lockedExt     uint32      // tracking for external LockOSThread
	lockedInt     uint32      // tracking for internal lockOSThread
	nextwaitm     muintptr    // next m waiting for lock
	waitunlockf   func(*g, unsafe.Pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	freelink      *m // on sched.freem (global list of Ms waiting to be freed)

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails. Accessed atomically.
	preemptGen uint32

	// Whether this is a pending preemption signal on this M.
	// Accessed atomically.
	signalPending uint32

	dlogPerM

	mOS

	// Up to 10 locks held by this m, maintained by the lock ranking code.
	locksHeldLen int
	locksHeld    [10]heldLockInfo
}
type p struct { id int32 status uint32// one of pidle/prunning/... link puintptr // P的单链表 schedtick uint32// incremented on every scheduler call syscalltick uint32// incremented on every system call sysmontick sysmontick // last tick observed by sysmon m muintptr // 当前的M,如果空闲则为nil mcache *mcache pcache pageCache raceprocctx uintptr
deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go) deferpoolbuf [5][32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen. goidcache uint64 goidcacheend uint64
// Queue of runnable goroutines. Accessed without lock. runqhead uint32// 队首下标 runqtail uint32// 队尾下标 runq [256]guintptr // 可执行队列,最多256个 // runnext, if non-nil, is a runnable G that was ready'd by // the current G and should be run next instead of what's in // runq if there's time remaining in the running G's time // slice. It will inherit the time left in the current time // slice. If a set of goroutines is locked in a // communicate-and-wait pattern, this schedules that set as a // unit and eliminates the (potentially large) scheduling // latency that otherwise arises from adding the ready'd // goroutines to the end of the run queue. runnext guintptr
// Available G's (status == Gdead) gFree struct { gList n int32 }
// Cache of mspan objects from the heap. mspancache struct { // We need an explicit length here because this field is used // in allocation codepaths where write barriers are not allowed, // and eliminating the write barrier/keeping it eliminated from // slice updates is tricky, moreso than just managing the length // ourselves. lenint buf [128]*mspan }
tracebuf traceBufPtr
// traceSweep indicates the sweep events should be traced. // This is used to defer the sweep start event until a span // has actually been swept. traceSweep bool // traceSwept and traceReclaimed track the number of bytes // swept and reclaimed by sweeping in the current sweep loop. traceSwept, traceReclaimed uintptr
palloc persistentAlloc // per-P to avoid mutex
_ uint32// Alignment for atomic fields below
// The when field of the first entry on the timer heap. // This is updated using atomic functions. // This is 0 if the timer heap is empty. timer0When uint64
// Per-P GC state gcAssistTime int64// Nanoseconds in assistAlloc gcFractionalMarkTime int64// Nanoseconds in fractional mark worker (atomic) gcBgMarkWorker guintptr // (atomic) gcMarkWorkerMode gcMarkWorkerMode
// gcMarkWorkerStartTime is the nanotime() at which this mark // worker started. gcMarkWorkerStartTime int64
// gcw is this P's GC work buffer cache. The work buffer is // filled by write barriers, drained by mutator assists, and // disposed on certain GC state transitions. gcw gcWork
// wbBuf is this P's GC write barrier buffer. // // TODO: Consider caching this in the running G. wbBuf wbBuf
runSafePointFn uint32// if 1, run sched.safePointFn at next safe point
// Lock for timers. We normally access the timers while running // on this P, but the scheduler can also do it from a different P. timersLock mutex
// Actions to take at some time. This is used to implement the // standard library's time package. // Must hold timersLock to access. timers []*timer
// Number of timers in P's heap. // Modified using atomic instructions. numTimers uint32
// Number of timerModifiedEarlier timers on P's heap. // This should only be modified while holding timersLock, // or while the timer status is in a transient state // such as timerModifying. adjustTimers uint32
// Number of timerDeleted timers in P's heap. // Modified using atomic instructions. deletedTimers uint32
// Race context used while executing timer functions. timerRaceCtx uintptr
// preempt is set to indicate that this P should be enter the // scheduler ASAP (regardless of what G is running on it). preempt bool
// g represents a single goroutine.
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic       *_panic        // innermost panic - offset known to liblink
	_defer       *_defer        // innermost defer
	m            *m             // current m; offset known to arm liblink
	sched        gobuf
	syscallsp    uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc    uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stktopsp     uintptr        // expected sp at top of stack, to check in traceback
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32         // status of this G
	stackLock    uint32         // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         int64          // goroutine id
	schedlink    guintptr       // next g in a scheduler list
	waitsince    int64          // approx time when the g become blocked
	waitreason   waitReason     // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool

	raceignore     int8     // ignore race detection events
	sysblocktraced bool     // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64    // cputicks when syscall has returned (for tracing)
	traceseq       uint64   // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        muintptr
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr         // pc of go statement that created this goroutine
	ancestors      *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc        uintptr         // pc of goroutine function
	racectx        uintptr
	waiting        *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt        []uintptr      // cgo traceback context
	labels         unsafe.Pointer // profiler labels
	timer          *timer         // cached timer for time.Sleep
	selectDone     uint32         // are we participating in a select and did someone win the race?

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
// schedt holds global scheduler state.
type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen   uint64
	lastpoll  uint64 // time of last network poll, 0 if currently polling
	pollUntil uint64 // time to which current poll is sleeping

	lock mutex // protects the fields of schedt

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // singly linked list of idle m's
	nmidle       int32    // number of idle m's
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// disable controls selective disabling of the scheduler.
	//
	// It is toggled via schedEnableUser(enable bool).
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
		n        int32  // length of runnable
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
		n       int32
	}

	// Central cache of sudog structs.
	sudoglock  mutex  // protects sudogcache
	sudogcache *sudog // global list of free sudogs

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime

	// sysmonlock protects sysmon's actions on the runtime.
	//
	// Acquire and hold this mutex to block sysmon from interacting
	// with the rest of the runtime.
	sysmonlock mutex
}
var (
	allglen    uintptr // length of allgs
	allm       *m      // singly linked list of all m's
	allp       []*p    // all p's; len(allp) == gomaxprocs; may change only via GOMAXPROCS()
	allpLock   mutex   // Protects P-less reads of allp and all writes
	gomaxprocs int32   // maximum number of p's
	ncpu       int32   // number of CPUs
	forcegc    forcegcstate
	sched      schedt // global scheduler state
	newprocs   int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
	lfenceBeforeRdtsc    bool

	goarm                uint8 // set by cmd/link on arm systems
	framepointer_enabled bool  // set by cmd/link
)