Understanding Goroutines in Go Programming
Goroutines are lightweight threads managed by the Go runtime that enable concurrent execution. This guide covers everything you need to know about working with goroutines effectively.
Goroutine Basics
Creating Goroutines
// Basic goroutine
go func() {
fmt.Println("Hello from goroutine!")
}()
// Goroutine with function
// sayHello prints a fixed greeting to standard output.
func sayHello() {
	greeting := "Hello!"
	fmt.Println(greeting)
}
go sayHello()
// Goroutine with parameters
go func(msg string) {
fmt.Println(msg)
}("Hello")
Waiting for Goroutines
// Using WaitGroup
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
// Do work...
}()
wg.Wait() // Wait for goroutine to finish
// Multiple goroutines
for i := 0; i < 5; i++ {
wg.Add(1)
go func(n int) {
defer wg.Done()
fmt.Printf("Worker %d\n", n)
}(i)
}
wg.Wait()
Synchronization
Channel Communication
// Basic channel communication
ch := make(chan string)
go func() {
ch <- "Hello"
}()
msg := <-ch
// Buffered channels
ch := make(chan int, 2)
ch <- 1 // Won't block
ch <- 2 // Won't block
ch <- 3 // Will block until space available
Mutex Locking
// Counter is a concurrency-safe integer counter guarded by a mutex.
type Counter struct {
	mu    sync.Mutex
	value int
}

// Increment adds one to the counter while holding the lock.
func (c *Counter) Increment() {
	c.mu.Lock()
	c.value++
	c.mu.Unlock()
}
// Usage
counter := &Counter{}
for i := 0; i < 1000; i++ {
go counter.Increment()
}
Best Practices
1. Goroutine Management
// Good: Controlled goroutine creation
// processItems fans work out over items while capping concurrency at
// maxWorkers via a counting-semaphore channel. It blocks until every
// worker has finished and always reports success.
func processItems(items []string) error {
	const maxWorkers = 5
	tokens := make(chan struct{}, maxWorkers)
	var wg sync.WaitGroup
	for _, item := range items {
		wg.Add(1)
		tokens <- struct{}{} // acquire a slot before spawning
		go func(item string) {
			defer wg.Done()
			defer func() { <-tokens }() // release the slot (runs before Done)
			// Process item...
		}(item)
	}
	wg.Wait()
	return nil
}
2. Error Handling
// Good: Error handling in goroutines
// processWithErrors runs process on every item concurrently, gathers
// any failures through a buffered error channel, and reports them as a
// single combined error (nil when everything succeeded).
func processWithErrors(items []string) error {
	var wg sync.WaitGroup
	errCh := make(chan error, len(items))
	for _, item := range items {
		wg.Add(1)
		go func(item string) {
			defer wg.Done()
			err := process(item)
			if err != nil {
				errCh <- fmt.Errorf("processing %s: %w", item, err)
			}
		}(item)
	}
	// Close errCh once every worker is done so the range below terminates.
	go func() {
		wg.Wait()
		close(errCh)
	}()
	var collected []error
	for err := range errCh {
		collected = append(collected, err)
	}
	if len(collected) == 0 {
		return nil
	}
	return fmt.Errorf("multiple errors: %v", collected)
}
3. Context Usage
func worker(ctx context.Context) error {
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
// Do work...
}
}
}
// Usage
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
go worker(ctx)
Common Patterns
1. Worker Pool
// worker consumes job numbers from jobs, simulates one second of work
// per job, and sends each doubled job number on results. It returns
// when jobs is closed and drained.
func worker(id int, jobs <-chan int, results chan<- int) {
	for job := range jobs {
		fmt.Printf("worker %d processing job %d\n", id, job)
		time.Sleep(time.Second)
		results <- job * 2
	}
}
// main demonstrates a fixed-size worker pool: three workers drain a
// buffered jobs channel while the main goroutine collects exactly one
// result per submitted job.
func main() {
	const numJobs = 5
	jobs := make(chan int, numJobs)
	results := make(chan int, numJobs)
	// Start a small pool of workers.
	for id := 1; id <= 3; id++ {
		go worker(id, jobs, results)
	}
	// Queue every job, then close so the workers' range loops terminate.
	for job := 1; job <= numJobs; job++ {
		jobs <- job
	}
	close(jobs)
	// Drain one result per job before exiting.
	for done := 0; done < numJobs; done++ {
		<-results
	}
}
2. Pipeline Pattern
// generator returns a channel that yields each of nums in order; the
// channel is closed once all values have been sent.
func generator(nums ...int) <-chan int {
	out := make(chan int)
	emit := func() {
		defer close(out)
		for _, n := range nums {
			out <- n
		}
	}
	go emit()
	return out
}
// square reads integers from in and emits each value squared on the
// returned channel, which is closed when in closes.
func square(in <-chan int) <-chan int {
	out := make(chan int)
	go func() {
		for v := range in {
			out <- v * v
		}
		close(out)
	}()
	return out
}
// Usage
nums := generator(1, 2, 3, 4)
squares := square(nums)
for n := range squares {
fmt.Println(n)
}
3. Fan-out, Fan-in
// fanOut starts n workers all reading from ch and returns their output
// channels.
// NOTE(review): this expects a worker with signature
// func(<-chan int) <-chan int, which differs from the worker defined in
// the Worker Pool section — confirm which helper is intended.
func fanOut(ch <-chan int, n int) []<-chan int {
	outputs := make([]<-chan int, 0, n)
	for i := 0; i < n; i++ {
		outputs = append(outputs, worker(ch))
	}
	return outputs
}
// fanIn merges every input channel onto one output channel; the merged
// channel is closed only after all inputs have been fully drained.
func fanIn(channels ...<-chan int) <-chan int {
	merged := make(chan int)
	var wg sync.WaitGroup
	wg.Add(len(channels))
	for _, ch := range channels {
		// One forwarding goroutine per input channel.
		go func(ch <-chan int) {
			defer wg.Done()
			for v := range ch {
				merged <- v
			}
		}(ch)
	}
	// Close merged once every forwarder has finished.
	go func() {
		wg.Wait()
		close(merged)
	}()
	return merged
}
Performance Considerations
1. Goroutine Creation
// Good: Reuse goroutines
// pooledWorker executes incoming jobs one at a time until the jobs
// channel is closed, letting a single goroutine serve many tasks.
func pooledWorker(jobs <-chan func()) {
	for {
		job, ok := <-jobs
		if !ok {
			return
		}
		job()
	}
}
// Bad: Create goroutine per task
// perTaskWorker launches task on its own goroutine.
// Bad: pays goroutine-creation cost for every single task instead of
// reusing a pooled worker.
func perTaskWorker(task func()) {
	go func() {
		task()
	}()
}
2. Channel Sizing
// Unbuffered: Synchronous communication
ch := make(chan int)
// Buffered: Asynchronous with known size
ch := make(chan int, numberOfWorkers)
// Over-buffered: Waste of memory
ch := make(chan int, 1000000) // Probably too large
Common Mistakes
1. Goroutine Leaks
// Wrong: Leaking goroutine
func leak() {
ch := make(chan int)
go func() {
val := <-ch // Goroutine blocks forever
}()
// Channel never receives a value
}
// Right: Ensure goroutine can exit
func noLeak() {
ch := make(chan int)
go func() {
select {
case val := <-ch:
// Process val
case <-time.After(time.Second):
return
}
}()
}
2. Race Conditions
// Wrong: Race condition
counter := 0
var wg sync.WaitGroup
for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
counter++ // Race condition
wg.Done()
}()
}
// Right: Use mutex
var mu sync.Mutex
counter := 0
for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
mu.Lock()
counter++
mu.Unlock()
wg.Done()
}()
}
3. Loop Variable Capture
// Wrong: Loop variable capture
for i := 0; i < 5; i++ {
go func() {
fmt.Println(i) // All print same value
}()
}
// Right: Pass variable as parameter
for i := 0; i < 5; i++ {
go func(n int) {
fmt.Println(n) // Prints correct values
}(i)
}
Next Steps
- Learn about channels
- Explore select
- Study mutexes
- Practice with worker pools