path: root/modules/web/routing/logger_manager.go
// Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package routing

import (
	"context"
	"net/http"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/process"
)

// Event indicates when the printer is triggered
type Event int

const (
	// StartEvent at the beginning of a request
	StartEvent Event = iota

	// StillExecutingEvent the request is still executing
	StillExecutingEvent

	// EndEvent the request has ended (either completed or failed)
	EndEvent
)

// Printer is used to output the log for a request
type Printer func(trigger Event, record *requestRecord)
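
// Example: a minimal Printer sketch (illustrative only; examplePrinter is a
// hypothetical name, not part of this package). It uses only record fields
// that appear later in this file (request, startTime) and assumes the standard
// library "log" and "time" packages:
//
//	func examplePrinter(trigger Event, record *requestRecord) {
//		elapsed := time.Since(record.startTime)
//		switch trigger {
//		case StartEvent:
//			log.Printf("%s %s: started", record.request.Method, record.request.URL.Path)
//		case StillExecutingEvent:
//			log.Printf("%s %s: still running after %v", record.request.Method, record.request.URL.Path, elapsed)
//		case EndEvent:
//			log.Printf("%s %s: finished in %v", record.request.Method, record.request.URL.Path, elapsed)
//		}
//	}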

type requestRecordsManager struct {
	print Printer

	lock sync.Mutex

	requestRecords map[uint64]*requestRecord
	count          uint64
}

func (manager *requestRecordsManager) startSlowQueryDetector(threshold time.Duration) {
	go graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
		ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: SlowQueryDetector", process.SystemProcessType, true)
		defer finished()
		// This goroutine checks all active requests every second.
		// If a request has been running for a long time (e.g. /user/events), we also print a log with a "still-executing" message.
		// After the "still-executing" log is printed, the record is removed from the map to prevent duplicate logs in the future.

		// We do not need an accurate interval here. The check just runs periodically; 0.5s or 1.5s would both be fine.
		t := time.NewTicker(time.Second)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				now := time.Now()

				var slowRequests []*requestRecord

				// find all slow requests with lock
				manager.lock.Lock()
				for index, record := range manager.requestRecords {
					if now.Sub(record.startTime) < threshold {
						continue
					}

					slowRequests = append(slowRequests, record)
					delete(manager.requestRecords, index)
				}
				manager.lock.Unlock()

				// print logs for slow requests
				for _, record := range slowRequests {
					manager.print(StillExecutingEvent, record)
				}
			}
		}
	})
}

func (manager *requestRecordsManager) handler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		record := &requestRecord{
			startTime:      time.Now(),
			request:        req,
			responseWriter: w,
		}

		// generate a record index and insert the record into the map
		manager.lock.Lock()
		record.index = manager.count
		manager.count++
		manager.requestRecords[record.index] = record
		manager.lock.Unlock()

		defer func() {
			// handle a panic, just in case. At the moment panics are all recovered in middleware.go before reaching here
			localPanicErr := recover()
			if localPanicErr != nil {
				record.lock.Lock()
				record.panicError = localPanicErr
				record.lock.Unlock()
			}

			// remove from the record map
			manager.lock.Lock()
			delete(manager.requestRecords, record.index)
			manager.lock.Unlock()

			// log the end of the request
			manager.print(EndEvent, record)

			if localPanicErr != nil {
				// the panic wasn't recovered before us, so pass it up and let the framework handle it
				panic(localPanicErr)
			}
		}()

		req = req.WithContext(context.WithValue(req.Context(), contextKey, record))
		manager.print(StartEvent, record)
		next.ServeHTTP(w, req)
	})
}
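
// Usage sketch (hypothetical wiring, not taken from this file): a caller would
// construct the manager with a Printer, start the slow-request detector, and
// register handler as middleware. The router "r" (chi-style, with a Use
// method), the examplePrinter above, and the 3-second threshold are all
// assumptions:
//
//	manager := &requestRecordsManager{
//		print:          examplePrinter,
//		requestRecords: map[uint64]*requestRecord{},
//	}
//	manager.startSlowQueryDetector(3 * time.Second)
//	r.Use(manager.handler)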