trace: New package.

Package trace implements tracing of requests.
It exports an HTTP interface on /debug/requests,
and higher level packages (e.g. an RPC system)
can construct and annotate traces.

Change-Id: I67e981ed28bc6a15e4ad5a02217500505896ce1c
Reviewed-on: https://go-review.googlesource.com/10741
Reviewed-by: Nigel Tao <nigeltao@golang.org>
This commit is contained in:
David Symonds
2015-06-05 10:17:32 +10:00
parent a19b75acff
commit e562cdb856
4 changed files with 1700 additions and 0 deletions

356
trace/histogram.go Normal file
View File

@@ -0,0 +1,356 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
// This file implements histogramming for RPC statistics collection.
import (
"bytes"
"fmt"
"html/template"
"log"
"math"
"golang.org/x/net/internal/timeseries"
)
const (
	// bucketCount is the number of power-of-two histogram buckets
	// (0-1, 2-3, 4-7, ...), matching the ranges produced by getBucket.
	bucketCount = 38
)
// histogram keeps counts of values in buckets that are spaced
// out in powers of 2: 0-1, 2-3, 4-7...
// histogram implements timeseries.Observable
type histogram struct {
	sum          int64   // running total of measurements
	sumOfSquares float64 // sum of the squares of the measurements (not the square of the sum)
	buckets      []int64 // bucketed values for histogram; nil until more than one distinct bucket is hit
	value        int     // bucket index of the single recorded bucket (optimization; meaningful while valueCount >= 0)
	valueCount   int64   // number of values recorded for single value; -1 is the sentinel meaning buckets is authoritative
}
// addMeasurement records a value measurement observation to the histogram.
func (h *histogram) addMeasurement(value int64) {
	// TODO: assert invariant
	h.sum += value
	h.sumOfSquares += float64(value) * float64(value)

	bucketIndex := getBucket(value)

	if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
		// Fast path: nothing recorded yet, or every observation so far hit
		// this same bucket. Track just the bucket index and a count,
		// deferring allocation of the bucket slice.
		h.value = bucketIndex
		h.valueCount++
	} else {
		// Either buckets are already in use (valueCount == -1) or a second
		// distinct bucket was hit; switch to the full bucket slice.
		h.allocateBuckets()
		h.buckets[bucketIndex]++
	}
}
// allocateBuckets switches h from the single-value optimization to the
// full bucket slice, migrating the recorded count. Afterwards valueCount
// is -1, the sentinel meaning the buckets slice is authoritative.
func (h *histogram) allocateBuckets() {
	if h.buckets == nil {
		h.buckets = make([]int64, bucketCount)
		h.buckets[h.value] = h.valueCount
		h.value = 0
		h.valueCount = -1
	}
}
// log2 returns the number of significant bits in i: floor(log2(i))+1
// for i > 0, and 0 for i == 0. It consumes eight bits at a time before
// falling back to single-bit steps.
func log2(i int64) int {
	bits := 0
	for ; i >= 0x100; i >>= 8 {
		bits += 8
	}
	for ; i > 0; i >>= 1 {
		bits++
	}
	return bits
}
// getBucket maps a measurement to its histogram bucket index,
// clamped to the valid range [0, bucketCount).
func getBucket(i int64) (index int) {
	index = log2(i) - 1
	switch {
	case index < 0:
		index = 0
	case index >= bucketCount:
		index = bucketCount - 1
	}
	return
}
// total returns the number of recorded observations.
// When valueCount is -1 the counts live entirely in buckets.
func (h *histogram) total() (total int64) {
	if h.valueCount >= 0 {
		total = h.valueCount
	}
	for _, val := range h.buckets {
		total += val // val is already int64; the old int64(val) conversion was redundant
	}
	return
}
// average returns the mean of all recorded observations,
// or 0 when nothing has been recorded.
func (h *histogram) average() float64 {
	count := h.total()
	if count == 0 {
		return 0
	}
	return float64(h.sum) / float64(count)
}
// variance returns the population variance of recorded observations,
// computed as E[X²] − E[X]².
func (h *histogram) variance() float64 {
	count := float64(h.total())
	if count == 0 {
		return 0
	}
	mean := float64(h.sum) / count
	return h.sumOfSquares/count - mean*mean
}
// standardDeviation returns the square root of the variance of
// recorded observations.
func (h *histogram) standardDeviation() float64 {
	v := h.variance()
	return math.Sqrt(v)
}
// percentileBoundary estimates the value that the given fraction of recorded
// observations are less than.
//
// NOTE(review): the loop only runs once buckets are allocated; in the
// single-value form h.buckets is nil, so (for total > 1) this falls through
// to the last bucket's boundary — confirm that callers only use this on
// bucketed histograms.
func (h *histogram) percentileBoundary(percentile float64) int64 {
	total := h.total()

	// Corner cases (make sure result is strictly less than Total())
	if total == 0 {
		return 0
	} else if total == 1 {
		return int64(h.average())
	}

	percentOfTotal := round(float64(total) * percentile)
	var runningTotal int64

	for i := range h.buckets {
		value := h.buckets[i]
		runningTotal += value
		if runningTotal == percentOfTotal {
			// We hit an exact bucket boundary. If the next bucket has data, it is a
			// good estimate of the value. If the bucket is empty, we interpolate the
			// midpoint between the next bucket's boundary and the next non-zero
			// bucket. If the remaining buckets are all empty, then we use the
			// boundary for the next bucket as the estimate.
			j := uint8(i + 1)
			min := bucketBoundary(j)
			if runningTotal < total {
				for h.buckets[j] == 0 {
					j++
				}
			}
			max := bucketBoundary(j)
			return min + round(float64(max-min)/2)
		} else if runningTotal > percentOfTotal {
			// The value is in this bucket. Interpolate the value.
			delta := runningTotal - percentOfTotal
			percentBucket := float64(value-delta) / float64(value)
			bucketMin := bucketBoundary(uint8(i))
			nextBucketMin := bucketBoundary(uint8(i + 1))
			bucketSize := nextBucketMin - bucketMin
			return bucketMin + round(percentBucket*float64(bucketSize))
		}
	}
	return bucketBoundary(bucketCount - 1)
}
// median returns the estimated 50th-percentile value of the observations.
func (h *histogram) median() int64 {
	const half = 0.5
	return h.percentileBoundary(half)
}
// Add adds other to h. other must be a *histogram.
// Part of the timeseries.Observable interface.
func (h *histogram) Add(other timeseries.Observable) {
	o := other.(*histogram)
	if o.valueCount == 0 {
		// Other histogram is empty
	} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
		// Both have a single bucketed value, aggregate them
		h.valueCount += o.valueCount
	} else {
		// Two different values necessitate buckets in this histogram
		h.allocateBuckets()
		if o.valueCount >= 0 {
			// Other is still in single-value form; fold its one count in.
			h.buckets[o.value] += o.valueCount
		} else {
			// Both bucketed; merge element-wise.
			for i := range h.buckets {
				h.buckets[i] += o.buckets[i]
			}
		}
	}
	h.sumOfSquares += o.sumOfSquares
	h.sum += o.sum
}
// Clear resets the histogram to an empty state, removing all observed values.
func (h *histogram) Clear() {
	h.buckets = nil
	h.valueCount = 0
	h.value = 0
	h.sumOfSquares = 0
	h.sum = 0
}
// CopyFrom copies from other, which must be a *histogram, into h.
func (h *histogram) CopyFrom(other timeseries.Observable) {
	o := other.(*histogram)
	if o.valueCount == -1 {
		// Source is in bucketed form; mirror its buckets into ours.
		h.allocateBuckets()
		copy(h.buckets, o.buckets)
	}
	h.sum = o.sum
	h.sumOfSquares = o.sumOfSquares
	h.value = o.value
	h.valueCount = o.valueCount
}
// Multiply scales every count and sum in the histogram by ratio.
// Scaled counts are truncated toward zero by the int64 conversion.
func (h *histogram) Multiply(ratio float64) {
	if h.valueCount == -1 {
		// Bucketed form: scale each bucket count.
		for i := range h.buckets {
			h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
		}
	} else {
		// Single-value form: scale the lone count.
		h.valueCount = int64(float64(h.valueCount) * ratio)
	}
	h.sum = int64(float64(h.sum) * ratio)
	h.sumOfSquares *= ratio
}
// New creates a new, empty histogram.
// Part of the timeseries.Observable interface.
func (h *histogram) New() timeseries.Observable {
	fresh := new(histogram)
	fresh.Clear()
	return fresh
}
// String returns a debug representation of the histogram's raw fields:
// sum, sumOfSquares, value, valueCount, buckets. Tests compare these strings.
func (h *histogram) String() string {
	return fmt.Sprintf("%d, %f, %d, %d, %v",
		h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
}
// round returns the int64 closest to x, with exact halves rounded
// toward positive infinity.
func round(x float64) int64 {
	shifted := x + 0.5
	return int64(math.Floor(shifted))
}
// bucketBoundary returns the smallest value contained in the given bucket:
// 0 for bucket 0, and 2^bucket otherwise.
func bucketBoundary(bucket uint8) int64 {
	if bucket > 0 {
		return 1 << bucket
	}
	return 0
}
// bucketData holds data about a specific bucket for use in distTmpl.
type bucketData struct {
	Lower, Upper       int64   // value range [Lower, Upper) of the bucket
	N                  int64   // observation count in this bucket
	Pct, CumulativePct float64 // percent of total, and running percent
	GraphWidth         int     // bar width in pixels, scaled to maxHTMLBarWidth
}
// data holds data about a Distribution for use in distTmpl.
type data struct {
	Buckets                 []*bucketData // nil entries mark empty buckets; the template skips them
	Count, Median           int64
	Mean, StandardDeviation float64
}

// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
const maxHTMLBarWidth = 350.0
// newData returns data representing h for use in distTmpl.
func (h *histogram) newData() *data {
	// Force the allocation of buckets to simplify the rendering implementation
	h.allocateBuckets()
	// We scale the bars on the right so that the largest bar is
	// maxHTMLBarWidth pixels in width.
	maxBucket := int64(0)
	for _, n := range h.buckets {
		if n > maxBucket {
			maxBucket = n
		}
	}
	total := h.total()
	// If every bucket is empty, barsizeMult is +Inf, but it is never used
	// because the loop below skips buckets with n == 0.
	barsizeMult := maxHTMLBarWidth / float64(maxBucket)
	var pctMult float64
	if total == 0 {
		pctMult = 1.0
	} else {
		pctMult = 100.0 / float64(total)
	}

	buckets := make([]*bucketData, len(h.buckets))
	runningTotal := int64(0)
	for i, n := range h.buckets {
		if n == 0 {
			continue // entry stays nil; distTmpl skips nil buckets
		}
		runningTotal += n
		var upperBound int64
		if i < bucketCount-1 {
			upperBound = bucketBoundary(uint8(i + 1))
		} else {
			// The last bucket is open-ended.
			upperBound = math.MaxInt64
		}
		buckets[i] = &bucketData{
			Lower:         bucketBoundary(uint8(i)),
			Upper:         upperBound,
			N:             n,
			Pct:           float64(n) * pctMult,
			CumulativePct: float64(runningTotal) * pctMult,
			GraphWidth:    int(float64(n) * barsizeMult),
		}
	}
	return &data{
		Buckets:           buckets,
		Count:             total,
		Median:            h.median(),
		Mean:              h.average(),
		StandardDeviation: h.standardDeviation(),
	}
}
// html renders h as an HTML distribution table via distTmpl.
// On template error it logs and returns an empty snippet rather than failing.
func (h *histogram) html() template.HTML {
	buf := new(bytes.Buffer)
	if err := distTmpl.Execute(buf, h.newData()); err != nil {
		buf.Reset()
		log.Printf("net/trace: couldn't execute template: %v", err)
	}
	return template.HTML(buf.String())
}
// distTmpl renders a *data value (see newData) as a summary table
// followed by one row per non-nil bucket.
// Input: data
var distTmpl = template.Must(template.New("distTmpl").Parse(`
<table>
<tr>
<td style="padding:0.25em">Count: {{.Count}}</td>
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
<td style="padding:0.25em">Median: {{.Median}}</td>
</tr>
</table>
<hr>
<table>
{{range $b := .Buckets}}
{{if $b}}
<tr>
<td style="padding:0 0 0 0.25em">[</td>
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
</tr>
{{end}}
{{end}}
</table>
`))

325
trace/histogram_test.go Normal file
View File

@@ -0,0 +1,325 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"math"
"testing"
)
// sumTest describes one incremental addMeasurement step and the
// cumulative aggregates expected after it.
type sumTest struct {
	value        int64   // measurement to add
	sum          int64   // expected cumulative sum
	sumOfSquares float64 // expected cumulative sum of squares
	total        int64   // expected observation count
}

// sumTests is applied in order to a single histogram; expectations are cumulative.
var sumTests = []sumTest{
	{100, 100, 10000, 1},
	{50, 150, 12500, 2},
	{50, 200, 15000, 3},
	{50, 250, 17500, 4},
}
// bucketingTest pairs an input with its expected log2 and bucket index.
type bucketingTest struct {
	in     int64
	log    int
	bucket int
}

var bucketingTests = []bucketingTest{
	{0, 0, 0},
	{1, 1, 0},
	{2, 2, 1},
	{3, 2, 1},
	{4, 3, 2},
	{1000, 10, 9},
	{1023, 10, 9},
	{1024, 11, 10},
	{1000000, 20, 19},
}
// multiplyTest adds in to a running histogram, multiplies by ratio, and
// checks the scaled aggregates. Cases are cumulative across the loop in
// TestMultiply (the histogram is not reset between cases).
type multiplyTest struct {
	in                   int64
	ratio                float64
	expectedSum          int64
	expectedTotal        int64
	expectedSumOfSquares float64
}

var multiplyTests = []multiplyTest{
	{15, 2.5, 37, 2, 562.5},
	{128, 4.6, 758, 13, 77953.9},
}
// percentileTest maps a percentile fraction to the boundary estimate
// expected for the fixture histogram built in TestPercentileBoundary.
type percentileTest struct {
	fraction float64
	expected int64
}

var percentileTests = []percentileTest{
	{0.25, 48},
	{0.5, 96},
	{0.6, 109},
	{0.75, 128},
	{0.90, 205},
	{0.95, 230},
	{0.99, 256},
}
// TestSum verifies the cumulative sum, sumOfSquares and total after
// each measurement in sumTests.
func TestSum(t *testing.T) {
	var h histogram
	for _, tc := range sumTests {
		h.addMeasurement(tc.value)
		if got := h.sum; got != tc.sum {
			t.Errorf("h.Sum = %v WANT: %v", got, tc.sum)
		}
		if got := h.sumOfSquares; got != tc.sumOfSquares {
			t.Errorf("h.SumOfSquares = %v WANT: %v", got, tc.sumOfSquares)
		}
		if got := h.total(); got != tc.total {
			t.Errorf("h.Total = %v WANT: %v", got, tc.total)
		}
	}
}
// TestMultiply verifies histogram.Multiply against cumulative cases.
func TestMultiply(t *testing.T) {
	var h histogram
	for i, test := range multiplyTests {
		h.addMeasurement(test.in)
		h.Multiply(test.ratio)
		if h.sum != test.expectedSum {
			t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum)
		}
		if h.total() != test.expectedTotal {
			t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal)
		}
		if h.sumOfSquares != test.expectedSumOfSquares {
			// Fix: the got/want arguments were previously swapped, so a
			// failure reported the expected value as the actual one.
			t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, h.sumOfSquares, test.expectedSumOfSquares)
		}
	}
}
// TestBucketingFunctions checks log2 and getBucket against the table.
func TestBucketingFunctions(t *testing.T) {
	for _, tc := range bucketingTests {
		if got := log2(tc.in); got != tc.log {
			t.Errorf("log2 = %v WANT: %v", got, tc.log)
		}
		if got := getBucket(tc.in); got != tc.bucket {
			t.Errorf("getBucket = %v WANT: %v", got, tc.bucket)
		}
	}
}
// TestAverage covers the empty histogram and a small mixed sample.
func TestAverage(t *testing.T) {
	a := new(histogram)
	if got := a.average(); got != 0 {
		t.Errorf("Average of empty histogram was %v WANT: 0", got)
	}

	for _, v := range []int64{1, 1, 3} {
		a.addMeasurement(v)
	}
	const expected = float64(5) / float64(3)
	if got := a.average(); !isApproximate(got, expected) {
		t.Errorf("Average = %g WANT: %v", got, expected)
	}
}
// TestStandardDeviation checks a mixed sample, the empty histogram, and
// histograms of identical values (which must have zero deviation).
func TestStandardDeviation(t *testing.T) {
	a := new(histogram)
	add(a, 10, 1<<4)
	add(a, 10, 1<<5)
	add(a, 10, 1<<6)
	stdDev := a.standardDeviation()
	const expected = 19.95
	if !isApproximate(stdDev, expected) {
		t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected)
	}

	// No values.
	a = new(histogram)
	stdDev = a.standardDeviation()
	if !isApproximate(stdDev, 0) {
		t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
	}

	// All-equal values have zero deviation. The original test forgot to
	// recompute stdDev after these adds, so its checks re-asserted the
	// stale (already verified) value; recompute before each assertion.
	add(a, 1, 1<<4)
	stdDev = a.standardDeviation()
	if !isApproximate(stdDev, 0) {
		t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
	}

	add(a, 10, 1<<4)
	stdDev = a.standardDeviation()
	if !isApproximate(stdDev, 0) {
		t.Errorf("StandardDeviation = %v WANT: 0", stdDev)
	}
}
// TestPercentileBoundary builds a three-bucket fixture and checks the
// boundary estimates from the percentileTests table.
func TestPercentileBoundary(t *testing.T) {
	a := new(histogram)
	add(a, 5, 1<<4)
	add(a, 10, 1<<6)
	add(a, 5, 1<<7)
	for _, tc := range percentileTests {
		if got := a.percentileBoundary(tc.fraction); got != tc.expected {
			t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", tc.fraction, got, tc.expected)
		}
	}
}
func TestCopyFrom(t *testing.T) {
a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1}
a.CopyFrom(&b)
if a.String() != b.String() {
t.Errorf("a.String = %s WANT: %s", a.String(), b.String())
}
}
func TestClear(t *testing.T) {
a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
a.Clear()
expected := "0, 0.000000, 0, 0, []"
if a.String() != expected {
t.Errorf("a.String = %s WANT %s", a.String(), expected)
}
}
func TestNew(t *testing.T) {
a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
b := a.New()
expected := "0, 0.000000, 0, 0, []"
if b.(*histogram).String() != expected {
t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected)
}
}
// TestAdd exercises every branch of histogram.Add: empty addend, same
// single bucket, bucketed+bucketed, bucketed into empty, bucketed into
// single-value, and order-independence of single-value aggregation.
func TestAdd(t *testing.T) {
	// The tests here depend on the associativity of addMeasurement and Add.
	// Add empty observation
	a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
		19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}
	b := a.New()
	expected := a.String()
	a.Add(b)
	if a.String() != expected {
		t.Errorf("a.String = %s WANT: %s", a.String(), expected)
	}

	// Add same bucketed value, no new buckets
	c := new(histogram)
	d := new(histogram)
	e := new(histogram)
	c.addMeasurement(12)
	d.addMeasurement(11)
	e.addMeasurement(12)
	e.addMeasurement(11)
	c.Add(d)
	if c.String() != e.String() {
		t.Errorf("c.String = %s WANT: %s", c.String(), e.String())
	}

	// Add bucketed values
	f := new(histogram)
	g := new(histogram)
	h := new(histogram)
	f.addMeasurement(4)
	f.addMeasurement(12)
	f.addMeasurement(100)
	g.addMeasurement(18)
	g.addMeasurement(36)
	g.addMeasurement(255)
	h.addMeasurement(4)
	h.addMeasurement(12)
	h.addMeasurement(100)
	h.addMeasurement(18)
	h.addMeasurement(36)
	h.addMeasurement(255)
	f.Add(g)
	if f.String() != h.String() {
		t.Errorf("f.String = %q WANT: %q", f.String(), h.String())
	}

	// add buckets to no buckets
	i := new(histogram)
	j := new(histogram)
	k := new(histogram)
	j.addMeasurement(18)
	j.addMeasurement(36)
	j.addMeasurement(255)
	k.addMeasurement(18)
	k.addMeasurement(36)
	k.addMeasurement(255)
	i.Add(j)
	if i.String() != k.String() {
		t.Errorf("i.String = %q WANT: %q", i.String(), k.String())
	}

	// add buckets to single value (no overlap)
	l := new(histogram)
	m := new(histogram)
	n := new(histogram)
	l.addMeasurement(0)
	m.addMeasurement(18)
	m.addMeasurement(36)
	m.addMeasurement(255)
	n.addMeasurement(0)
	n.addMeasurement(18)
	n.addMeasurement(36)
	n.addMeasurement(255)
	l.Add(m)
	if l.String() != n.String() {
		t.Errorf("l.String = %q WANT: %q", l.String(), n.String())
	}

	// mixed order
	o := new(histogram)
	p := new(histogram)
	o.addMeasurement(0)
	o.addMeasurement(2)
	o.addMeasurement(0)
	p.addMeasurement(0)
	p.addMeasurement(0)
	p.addMeasurement(2)
	if o.String() != p.String() {
		t.Errorf("o.String = %q WANT: %q", o.String(), p.String())
	}
}
// add records val into h the given number of times.
func add(h *histogram, times int, val int64) {
	for n := 0; n < times; n++ {
		h.addMeasurement(val)
	}
}
// isApproximate reports whether x and y differ by less than 0.01.
func isApproximate(x, y float64) bool {
	const tolerance = 1e-2
	diff := math.Abs(x - y)
	return diff < tolerance
}

987
trace/trace.go Normal file
View File

@@ -0,0 +1,987 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package trace implements tracing of requests.
It exports an HTTP interface on /debug/requests.
A request handler might be implemented like this:
func myHandler(w http.ResponseWriter, req *http.Request) {
tr := trace.New("Received", req.URL.Path)
defer tr.Finish()
...
tr.LazyPrintf("some event %q happened", str)
...
if err := somethingImportant(); err != nil {
tr.LazyPrintf("somethingImportant failed: %v", err)
tr.SetError()
}
}
*/
package trace // import "golang.org/x/net/trace"
import (
"bytes"
"fmt"
"html/template"
"io"
"log"
"net"
"net/http"
"runtime"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/internal/timeseries"
)
// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
// FOR DEBUGGING ONLY. This will slow down the program.
// When set, Finish records a stack trace and addEvent logs any later use.
var DebugUseAfterFinish = false
// AuthRequest determines whether a specific request is permitted to load the /debug/requests page.
// It returns two bools; the first indicates whether the page may be viewed at all,
// and the second indicates whether sensitive events will be shown.
//
// AuthRequest may be replaced by a program to customise its authorisation requirements.
//
// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1].
var AuthRequest = func(req *http.Request) (any, sensitive bool) {
host, _, err := net.SplitHostPort(req.RemoteAddr)
switch {
case err != nil: // Badly formed address; fail closed.
return false, false
case host == "localhost" || host == "127.0.0.1" || host == "[::1]":
return true, true
default:
return false, false
}
}
// init registers the /debug/requests handler. Access is gated through the
// package-level AuthRequest function; unauthorised requests get a 401.
func init() {
	http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
		any, sensitive := AuthRequest(req)
		if !any {
			http.Error(w, "not allowed", http.StatusUnauthorized)
			return
		}
		render(w, req, sensitive)
	})
}
// render renders the HTML page.
// req may be nil.
func render(w io.Writer, req *http.Request, sensitive bool) {
	data := &struct {
		Families         []string
		ActiveTraceCount map[string]int
		CompletedTraces  map[string]*family

		// Set when a bucket has been selected.
		Traces        traceList
		Family        string
		Bucket        int
		Expanded      bool
		Traced        bool
		Active        bool
		ShowSensitive bool // whether to show sensitive events

		Histogram       template.HTML
		HistogramWindow string // e.g. "last minute", "last hour", "all time"

		// If non-zero, the set of traces is a partial set,
		// and this is the total number.
		Total int
	}{
		CompletedTraces: completedTraces,
	}

	data.ShowSensitive = sensitive
	if req != nil {
		// Allow show_sensitive=0 to force hiding of sensitive data for testing.
		// This only goes one way; you can't use show_sensitive=1 to see things.
		if req.FormValue("show_sensitive") == "0" {
			data.ShowSensitive = false
		}

		if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
			data.Expanded = exp
		}
		if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
			data.Traced = exp
		}
	}

	completedMu.RLock()
	data.Families = make([]string, 0, len(completedTraces))
	for fam := range completedTraces { // fix: drop unused blank value (gofmt -s form)
		data.Families = append(data.Families, fam)
	}
	completedMu.RUnlock()
	sort.Strings(data.Families)

	// We are careful here to minimize the time spent locking activeMu,
	// since that lock is required every time an RPC starts and finishes.
	data.ActiveTraceCount = make(map[string]int, len(data.Families))
	activeMu.RLock()
	for fam, s := range activeTraces {
		data.ActiveTraceCount[fam] = s.Len()
	}
	activeMu.RUnlock()

	var ok bool
	data.Family, data.Bucket, ok = parseArgs(req)
	switch {
	case !ok:
		// No-op
	case data.Bucket == -1:
		// Pseudo-bucket: the currently active traces.
		data.Active = true
		n := data.ActiveTraceCount[data.Family]
		data.Traces = getActiveTraces(data.Family)
		if len(data.Traces) < n {
			data.Total = n
		}
	case data.Bucket < bucketsPerFamily:
		if b := lookupBucket(data.Family, data.Bucket); b != nil {
			data.Traces = b.Copy(data.Traced)
		}
	default:
		// Buckets beyond bucketsPerFamily select a latency histogram window.
		if f := getFamily(data.Family, false); f != nil {
			var obs timeseries.Observable
			f.LatencyMu.RLock()
			switch o := data.Bucket - bucketsPerFamily; o {
			case 0:
				obs = f.Latency.Minute()
				data.HistogramWindow = "last minute"
			case 1:
				obs = f.Latency.Hour()
				data.HistogramWindow = "last hour"
			case 2:
				obs = f.Latency.Total()
				data.HistogramWindow = "all time"
			}
			f.LatencyMu.RUnlock()
			if obs != nil {
				data.Histogram = obs.(*histogram).html()
			}
		}
	}

	if data.Traces != nil {
		defer data.Traces.Free()
		sort.Sort(data.Traces)
	}

	completedMu.RLock()
	defer completedMu.RUnlock()
	if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
		log.Printf("net/trace: Failed executing template: %v", err)
	}
}
func parseArgs(req *http.Request) (fam string, b int, ok bool) {
if req == nil {
return "", 0, false
}
fam, bStr := req.FormValue("fam"), req.FormValue("b")
if fam == "" || bStr == "" {
return "", 0, false
}
b, err := strconv.Atoi(bStr)
if err != nil || b < -1 {
return "", 0, false
}
return fam, b, true
}
// lookupBucket returns bucket b of family fam, or nil when the family
// is unknown or b is out of range.
func lookupBucket(fam string, b int) *traceBucket {
	f := getFamily(fam, false)
	if f == nil {
		return nil
	}
	if b < 0 || b >= len(f.Buckets) {
		return nil
	}
	return f.Buckets[b]
}
// Trace represents an active request.
type Trace interface {
	// LazyLog adds x to the event log. It will be evaluated each time the
	// /debug/requests page is rendered. Any memory referenced by x will be
	// pinned until the trace is finished and later discarded.
	LazyLog(x fmt.Stringer, sensitive bool)

	// LazyPrintf evaluates its arguments with fmt.Sprintf each time the
	// /debug/requests page is rendered. Any memory referenced by a will be
	// pinned until the trace is finished and later discarded.
	LazyPrintf(format string, a ...interface{})

	// SetError declares that this trace resulted in an error.
	SetError()

	// SetRecycler sets a recycler for the trace.
	// f will be called for each event passed to LazyLog at a time when
	// it is no longer required, whether while the trace is still active
	// and the event is discarded, or when a completed trace is discarded.
	SetRecycler(f func(interface{}))

	// SetTraceInfo sets the trace info for the trace.
	// This is currently unused.
	SetTraceInfo(traceID, spanID uint64)

	// SetMaxEvents sets the maximum number of events that will be stored
	// in the trace. This has no effect if any events have already been
	// added to the trace.
	SetMaxEvents(m int)

	// Finish declares that this trace is complete.
	// The trace should not be used after calling this method.
	Finish()
}
// lazySprintf is a fmt.Stringer that defers the Sprintf call until
// String is invoked (i.e. until the page is rendered).
type lazySprintf struct {
	format string
	a      []interface{}
}

func (ls *lazySprintf) String() string {
	return fmt.Sprintf(ls.format, ls.a...)
}
// New returns a new Trace with the specified family and title.
func New(family, title string) Trace {
	tr := newTrace()
	tr.ref()
	tr.Family, tr.Title = family, title
	tr.Start = time.Now()
	tr.events = make([]event, 0, maxEventsPerTrace)

	activeMu.RLock()
	s := activeTraces[tr.Family]
	activeMu.RUnlock()
	if s == nil {
		// Double-checked creation: re-read under the write lock so two
		// concurrent first-traces of a family don't both allocate a set.
		activeMu.Lock()
		s = activeTraces[tr.Family] // check again
		if s == nil {
			s = new(traceSet)
			activeTraces[tr.Family] = s
		}
		activeMu.Unlock()
	}
	s.Add(tr)

	// Trigger allocation of the completed trace structure for this family.
	// This will cause the family to be present in the request page during
	// the first trace of this family. We don't care about the return value,
	// nor is there any need for this to run inline, so we execute it in its
	// own goroutine, but only if the family isn't allocated yet.
	completedMu.RLock()
	if _, ok := completedTraces[tr.Family]; !ok {
		go allocFamily(tr.Family)
	}
	completedMu.RUnlock()

	return tr
}
// Finish marks tr complete: it records the elapsed time, removes the
// trace from its family's active set, files it into every matching
// completed-trace bucket, and adds its latency (in microseconds) to the
// family's timeseries.
func (tr *trace) Finish() {
	tr.Elapsed = time.Since(tr.Start) // idiomatic form of time.Now().Sub(tr.Start)
	if DebugUseAfterFinish {
		buf := make([]byte, 4<<10) // 4 KB should be enough
		n := runtime.Stack(buf, false)
		tr.finishStack = buf[:n]
	}

	activeMu.RLock()
	m := activeTraces[tr.Family]
	activeMu.RUnlock()
	m.Remove(tr)

	f := getFamily(tr.Family, true)
	for _, b := range f.Buckets {
		if b.Cond.match(tr) {
			b.Add(tr)
		}
	}
	// Add a sample of elapsed time as microseconds to the family's timeseries
	h := new(histogram)
	h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
	f.LatencyMu.Lock()
	f.Latency.Add(h)
	f.LatencyMu.Unlock()

	tr.unref() // matches ref in New
}
const (
	bucketsPerFamily  = 9  // latency-threshold buckets plus the errors bucket
	tracesPerBucket   = 10 // ring-buffer capacity of each traceBucket
	maxActiveTraces   = 20 // Maximum number of active traces to show.
	maxEventsPerTrace = 10 // events kept per trace before discarding the middle

	// NOTE(review): numHistogramBuckets is not referenced in this file and
	// duplicates bucketCount in histogram.go — confirm it is still needed.
	numHistogramBuckets = 38
)
var (
	// The active traces.
	activeMu     sync.RWMutex
	activeTraces = make(map[string]*traceSet) // family -> traces

	// Families of completed traces.
	completedMu     sync.RWMutex
	completedTraces = make(map[string]*family) // family -> traces
)
// traceSet is a mutex-guarded set of the active traces in one family.
type traceSet struct {
	mu sync.RWMutex
	m  map[*trace]bool

	// We could avoid the entire map scan in FirstN by having a slice of all the traces
	// ordered by start time, and an index into that from the trace struct, with a periodic
	// repack of the slice after enough traces finish; we could also use a skip list or similar.
	// However, that would shift some of the expense from /debug/requests time to RPC time,
	// which is probably the wrong trade-off.
}
// Len returns the number of traces currently in the set.
func (ts *traceSet) Len() int {
	ts.mu.RLock()
	n := len(ts.m)
	ts.mu.RUnlock()
	return n
}
// Add inserts tr into the set, lazily creating the underlying map.
func (ts *traceSet) Add(tr *trace) {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	if ts.m == nil {
		ts.m = make(map[*trace]bool)
	}
	ts.m[tr] = true
}
// Remove deletes tr from the set, if present.
func (ts *traceSet) Remove(tr *trace) {
	ts.mu.Lock()
	defer ts.mu.Unlock()
	delete(ts.m, tr)
}
// FirstN returns the first n traces ordered by time.
// Each returned trace is ref'd; the caller is responsible for unref'ing
// (e.g. via traceList.Free).
func (ts *traceSet) FirstN(n int) traceList {
	ts.mu.RLock()
	defer ts.mu.RUnlock()

	if n > len(ts.m) {
		n = len(ts.m)
	}
	trl := make(traceList, 0, n)

	// Fast path for when no selectivity is needed.
	if n == len(ts.m) {
		for tr := range ts.m {
			tr.ref()
			trl = append(trl, tr)
		}
		sort.Sort(trl)
		return trl
	}

	// Pick the oldest n traces.
	// This is inefficient. See the comment in the traceSet struct.
	for tr := range ts.m {
		// Put the first n traces into trl in the order they occur.
		// When we have n, sort trl, and thereafter maintain its order.
		if len(trl) < n {
			tr.ref()
			trl = append(trl, tr)
			if len(trl) == n {
				// This is guaranteed to happen exactly once during this loop.
				sort.Sort(trl)
			}
			continue
		}
		// trl is full and sorted; skip traces newer than its newest entry.
		if tr.Start.After(trl[n-1].Start) {
			continue
		}

		// Find where to insert this one.
		tr.ref()
		i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
		trl[n-1].unref() // evict the newest entry to make room
		copy(trl[i+1:], trl[i:])
		trl[i] = tr
	}

	return trl
}
// getActiveTraces returns up to maxActiveTraces of the oldest active
// traces in family fam, or nil when the family has no active set.
func getActiveTraces(fam string) traceList {
	activeMu.RLock()
	set := activeTraces[fam]
	activeMu.RUnlock()
	if set == nil {
		return nil
	}
	return set.FirstN(maxActiveTraces)
}
// getFamily returns the family record for fam. When allocNew is true a
// missing family is created; otherwise nil is returned.
func getFamily(fam string, allocNew bool) *family {
	completedMu.RLock()
	f := completedTraces[fam]
	completedMu.RUnlock()
	if f != nil {
		return f
	}
	if !allocNew {
		return nil
	}
	return allocFamily(fam)
}
// allocFamily returns the family record for fam, creating it if needed.
// Safe for concurrent callers; the first to take the lock wins.
func allocFamily(fam string) *family {
	completedMu.Lock()
	defer completedMu.Unlock()
	if f := completedTraces[fam]; f != nil {
		return f
	}
	f := newFamily()
	completedTraces[fam] = f
	return f
}
// family represents a set of trace buckets and associated latency information.
type family struct {
	// traces may occur in multiple buckets.
	Buckets [bucketsPerFamily]*traceBucket

	// latency time series
	LatencyMu sync.RWMutex
	Latency   *timeseries.MinuteHourSeries
}
// newFamily returns a family with the standard latency-threshold buckets
// plus a final errors bucket, and an empty latency timeseries backed by
// fresh histograms.
func newFamily() *family {
	return &family{
		Buckets: [bucketsPerFamily]*traceBucket{
			{Cond: minCond(0)},
			{Cond: minCond(50 * time.Millisecond)},
			{Cond: minCond(100 * time.Millisecond)},
			{Cond: minCond(200 * time.Millisecond)},
			{Cond: minCond(500 * time.Millisecond)},
			{Cond: minCond(1 * time.Second)},
			{Cond: minCond(10 * time.Second)},
			{Cond: minCond(100 * time.Second)},
			{Cond: errorCond{}},
		},
		Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
	}
}
// traceBucket represents a size-capped bucket of historic traces,
// along with a condition for a trace to belong to the bucket.
type traceBucket struct {
	Cond cond

	// Ring buffer implementation of a fixed-size FIFO queue.
	mu     sync.RWMutex
	buf    [tracesPerBucket]*trace
	start  int // < tracesPerBucket; index of oldest entry
	length int // <= tracesPerBucket; number of entries in use
}
// Add inserts tr at the logical end of the ring buffer, evicting (and
// unref'ing) the oldest trace when the bucket is already full. tr is
// ref'd for as long as it remains in the bucket.
func (b *traceBucket) Add(tr *trace) {
	b.mu.Lock()
	defer b.mu.Unlock()

	// i is the slot one past the newest entry, wrapped around the ring.
	i := b.start + b.length
	if i >= tracesPerBucket {
		i -= tracesPerBucket
	}
	if b.length == tracesPerBucket {
		// "Remove" an element from the bucket.
		b.buf[i].unref()
		b.start++
		if b.start == tracesPerBucket {
			b.start = 0
		}
	}
	b.buf[i] = tr
	if b.length < tracesPerBucket {
		b.length++
	}
	tr.ref()
}
// Copy returns a copy of the traces in the bucket.
// If tracedOnly is true, only the traces with trace information will be returned.
// The logs will be ref'd before returning; the caller should call
// the Free method when it is done with them.
// TODO(dsymonds): keep track of traced requests in separate buckets.
func (b *traceBucket) Copy(tracedOnly bool) traceList {
	b.mu.RLock()
	defer b.mu.RUnlock()

	trl := make(traceList, 0, b.length)
	for i, x := 0, b.start; i < b.length; i++ {
		tr := b.buf[x]
		if !tracedOnly || tr.spanID != 0 {
			tr.ref()
			trl = append(trl, tr)
		}
		x++
		// Fix: wrap the ring-buffer cursor at the buffer's capacity, not at
		// b.length. The old check only coincidentally worked because start is
		// non-zero only when length == tracesPerBucket; this form holds the
		// invariant explicitly.
		if x == len(b.buf) {
			x = 0
		}
	}
	return trl
}
// Empty reports whether the bucket holds no traces.
func (b *traceBucket) Empty() bool {
	b.mu.RLock()
	n := b.length
	b.mu.RUnlock()
	return n == 0
}
// cond represents a condition on a trace.
type cond interface {
	match(t *trace) bool
	String() string
}

// minCond matches traces whose elapsed time is at least the duration it wraps.
type minCond time.Duration

func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
func (m minCond) String() string      { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }

// errorCond matches traces that were marked as errors via SetError.
type errorCond struct{}

func (e errorCond) match(t *trace) bool { return t.IsError }
func (e errorCond) String() string      { return "errors" }
// traceList is a list of traces; it implements sort.Interface.
type traceList []*trace

// Free calls unref on each element of the list.
func (trl traceList) Free() {
	for _, t := range trl {
		t.unref()
	}
}

// traceList may be sorted in reverse chronological order.
func (trl traceList) Len() int           { return len(trl) }
func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
func (trl traceList) Swap(i, j int)      { trl[i], trl[j] = trl[j], trl[i] }
// An event is a timestamped log entry in a trace.
type event struct {
	When       time.Time
	Elapsed    time.Duration // since previous event in trace
	NewDay     bool          // whether this event is on a different day to the previous event
	Recyclable bool          // whether this event was passed via LazyLog
	What       interface{}   // string or fmt.Stringer
	Sensitive  bool          // whether this event contains sensitive information
}
// WhenString returns a string representation of the event's timestamp.
// The date is included only when the event crossed midnight.
func (e event) WhenString() string {
	layout := "15:04:05.000000"
	if e.NewDay {
		layout = "2006/01/02 " + layout
	}
	return e.When.Format(layout)
}
// discarded represents a number of discarded events.
// It is stored as *discarded to make it easier to update in-place.
type discarded int

func (d *discarded) String() string {
	count := int(*d)
	return fmt.Sprintf("(%d events discarded)", count)
}
// trace represents an active or complete request,
// either sent or received by this program.
type trace struct {
	// Family is the top-level grouping of traces to which this belongs.
	Family string

	// Title is the title of this trace.
	Title string

	// Timing information.
	Start   time.Time
	Elapsed time.Duration // zero while active

	// Trace information if non-zero.
	traceID uint64
	spanID  uint64

	// Whether this trace resulted in an error.
	IsError bool

	// Append-only sequence of events (modulo discards).
	mu     sync.RWMutex
	events []event

	refs     int32 // how many buckets this is in
	recycler func(interface{})
	disc     discarded // scratch space to avoid allocation

	finishStack []byte // where finish was called, if DebugUseAfterFinish is set
}
// reset clears every field of tr in preparation for reuse.
// Clear all but the mutex. Mutexes may not be copied, even when unlocked,
// so fields are zeroed individually rather than assigning a fresh struct.
func (tr *trace) reset() {
	tr.Family = ""
	tr.Title = ""
	tr.Start = time.Time{}
	tr.Elapsed = 0
	tr.traceID = 0
	tr.spanID = 0
	tr.IsError = false
	tr.events = nil
	tr.refs = 0
	tr.recycler = nil
	tr.disc = 0
	tr.finishStack = nil
}
// delta returns the elapsed time since the last event or the trace start,
// and whether it spans midnight.
// L >= tr.mu
func (tr *trace) delta(t time.Time) (time.Duration, bool) {
	n := len(tr.events)
	if n == 0 {
		return t.Sub(tr.Start), false
	}
	last := tr.events[n-1].When
	return t.Sub(last), last.Day() != t.Day()
}
// addEvent appends x to the trace's event log, stamped with the current time.
// recyclable marks x as eligible for the recycler callback when it is later
// discarded or freed; sensitive marks it as redactable in the rendered page.
// When the event buffer is full, the middle of the log is collapsed into a
// single "(N events discarded)" marker so the first and last events survive.
func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
	if DebugUseAfterFinish && tr.finishStack != nil {
		buf := make([]byte, 4<<10) // 4 KB should be enough
		n := runtime.Stack(buf, false)
		log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
	}

	/*
		NOTE TO DEBUGGERS

		If you are here because your program panicked in this code,
		it is almost definitely the fault of code using this package,
		and very unlikely to be the fault of this code.

		The most likely scenario is that some code elsewhere is using
		a requestz.Trace after its Finish method is called.
		You can temporarily set the DebugUseAfterFinish var
		to help discover where that is; do not leave that var set,
		since it makes this package much less efficient.
	*/

	e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
	tr.mu.Lock()
	e.Elapsed, e.NewDay = tr.delta(e.When)
	if len(tr.events) < cap(tr.events) {
		tr.events = append(tr.events, e)
	} else {
		// Discard the middle events.
		di := int((cap(tr.events) - 1) / 2)
		if d, ok := tr.events[di].What.(*discarded); ok {
			// The middle slot already holds a discard marker; bump its count.
			(*d)++
		} else {
			// disc starts at two to count for the event it is replacing,
			// plus the next one that we are about to drop.
			tr.disc = 2
			if tr.recycler != nil && tr.events[di].Recyclable {
				go tr.recycler(tr.events[di].What)
			}
			tr.events[di].What = &tr.disc
		}
		// The timestamp of the discarded meta-event should be
		// the time of the last event it is representing.
		tr.events[di].When = tr.events[di+1].When

		if tr.recycler != nil && tr.events[di+1].Recyclable {
			go tr.recycler(tr.events[di+1].What)
		}
		// Shift the tail left over the dropped event and place e at the end.
		copy(tr.events[di+1:], tr.events[di+2:])
		tr.events[cap(tr.events)-1] = e
	}
	tr.mu.Unlock()
}
// LazyLog logs x to the trace, deferring x.String() until the event is
// rendered. The event is marked recyclable so x may be handed back to a
// recycler set via SetRecycler once the event is discarded.
func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
	tr.addEvent(x, true, sensitive)
}
// LazyPrintf logs a printf-style event, deferring the actual formatting
// until the event is rendered. The event is not recyclable or sensitive.
func (tr *trace) LazyPrintf(format string, a ...interface{}) {
	tr.addEvent(&lazySprintf{format, a}, false, false)
}
// SetError marks the trace as having resulted in an error.
// NOTE(review): IsError is written without holding tr.mu — presumably any
// concurrent read of it is a benign race; confirm against the renderers.
func (tr *trace) SetError() { tr.IsError = true }
// SetRecycler registers f to be called (each time in a new goroutine) with
// the What payload of every recyclable event as it is discarded or freed.
func (tr *trace) SetRecycler(f func(interface{})) {
	tr.recycler = f
}
// SetTraceInfo records the external trace and span identifiers.
func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
	tr.traceID = traceID
	tr.spanID = spanID
}
// SetMaxEvents caps the event buffer at m entries. It only takes effect
// before the first event is logged, and only for m large enough to hold
// the three events that are always retained.
func (tr *trace) SetMaxEvents(m int) {
	// Always keep at least three events: first, discarded count, last.
	if len(tr.events) != 0 || m <= 3 {
		return
	}
	tr.events = make([]event, 0, m)
}
// ref increments the trace's reference count (one reference per bucket
// that holds the trace).
func (tr *trace) ref() {
	atomic.AddInt32(&tr.refs, 1)
}
// unref decrements the trace's reference count. When the count reaches
// zero, recyclable event payloads are handed to the recycler and the trace
// is returned to the free list.
func (tr *trace) unref() {
	if atomic.AddInt32(&tr.refs, -1) == 0 {
		if tr.recycler != nil {
			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
			go func(f func(interface{}), es []event) {
				for _, e := range es {
					if e.Recyclable {
						f(e.What)
					}
				}
			}(tr.recycler, tr.events)
		}

		freeTrace(tr)
	}
}
// When returns the trace's start time with microsecond precision, including
// the date, for display in the requests table.
func (tr *trace) When() string {
	return tr.Start.Format("2006/01/02 15:04:05.000000")
}
// ElapsedTime returns the trace's elapsed time in seconds, formatted with
// microsecond precision. For a still-active trace (Elapsed == 0), it reports
// the time since Start.
func (tr *trace) ElapsedTime() string {
	d := tr.Elapsed
	if d == 0 {
		// Active trace.
		d = time.Since(tr.Start)
	}
	return fmt.Sprintf("%.6f", d.Seconds())
}
// Events returns the trace's event log for rendering.
// NOTE(review): the returned slice shares backing storage with tr.events,
// which addEvent can later mutate in place (the discard path) — confirm
// callers only read it while no further events can arrive.
func (tr *trace) Events() []event {
	tr.mu.RLock()
	defer tr.mu.RUnlock()
	return tr.events
}
// traceFreeList holds reset traces available for reuse, to reduce
// allocation churn; excess traces are simply dropped for the GC.
var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?

// newTrace returns a trace ready to use.
// It takes one from traceFreeList when available; otherwise it allocates.
func newTrace() *trace {
	select {
	case tr := <-traceFreeList:
		return tr
	default:
		return new(trace)
	}
}
// freeTrace adds tr to traceFreeList if there's room.
// This is non-blocking.
func freeTrace(tr *trace) {
	if DebugUseAfterFinish {
		return // never reuse, so use-after-finish can be detected
	}
	tr.reset()
	select {
	case traceFreeList <- tr:
	default: // free list full; let the GC collect tr
	}
}
func elapsed(d time.Duration) string {
b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
// For subsecond durations, blank all zeros before decimal point,
// and all zeros between the decimal point and the first non-zero digit.
if d < time.Second {
dot := bytes.IndexByte(b, '.')
for i := 0; i < dot; i++ {
b[i] = ' '
}
for i := dot + 1; i < len(b); i++ {
if b[i] == '0' {
b[i] = ' '
} else {
break
}
}
}
return string(b)
}
// pageTmpl renders the /debug/requests page from pageHTML below.
// "elapsed" formats durations for the event table; "add" is used to
// compute bucket indices for the minute/hour/total links.
var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
	"elapsed": elapsed,
	"add": func(a, b int) int { return a + b },
}).Parse(pageHTML))
// pageHTML is the html/template source for the /debug/requests page.
// It is split into three sub-templates: Prolog (head and styles),
// StatusTable (per-family bucket links), and Epilog (the trace listing).
const pageHTML = `
{{template "Prolog" .}}
{{template "StatusTable" .}}
{{template "Epilog" .}}

{{define "Prolog"}}
<html>
	<head>
	<title>/debug/requests</title>
	<style type="text/css">
		body {
			font-family: sans-serif;
		}
		table#tr-status td.family {
			padding-right: 2em;
		}
		table#tr-status td.active {
			padding-right: 1em;
		}
		table#tr-status td.latency-first {
			padding-left: 1em;
		}
		table#tr-status td.empty {
			color: #aaa;
		}
		table#reqs {
			margin-top: 1em;
		}
		table#reqs tr.first {
			{{if $.Expanded}}font-weight: bold;{{end}}
		}
		table#reqs td {
			font-family: monospace;
		}
		table#reqs td.when {
			text-align: right;
			white-space: nowrap;
		}
		table#reqs td.elapsed {
			padding: 0 0.5em;
			text-align: right;
			white-space: pre;
			width: 10em;
		}
		address {
			font-size: smaller;
			margin-top: 5em;
		}
	</style>
	</head>
	<body>

<h1>/debug/requests</h1>
{{end}} {{/* end of Prolog */}}

{{define "StatusTable"}}
<table id="tr-status">
	{{range $fam := .Families}}
	<tr>
		<td class="family">{{$fam}}</td>

		{{$n := index $.ActiveTraceCount $fam}}
		<td class="active {{if not $n}}empty{{end}}">
			{{if $n}}<a href="/debug/requests?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
			[{{$n}} active]
			{{if $n}}</a>{{end}}
		</td>

		{{$f := index $.CompletedTraces $fam}}
		{{range $i, $b := $f.Buckets}}
		{{$empty := $b.Empty}}
		<td {{if $empty}}class="empty"{{end}}>
		{{if not $empty}}<a href="/debug/requests?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
		[{{.Cond}}]
		{{if not $empty}}</a>{{end}}
		</td>
		{{end}}

		{{$nb := len $f.Buckets}}
		<td class="latency-first">
		<a href="/debug/requests?fam={{$fam}}&b={{$nb}}">[minute]</a>
		</td>
		<td>
		<a href="/debug/requests?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
		</td>
		<td>
		<a href="/debug/requests?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
		</td>

	</tr>
	{{end}}
</table>
{{end}} {{/* end of StatusTable */}}

{{define "Epilog"}}
{{if $.Traces}}
<hr />
<h3>Family: {{$.Family}}</h3>

{{if or $.Expanded $.Traced}}
  <a href="/debug/requests?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
{{else}}
  [Normal/Summary]
{{end}}

{{if or (not $.Expanded) $.Traced}}
  <a href="/debug/requests?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
{{else}}
  [Normal/Expanded]
{{end}}

{{if not $.Active}}
	{{if or $.Expanded (not $.Traced)}}
	<a href="/debug/requests?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
	{{else}}
	[Traced/Summary]
	{{end}}
	{{if or (not $.Expanded) (not $.Traced)}}
	<a href="/debug/requests?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
	{{else}}
	[Traced/Expanded]
	{{end}}
{{end}}

{{if $.Total}}
<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
{{end}}

<table id="reqs">
	<caption>
		{{if $.Active}}Active{{else}}Completed{{end}} Requests
	</caption>
	<tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
	{{range $tr := $.Traces}}
	<tr class="first">
		<td class="when">{{$tr.When}}</td>
		<td class="elapsed">{{$tr.ElapsedTime}}</td>
		<td>{{$tr.Title}}</td>
		{{/* TODO: include traceID/spanID */}}
	</tr>
	{{if $.Expanded}}
	{{range $tr.Events}}
	<tr>
		<td class="when">{{.WhenString}}</td>
		<td class="elapsed">{{elapsed .Elapsed}}</td>
		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
	</tr>
	{{end}}
	{{end}}
	{{end}}
</table>
{{end}} {{/* if $.Traces */}}

{{if $.Histogram}}
<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
{{$.Histogram}}
{{end}} {{/* if $.Histogram */}}

	</body>
</html>
{{end}} {{/* end of Epilog */}}
`

// ---- trace/trace_test.go (new file in this commit, 32 lines) ----
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package trace
import (
"reflect"
"testing"
)
// s is a trivial fmt.Stringer used to exercise LazyLog.
type s struct{}

// String implements fmt.Stringer with a fixed message.
func (s) String() string {
	return "lazy string"
}
// TestReset checks whether all the fields are zeroed after reset.
func TestReset(t *testing.T) {
	// Populate every settable field of a trace...
	tr := New("foo", "bar")
	tr.LazyLog(s{}, false)
	tr.LazyPrintf("%d", 1)
	tr.SetRecycler(func(_ interface{}) {})
	tr.SetTraceInfo(3, 4)
	tr.SetMaxEvents(100)
	tr.SetError()
	tr.Finish()

	// ...then reset it and require it to compare equal to a freshly
	// allocated trace (reset must clear everything but the mutex).
	tr.(*trace).reset()

	if !reflect.DeepEqual(tr, new(trace)) {
		t.Errorf("reset didn't clear all fields: %+v", tr)
	}
}