Add filter, cond, buffer, and hash details
Marcus Gartner committed Oct 23, 2019
1 parent 5ba936c commit 8c9c801
Showing 5 changed files with 194 additions and 43 deletions.
62 changes: 62 additions & 0 deletions pkg/flame/flame.go
@@ -2,18 +2,21 @@ package flame

import (
	"fmt"
	"strings"

	"pg_flame/pkg/plan"
)

type Flame struct {
	Name     string  `json:"name"`
	Value    float64 `json:"value"`
	Detail   string  `json:"detail"`
	Children []Flame `json:"children"`
}

func New(p plan.Plan) Flame {
	// TODO add planning time frame and total
	// TODO handle CTE InitPlan

	return convert(p.Root)
}
@@ -28,14 +31,73 @@ func convert(n plan.Node) Flame {
	return Flame{
		Name:     name(n),
		Value:    n.TotalTime,
		Detail:   detail(n),
		Children: subFlames,
	}
}

func name(n plan.Node) string {
	if n.Table != "" && n.Index != "" {
		return fmt.Sprintf("%s using %s on %s", n.Method, n.Index, n.Table)
	}

	if n.Table != "" {
		return fmt.Sprintf("%s on %s", n.Method, n.Table)
	}

	return n.Method
}

func detail(n plan.Node) string {
	condWords := make([]string, 0, 5)
	if n.Filter != "" {
		condWords = append(condWords, fmt.Sprintf("Filter: %s", n.Filter))
	}
	if n.JoinFilter != "" {
		condWords = append(condWords, fmt.Sprintf("Join Filter: %s", n.JoinFilter))
	}
	if n.HashCond != "" {
		condWords = append(condWords, fmt.Sprintf("Hash Cond: %s", n.HashCond))
	}
	if n.IndexCond != "" {
		condWords = append(condWords, fmt.Sprintf("Index Cond: %s", n.IndexCond))
	}
	if n.RecheckCond != "" {
		condWords = append(condWords, fmt.Sprintf("Recheck Cond: %s", n.RecheckCond))
	}
	cond := strings.Join(condWords, ", ")

	bufferWords := make([]string, 0, 2)
	if n.BuffersHit != 0 {
		bufferWords = append(bufferWords, fmt.Sprintf("Buffers Shared Hit: %v", n.BuffersHit))
	}
	if n.BuffersRead != 0 {
		bufferWords = append(bufferWords, fmt.Sprintf("Buffers Shared Read: %v", n.BuffersRead))
	}
	buffer := strings.Join(bufferWords, ", ")

	hashWords := make([]string, 0, 3)
	if n.HashBuckets != 0 {
		hashWords = append(hashWords, fmt.Sprintf("Buckets: %v", n.HashBuckets))
	}
	if n.HashBatches != 0 {
		hashWords = append(hashWords, fmt.Sprintf("Batches: %v", n.HashBatches))
	}
	if n.MemoryUsage != 0 {
		hashWords = append(hashWords, fmt.Sprintf("Memory Usage: %vkB", n.MemoryUsage))
	}
	hash := strings.Join(hashWords, ", ")

	sections := make([]string, 0, 3)
	if cond != "" {
		sections = append(sections, cond)
	}
	if buffer != "" {
		sections = append(sections, buffer)
	}
	if hash != "" {
		sections = append(sections, hash)
	}

	return strings.Join(sections, " | ")
}
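
For reference, a minimal sketch (not part of this commit) of how a converted node ends up in the JSON consumed by the d3-flame-graph front end. The plan.Node values are invented for illustration, and plan.Plan is assumed to expose the exported Root field used in the tests below:

package main

import (
	"encoding/json"
	"fmt"

	"pg_flame/pkg/flame"
	"pg_flame/pkg/plan"
)

func main() {
	// Hypothetical node; the values are made up for illustration.
	n := plan.Node{
		Method:    "Index Scan",
		Table:     "bears",
		Index:     "bears_pkey",
		IndexCond: "(id = 123)",
		TotalTime: 0.018,
	}

	f := flame.New(plan.Plan{Root: n})

	b, _ := json.Marshal(f)
	fmt.Println(string(b))
	// Prints something like:
	// {"name":"Index Scan using bears_pkey on bears","value":0.018,"detail":"Index Cond: (id = 123)","children":null}
}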
55 changes: 55 additions & 0 deletions pkg/flame/flame_test.go
@@ -46,10 +46,65 @@ func Test_name(t *testing.T) {
assert.Equal(t, "Seq Scan on bears", name(n))
})

t.Run("returns the method, index, and table if table exists", func(t *testing.T) {
n := plan.Node{
Method: "Index Scan",
Table: "bears",
Index: "bears_pkey",
}

assert.Equal(t, "Index Scan using bears_pkey on bears", name(n))
})

t.Run("returns the method if there is no table", func(t *testing.T) {
n := plan.Node{Method: "Seq Scan"}

assert.Equal(t, "Seq Scan", name(n))
})

}

func Test_detail(t *testing.T) {

	t.Run("returns filter details", func(t *testing.T) {
		assert.Equal(t, "Filter: (id = 123)", detail(plan.Node{Filter: "(id = 123)"}))
		assert.Equal(t, "Join Filter: (id = 123)", detail(plan.Node{JoinFilter: "(id = 123)"}))
		assert.Equal(t, "Hash Cond: (id = 123)", detail(plan.Node{HashCond: "(id = 123)"}))
		assert.Equal(t, "Index Cond: (id = 123)", detail(plan.Node{IndexCond: "(id = 123)"}))
		assert.Equal(t, "Recheck Cond: (id = 123)", detail(plan.Node{RecheckCond: "(id = 123)"}))
	})

	t.Run("returns buffer details", func(t *testing.T) {
		n := plan.Node{
			BuffersHit:  8,
			BuffersRead: 5,
		}

		assert.Equal(t, "Buffers Shared Hit: 8, Buffers Shared Read: 5", detail(n))
	})

	t.Run("returns hash details", func(t *testing.T) {
		n := plan.Node{
			MemoryUsage: 12,
			HashBuckets: 1024,
			HashBatches: 1,
		}

		assert.Equal(t, "Buckets: 1024, Batches: 1, Memory Usage: 12kB", detail(n))
	})

	t.Run("returns all information if available", func(t *testing.T) {
		n := plan.Node{
			Filter:      "(id = 123)",
			BuffersHit:  8,
			BuffersRead: 5,
			MemoryUsage: 12,
			HashBuckets: 1024,
			HashBatches: 1,
		}

		expected := "Filter: (id = 123) | Buffers Shared Hit: 8, Buffers Shared Read: 5 | Buckets: 1024, Batches: 1, Memory Usage: 12kB"
		assert.Equal(t, expected, detail(n))
	})

}
11 changes: 4 additions & 7 deletions pkg/html/html.go
@@ -93,22 +93,23 @@ const flameTemplate = `
      .transitionEase(d3.easeCubic)
      .sort(true)
      .title("")
      .onClick(onClick)
      .differential(false)
      .selfValue(false);
    var tip = d3.tip()
      .direction("s")
      .offset([8, 0])
      .attr('class', 'd3-flame-graph-tip')
      .html(function(d) { return d.data.name + " | " + d.data.value + "ms"; });
      .html(function(d) {
        return d.data.name + " | " + d.data.value + "ms";
      });
    flameGraph.tooltip(tip);
    var details = document.getElementById("details");
    flameGraph.setDetailsElement(details);
    var label = function(d) {
      return d.data.name + " | " + d.data.value + "ms";;
      return d.data.name + " | " + d.data.value + "ms" + " | " + d.data.detail;;
    }
    flameGraph.label(label);
@@ -136,10 +137,6 @@ const flameTemplate = `
    function resetZoom() {
      flameGraph.resetZoom();
    }
    function onClick(d) {
      console.info("Clicked on " + d.data.name);
    }
  </script>
</body>
</html>
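
As a rough illustration (a Go sketch, not part of the template), the updated label callback above renders each frame as the node name, its total time, and the new detail string joined by " | "; the node values here are invented:

package main

import "fmt"

func main() {
	// Mirrors the JavaScript: d.data.name + " | " + d.data.value + "ms" + " | " + d.data.detail
	name := "Hash Join on users"
	value := 0.049
	detail := "Hash Cond: ((p.user_id = c.user_id) AND (p.id = c.post_id))"

	fmt.Printf("%s | %vms | %s\n", name, value, detail)
	// Output: Hash Join on users | 0.049ms | Hash Cond: ((p.user_id = c.user_id) AND (p.id = c.post_id))
}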
21 changes: 15 additions & 6 deletions pkg/plan/plan.go
@@ -12,12 +12,21 @@ type Plan struct {
}

type Node struct {
	Method    string  `json:"Node Type"`
	Table     string  `json:"Relation Name"`
	// TODO StartupTime float64 `json:"Actual Startup Time"`
	TotalTime float64 `json:"Actual Total Time"`
	Children  []Node  `json:"Plans"`
	// TODO conditionals and more information
	Method      string  `json:"Node Type"`
	Table       string  `json:"Relation Name"`
	Index       string  `json:"Index Name"`
	Filter      string  `json:"Filter"`
	JoinFilter  string  `json:"Join Filter"`
	HashCond    string  `json:"Hash Cond"`
	IndexCond   string  `json:"Index Cond"`
	RecheckCond string  `json:"Recheck Cond"`
	BuffersHit  int     `json:"Shared Hit Blocks"`
	BuffersRead int     `json:"Shared Read Blocks"`
	MemoryUsage int     `json:"Peak Memory Usage"`
	HashBuckets int     `json:"Hash Buckets"`
	HashBatches int     `json:"Hash Batches"`
	TotalTime   float64 `json:"Actual Total Time"`
	Children    []Node  `json:"Plans"`
}

var ErrEmptyPlanJSON = errors.New("empty plan JSON")
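
A minimal sketch (not part of this commit) of how the struct tags above map EXPLAIN's JSON keys onto Node; the fragment is abbreviated and its values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"pg_flame/pkg/plan"
)

func main() {
	fragment := `{
		"Node Type": "Index Scan",
		"Relation Name": "users",
		"Index Name": "users_pkey",
		"Index Cond": "(id = p.user_id)",
		"Shared Hit Blocks": 5,
		"Actual Total Time": 0.049,
		"Plans": []
	}`

	var n plan.Node
	if err := json.Unmarshal([]byte(fragment), &n); err != nil {
		panic(err)
	}

	fmt.Println(n.Method, n.Index, n.IndexCond, n.BuffersHit)
	// Output: Index Scan users_pkey (id = p.user_id) 5
}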
88 changes: 58 additions & 30 deletions pkg/plan/plan_test.go
@@ -14,13 +14,26 @@ func TestNew(t *testing.T) {

		_, p := New(input)

		assert.Equal(t, "Limit", p.Root.Method)
		assert.Equal(t, "Nested Loop", p.Root.Method)
		assert.Equal(t, "", p.Root.Table)
		assert.Equal(t, 0.022, p.Root.TotalTime)

		assert.Equal(t, "Seq Scan", p.Root.Children[0].Method)
		assert.Equal(t, "bears", p.Root.Children[0].Table)
		assert.Equal(t, 0.018, p.Root.Children[0].TotalTime)
		assert.Equal(t, 0.049, p.Root.TotalTime)

		child := p.Root.Children[0]

		assert.Equal(t, "Hash Join", child.Method)
		assert.Equal(t, "users", child.Table)
		assert.Equal(t, "users_pkey", child.Index)
		assert.Equal(t, "((title)::text ~ '.*sql.*'::text)", child.Filter)
		assert.Equal(t, "(id = 123)", child.JoinFilter)
		assert.Equal(t, "((p.user_id = c.user_id) AND (p.id = c.post_id))", child.HashCond)
		assert.Equal(t, "(id = p.user_id)", child.IndexCond)
		assert.Equal(t, "(p.user_id = 123)", child.RecheckCond)
		assert.Equal(t, 5, child.BuffersHit)
		assert.Equal(t, 1, child.BuffersRead)
		assert.Equal(t, 8, child.MemoryUsage)
		assert.Equal(t, 1024, child.HashBuckets)
		assert.Equal(t, 1, child.HashBatches)
		assert.Equal(t, 0.049, child.TotalTime)
	})

t.Run("returns an error with empty plan JSON", func(t *testing.T) {
@@ -55,18 +68,20 @@ const planJSON = `
[
  {
    "Plan": {
      "Node Type": "Limit",
      "Node Type": "Nested Loop",
      "Parallel Aware": false,
      "Startup Cost": 0.00,
      "Total Cost": 0.11,
      "Join Type": "Inner",
      "Startup Cost": 265.38,
      "Total Cost": 288.42,
      "Plan Rows": 1,
      "Plan Width": 32,
      "Actual Startup Time": 0.022,
      "Actual Total Time": 0.022,
      "Actual Rows": 1,
      "Plan Width": 539,
      "Actual Startup Time": 0.049,
      "Actual Total Time": 0.049,
      "Actual Rows": 0,
      "Actual Loops": 1,
      "Shared Hit Blocks": 1,
      "Shared Read Blocks": 0,
      "Inner Unique": true,
      "Shared Hit Blocks": 5,
      "Shared Read Blocks": 1,
      "Shared Dirtied Blocks": 0,
      "Shared Written Blocks": 0,
      "Local Hit Blocks": 0,
@@ -77,36 +92,49 @@
"Temp Written Blocks": 0,
"Plans": [
{
"Node Type": "Seq Scan",
"Node Type": "Hash Join",
"Relation Name": "users",
"Index Name": "users_pkey",
"Parent Relationship": "Outer",
"Parallel Aware": false,
"Relation Name": "bears",
"Alias": "bears",
"Startup Cost": 0.00,
"Total Cost": 11.00,
"Plan Rows": 100,
"Plan Width": 32,
"Actual Startup Time": 0.018,
"Actual Total Time": 0.018,
"Actual Rows": 1,
"Join Type": "Inner",
"Startup Cost": 13.50,
"Total Cost": 35.06,
"Plan Rows": 1,
"Plan Width": 543,
"Actual Startup Time": 0.049,
"Actual Total Time": 0.049,
"Actual Rows": 0,
"Actual Loops": 1,
"Shared Hit Blocks": 1,
"Shared Read Blocks": 0,
"Inner Unique": false,
"Filter": "((title)::text ~ '.*sql.*'::text)",
"Hash Cond": "((p.user_id = c.user_id) AND (p.id = c.post_id))",
"Index Cond": "(id = p.user_id)",
"Join Filter": "(id = 123)",
"Recheck Cond": "(p.user_id = 123)",
"Hash Buckets": 1024,
"Hash Batches": 1,
"Peak Memory Usage": 8,
"Shared Hit Blocks": 5,
"Shared Read Blocks": 1,
"Shared Dirtied Blocks": 0,
"Shared Written Blocks": 0,
"Local Hit Blocks": 0,
"Local Read Blocks": 0,
"Local Dirtied Blocks": 0,
"Local Written Blocks": 0,
"Temp Read Blocks": 0,
"Temp Written Blocks": 0
"Temp Written Blocks": 0,
"Plans": [
]
}
]
},
"Planning Time": 1.756,
"Planning Time": 2.523,
"Triggers": [
],
"Execution Time": 0.059
"Execution Time": 0.221
}
]
`
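
For context, fixtures like planJSON above mirror the field names produced by PostgreSQL's EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON). A hedged Go sketch of capturing such output with database/sql; the driver import, connection string, and query are placeholder assumptions:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed Postgres driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/mydb?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// FORMAT JSON returns the whole plan as a single row and column.
	var out string
	err = db.QueryRow("EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON) SELECT * FROM users WHERE id = 123").Scan(&out)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(out) // a JSON array with one "Plan" object, shaped like the fixture above
}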
