--- /dev/null
+pstop
+*.log
--- /dev/null
+Bugs
+----
+
+See also NEW_FEATURES for things which probably need adding soon.
+
+1. Show the binlogs completely and match them properly by looking at
+the pattern defined in the server. Currently I'm using hard-coded
+patterns which match my personal usage; see the sketch at the end of
+this file.
+
+2. Only tested on MySQL 5.6. It should be possible to make this work
+on MySQL 5.7 and also MariaDB 10.0 but that has not been tested yet.
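+
+A sketch of a fix for (1), assuming the server exposes the
+log_bin_basename variable (MySQL 5.6 does) and reusing the existing
+helper in the lib package:
+
+    err, basename := lib.SelectGlobalVariableByVariableName(dbh, "log_bin_basename")
+    if err == nil && len(basename) > 0 {
+            // match e.g. basename.000001 or basename.index
+            binlog_re := regexp.QuoteMeta(basename) + `\.(\d{6}|index)$`
+            _ = binlog_re // use instead of the current hard-coded pattern
+    }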
--- /dev/null
+New features
+------------
+
+Some thoughts on new features which I hope to get around to.
+See also BUGS, which probably needs fixing first.
+
+1. performance_schema has several tables showing a wide variety of
+interesting information. Look at which tables are most interesting
+and adapt pstop to show them.
+
+2. Often we want to look at behaviour over time when we change
+something, so cumulative statistics are not ideal: we'd like to see
+a window of data which gradually moves over time. Look at how to
+implement this with pstop; see the first sketch at the end of this
+file. I'd expect the window to be rather small, perhaps 1 minute,
+or maybe longer if data is collected less frequently.
+
+3. vmstat, iostat, pt-diskstats and other such utilities show values
+over time but provide their output on stdout. It might be useful to
+have options to pstop to present the info in that sort of format
+too.
+
+4. Add some replication metrics to pstop, as most of my servers are
+slaves and we want to know where the server is busy. I've seen a
+way to do that and need to incorporate it into pstop.
+
+5. _if_ ps statistics get truncated it would be good to be able
+to remember the latest state and add those metrics back again, _if_
+looking at absolute rather than relative statistics. I currently
+truncate this info after 15 minutes, so this might happen while
+watching a server with pstop.
+
+6. Improve the options for sorting the data. Total latency is a good
+default, but it would also be useful to see the write-based metrics
+ordered by total write latency. So look to improve the display and
+sort options here.
+
+7. Add command line options for providing access to the MySQL server.
+Currently uses ~/.my.cnf and that's all.
+
+8. shift-tab to go back a screen; see the second sketch at the end
+of this file.
+
+The key sequence is documented in the termbox issue tracker
+(Issue 3 - termbox - SHIFT + TAB with go):
+code.google.com/p/termbox/issues/detail?id=3
+"If I press Shift + Tab the Poll function will send me 3 things
+successively: 1: Key 27 2: Ch 91 3: Ch 90 It's the only Key I know
+that do that." (14 Sep 2010)
+See also: https://github.com/peco/peco/issues/161
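+
+Sketch for (2), a moving window of statistics (hypothetical; the
+names are illustrative only): keep a ring of recent snapshots and
+report newest minus oldest:
+
+    // one snapshot per collection interval, e.g. 60 for a 1-minute window
+    snapshots := make([]file_summary_by_instance_rows, 0, 60)
+    // on each tick: append the latest collected rows; if the ring is
+    // full drop the oldest snapshot; then display the current rows
+    // minus the oldest snapshot using the existing subtract() helpers.
+
+Sketch for (8), detecting shift-tab with termbox (hypothetical:
+buffer successive events and match the ESC [ Z sequence described
+above):
+
+    // ev1, ev2, ev3 are three successive termbox.Events from PollEvent()
+    if ev1.Key == termbox.KeyEsc && ev2.Ch == '[' && ev3.Ch == 'Z' {
+            // treat as shift-tab: step back a display mode
+    }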
--- /dev/null
+// Package lib provides common routines for pstop.
+package lib
+
+import (
+ "fmt"
+ _ "github.com/go-sql-driver/mysql"
+ _ "github.com/sjmudd/pstop/version"
+ "strconv"
+)
+
+const (
+ myname = "pstop"
+ copyright = "Copyright (C) 2014 Simon J Mudd <sjmudd@pobox.com>"
+)
+
+// myround converts a floating point value to a string of the given
+// width and number of decimal places. fmt can do this directly:
+// the "%*.*f" verb takes the width and precision as arguments.
+func myround(f float64, width, decimals int) string {
+	return fmt.Sprintf("%*.*f", width, decimals, f)
+}
+
+// MyName returns the program's name.
+func MyName() string {
+ return myname
+}
+
+// Copyright provides a copyright message for pstop
+func Copyright() string {
+ return copyright
+}
+
+// sec_to_time() converts a number of seconds into hh:mm:ss format.
+// e.g. 7384 seconds = 7200 + 180 + 4 = 02:03:04
+func sec_to_time(d int) string {
+ hours := d / 3600 // integer value
+ minutes := (d - hours*3600) / 60 // integer value
+ seconds := d - hours*3600 - minutes*60
+
+ return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+}
+
+// FormatTime is based on sys.format_time. It
+// formats to 10 characters including space and suffix, with 2
+// decimal places, except that values of a minute or more are shown
+// in hh:mm:ss format. Zero is returned as an empty string.
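+// For example (sketch):
+//	FormatTime(2500000000000) == "    2.50 s"
+//	FormatTime(3100000000)    == "   3.10 ms"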
+func FormatTime(picoseconds int) string {
+ if picoseconds == 0 {
+ return ""
+ }
+ if picoseconds >= 3600000000000000 {
+ return myround(float64(picoseconds)/3600000000000000, 8, 2) + " h"
+ }
+ if picoseconds >= 60000000000000 {
+ return sec_to_time(picoseconds / 1000000000000)
+ }
+ if picoseconds >= 1000000000000 {
+ return myround(float64(picoseconds)/1000000000000, 8, 2) + " s"
+ }
+ if picoseconds >= 1000000000 {
+ return myround(float64(picoseconds)/1000000000, 7, 2) + " ms"
+ }
+ if picoseconds >= 1000000 {
+ return myround(float64(picoseconds)/1000000, 7, 2) + " us"
+ }
+ if picoseconds >= 1000 {
+ return myround(float64(picoseconds)/1000, 7, 2) + " ns"
+ }
+ return strconv.Itoa(int(picoseconds)) + " ps"
+}
+
+// FormatPct() formats a floating point fraction as a percentage
+// including the trailing % sign. Print the value as a %5.1f with
+// a % suffix if there's a value.
+// If the value is (close to) 0 print as 6 spaces.
+// If the displayed value would be > 999.9% show +++.+% to indicate an overflow.
+func FormatPct(pct float64) string {
+	var s string
+	if pct < 0.0001 {
+		s = "      "
+	} else if pct > 9.999 { // i.e. 100.0*pct, the displayed value, would exceed 999.9
+		s = "+++.+%" // too large to fit! (probably a bug as we don't expect this value to be > 100.00)
+	} else {
+		s = fmt.Sprintf("%5.1f", 100.0*pct) + "%"
+ }
+
+ return s
+}
+
+// FormatAmount() converts a number to a short value with a suffix:
+// k = 1024, M = 1024^2, G = 1024^3, T = 1024^4.
+// For a value of 0 return an empty string; values <= 1024 are shown as-is.
+// For scaled values > 1000 show 1 decimal place, otherwise 2.
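+// For example (sketch): FormatAmount(1536) == "  1.50 k" and
+// FormatAmount(1000) == "1000".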
+func FormatAmount(amount int) string {
+ var suffix string
+ var formatted string
+ var decimal_amount float64
+
+ if amount == 0 {
+ return ""
+ }
+ if amount <= 1024 {
+ return strconv.Itoa(amount)
+ }
+
+	if amount > (1024 * 1024 * 1024 * 1024) {
+		suffix = "T" // 1024^4 is tera, not peta
+		decimal_amount = float64(amount) / 1024 / 1024 / 1024 / 1024
+ } else if amount > (1024 * 1024 * 1024) {
+ suffix = "G"
+ decimal_amount = float64(amount) / 1024 / 1024 / 1024
+ } else if amount > (1024 * 1024) {
+ suffix = "M"
+ decimal_amount = float64(amount) / 1024 / 1024
+ } else if amount > 1024 {
+ suffix = "k"
+ decimal_amount = float64(amount) / 1024
+ }
+
+ if decimal_amount > 1000.0 {
+ formatted = fmt.Sprintf("%6.1f %s", decimal_amount, suffix)
+ } else {
+ formatted = fmt.Sprintf("%6.2f %s", decimal_amount, suffix)
+ }
+ return formatted
+}
+
+// MyDivide() divides a by b except if b is 0 in which case we return 0.
+func MyDivide(a int, b int) float64 {
+ if b == 0 {
+ return float64(0)
+ } else {
+ return float64(a) / float64(b)
+ }
+}
+
+// Uptime() provides a usable form of uptime.
+// Note: this doesn't return a string of a fixed size!
+// Minimum value: 1s.
+// Maximum value: 100d 23h 59m 59s (sort of).
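+// e.g. Uptime(90061) == "1d 1h 1m 1s"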
+func Uptime(uptime int) string {
+ var result string
+
+ days := uptime / 24 / 60 / 60
+ hours := (uptime - days*86400) / 3600
+ minutes := (uptime - days*86400 - hours*3600) / 60
+ seconds := uptime - days*86400 - hours*3600 - minutes*60
+
+ result = strconv.Itoa(seconds) + "s"
+
+ if minutes > 0 {
+ result = strconv.Itoa(minutes) + "m " + result
+ }
+ if hours > 0 {
+ result = strconv.Itoa(hours) + "h " + result
+ }
+ if days > 0 {
+ result = strconv.Itoa(days) + "d " + result
+ }
+
+ return result
+}
--- /dev/null
+package lib
+
+import (
+ "database/sql"
+ "log"
+)
+
+/*
+** mysql> select VARIABLE_VALUE from global_status where VARIABLE_NAME = 'UPTIME';
+* +----------------+
+* | VARIABLE_VALUE |
+* +----------------+
+* | 251107 |
+* +----------------+
+* 1 row in set (0.00 sec)
+**/
+// SelectGlobalStatusByVariableName returns the integer value of the
+// given global status variable, together with any error. Note: the
+// error is returned first, which is unidiomatic but matches the rest
+// of this package.
+func SelectGlobalStatusByVariableName(dbh *sql.DB, variable_name string) (error, int) {
+ sql_select := "SELECT VARIABLE_VALUE from INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = ?"
+
+ var variable_value int
+ err := dbh.QueryRow(sql_select, variable_name).Scan(&variable_value)
+ switch {
+ case err == sql.ErrNoRows:
+ log.Println("No setting with that variable_name", variable_name)
+ case err != nil:
+ log.Fatal(err)
+ default:
+ // fmt.Println("variable_value for", variable_name, "is", variable_value)
+ }
+
+ return err, variable_value
+}
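+
+// Example (sketch):
+//
+//	err, uptime := SelectGlobalStatusByVariableName(dbh, "UPTIME")
+//	// uptime now holds the server's uptime in seconds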
--- /dev/null
+package lib
+
+import (
+ "database/sql"
+ "log"
+ "strings"
+)
+
+/*
+** mysql> select VARIABLE_VALUE from information_schema.global_variables where variable_name = 'hostname';
+* +----------------+
+* | VARIABLE_VALUE |
+* +----------------+
+* | myhostname |
+* +----------------+
+* 1 row in set (0.00 sec)
+**/
+// SelectGlobalVariableByVariableName returns the string value of the
+// given global variable, together with any error (error first, as
+// elsewhere in this package).
+func SelectGlobalVariableByVariableName(dbh *sql.DB, variable_name string) (error, string) {
+ sql_select := "SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = ?"
+
+ var variable_value string
+ err := dbh.QueryRow(sql_select, variable_name).Scan(&variable_value)
+ switch {
+ case err == sql.ErrNoRows:
+ log.Println("No setting with that variable_name", variable_name)
+ case err != nil:
+ log.Fatal(err)
+ default:
+ // fmt.Println("variable_value for", variable_name, "is", variable_value)
+ }
+
+ return err, variable_value
+}
+
+// SelectGlobalVariablesByVariableName takes a slice of variable names
+// and returns a map of variable_name to value.
+// - note the query is case insensitive for variable names.
+// - the key values are lower-cased.
+func SelectGlobalVariablesByVariableName(dbh *sql.DB, wanted []string) (error, map[string]string) {
+ hashref := make(map[string]string)
+
+ // create an IN list to make up the query
+ quoted := make([]string, 0, len(wanted))
+ sql_select := "SELECT VARIABLE_NAME, VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME IN ("
+
+	if len(wanted) == 0 {
+		log.Fatal("SelectGlobalVariablesByVariableName() needs at least one entry")
+	}
+
+	for i := range wanted {
+		quoted = append(quoted, "'"+wanted[i]+"'")
+	}
+	sql_select += strings.Join(quoted, ",")
+	sql_select += ")"
+
+	rows, err := dbh.Query(sql_select)
+	if err != nil {
+		log.Fatal(err) // check err before deferring rows.Close() to avoid a nil pointer panic
+	}
+	defer rows.Close()
+
+ for rows.Next() {
+ var variable, value string
+ if err := rows.Scan(&variable, &value); err != nil {
+ log.Fatal(err)
+ }
+ hashref[strings.ToLower(variable)] = value
+ }
+ if err := rows.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ return err, hashref
+}
+
+// SelectAllGlobalVariablesByVariableName returns all global variables
+// as a map of variable_name to value (keys lower-cased).
+func SelectAllGlobalVariablesByVariableName(dbh *sql.DB) (error, map[string]string) {
+ hashref := make(map[string]string)
+
+ sql_select := "SELECT VARIABLE_NAME, VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES"
+
+	rows, err := dbh.Query(sql_select)
+	if err != nil {
+		log.Fatal(err) // check err before deferring rows.Close() to avoid a nil pointer panic
+	}
+	defer rows.Close()
+
+ for rows.Next() {
+ var variable, value string
+ if err := rows.Scan(&variable, &value); err != nil {
+ log.Fatal(err)
+ }
+ hashref[strings.ToLower(variable)] = value
+ }
+ if err := rows.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ return err, hashref
+}
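+
+// Example (sketch):
+//
+//	err, vars := SelectGlobalVariablesByVariableName(dbh, []string{"datadir", "relay_log"})
+//	// vars["datadir"] and vars["relay_log"] are now available (keys are lower-cased)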
--- /dev/null
+// logger - minimal logger shared by everyone
+package lib
+
+import (
+ "log"
+ "os"
+)
+
+// Logger is the publicly visible logger, shared by everyone
+var Logger *MyLogger
+
+func init() {
+ Logger = new(MyLogger)
+ Logger.EnableLogging(false)
+}
+
+// MyLogger wraps log.Logger, adding a flag to enable or disable logging
+type MyLogger struct {
+ logging_enabled bool
+ logger *log.Logger
+}
+
+// EnableLogging turns logging on or off, returning the previous
+// setting. Enabling logging opens (or creates) <myname>.log in
+// append mode.
+func (logger *MyLogger) EnableLogging(enable_logging bool) bool {
+ if logger.logging_enabled == enable_logging {
+ return enable_logging // as nothing to do
+ }
+
+ old_value := logger.logging_enabled
+ logger.logging_enabled = enable_logging
+
+ if enable_logging {
+ logfile := MyName() + ".log"
+
+ file, err := os.OpenFile(logfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+ if err != nil {
+			log.Fatal("Failed to open log file ", logfile, ": ", err)
+ }
+ logger.logger = log.New(file, "", log.Ldate|log.Ltime|log.Lshortfile)
+ }
+ return old_value
+}
+
+// pass Println() calls downstream if logging is enabled and we have
+// a valid logger set up
+func (logger *MyLogger) Println(v ...interface{}) {
+	if logger.logging_enabled && logger.logger != nil {
+		logger.logger.Println(v...) // expand v... so the arguments are not printed as a single slice
+	}
+}
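+
+// Example (sketch, as wired up in pstop's main package):
+//
+//	lib.Logger.EnableLogging(true)
+//	lib.Logger.Println("Starting " + lib.MyName())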
--- /dev/null
+// pstop - a top-like program which collects information from MySQL's
+// performance_schema database.
+package main
+
+import (
+ "database/sql"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/nsf/termbox-go"
+
+ "github.com/sjmudd/mysql_defaults_file"
+ "github.com/sjmudd/pstop/lib"
+ "github.com/sjmudd/pstop/state"
+ "github.com/sjmudd/pstop/version"
+)
+
+const (
+ sql_driver = "mysql"
+ db = "performance_schema"
+)
+
+func get_db_handle() *sql.DB {
+ var err error
+ var dbh *sql.DB
+
+ dbh, err = mysql_defaults_file.OpenUsingDefaultsFile(sql_driver, "", "performance_schema")
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err = dbh.Ping(); err != nil {
+ log.Fatal(err)
+ }
+
+ return dbh
+}
+
+// make chan for termbox events and run a poller to send events to the channel
+// - return the channel
+func new_tb_chan() chan termbox.Event {
+ termboxChan := make(chan termbox.Event)
+ go func() {
+ for {
+ termboxChan <- termbox.PollEvent()
+ }
+ }()
+ return termboxChan
+}
+
+func usage() {
+ fmt.Println(lib.MyName() + " - " + lib.Copyright())
+ fmt.Println("")
+ fmt.Println("Top-like program to show MySQL activity by using information collected")
+ fmt.Println("from performance_schema.")
+ fmt.Println("")
+ fmt.Println("Usage: " + lib.MyName() + " <options>")
+ fmt.Println("")
+ fmt.Println("Options:")
+	fmt.Println("-debug   enable debug logging")
+	fmt.Println("-help    show this help message")
+	fmt.Println("-version show the version")
+}
+
+func main() {
+ var flag_version = flag.Bool("version", false, "Show the version of "+lib.MyName())
+	var flag_debug = flag.Bool("debug", false, "Enable debug logging")
+ var flag_help = flag.Bool("help", false, "Provide some help for "+lib.MyName())
+ flag.Parse()
+
+ // clean me up
+ if *flag_debug {
+ lib.Logger.EnableLogging(true)
+ }
+ if *flag_version {
+ fmt.Println(lib.MyName() + " version " + version.Version())
+ return
+ }
+ if *flag_help {
+ usage()
+ return
+ }
+
+ lib.Logger.Println("Starting " + lib.MyName())
+ var state state.State
+
+ interval := time.Second * 1 // for the wait, should be configurable
+ sigChan := make(chan os.Signal, 1)
+ done := make(chan struct{})
+ defer close(done)
+ termboxChan := new_tb_chan()
+
+ signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+
+ ticker := time.NewTicker(interval) // generate a periodic signal
+
+ state.Setup(get_db_handle())
+
+ finished := false
+ for !finished {
+ select {
+ case <-done:
+ fmt.Println("exiting")
+ finished = true
+		case sig := <-sigChan:
+			fmt.Println("Caught a signal", sig)
+			finished = true // sending on done here would block forever: this loop is the only receiver
+ case <-ticker.C:
+ state.Display()
+ case event := <-termboxChan:
+ // switch on event type
+ switch event.Type {
+ case termbox.EventKey: // actions depend on key
+ switch event.Key {
+ case termbox.KeyCtrlZ, termbox.KeyCtrlC, termbox.KeyEsc:
+ finished = true
+ case termbox.KeyTab: // tab - change display modes
+ state.DisplayNext()
+ }
+ switch event.Ch {
+ case '-': // decrease the interval if > 1
+ if interval > time.Second {
+ ticker.Stop()
+ interval -= time.Second
+ ticker = time.NewTicker(interval)
+ }
+ case '+': // increase interval by creating a new ticker
+ ticker.Stop()
+ interval += time.Second
+ ticker = time.NewTicker(interval)
+ case 'h': // help
+ state.SetHelp(!state.Help())
+ case 'q': // quit
+ finished = true
+ case 't': // toggle between absolute/relative statistics
+ state.SetWantRelativeStats(!state.WantRelativeStats())
+				case 'z': // reset the statistics to now by querying the current values
+ state.ResetDBStatistics()
+ }
+ case termbox.EventResize: // set sizes
+ state.ScreenSetSize(event.Width, event.Height)
+ case termbox.EventError: // quit
+ log.Fatalf("Quitting because of termbox error: \n%s\n", event.Err)
+ }
+ }
+ }
+ state.Cleanup()
+ ticker.Stop()
+ lib.Logger.Println("Terminating " + lib.MyName())
+}
--- /dev/null
+// performance_schema - library routines for pstop.
+//
+// This file contains the library routines for managing the
+// file_summary_by_instance table.
+package file_summary_by_instance
+
+import (
+ "database/sql"
+
+ // "github.com/sjmudd/pstop/lib"
+ ps "github.com/sjmudd/pstop/performance_schema"
+)
+
+/*
+CREATE TABLE `file_summary_by_instance` (
+ `FILE_NAME` varchar(512) NOT NULL,
+ `EVENT_NAME` varchar(128) NOT NULL, // not collected
+ `OBJECT_INSTANCE_BEGIN` bigint(20) unsigned NOT NULL, // not collected
+ `COUNT_STAR` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `COUNT_READ` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `SUM_NUMBER_OF_BYTES_READ` bigint(20) NOT NULL,
+ `COUNT_WRITE` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `SUM_NUMBER_OF_BYTES_WRITE` bigint(20) NOT NULL,
+ `COUNT_MISC` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_MISC` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_MISC` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_MISC` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_MISC` bigint(20) unsigned NOT NULL
+) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
+1 row in set (0.00 sec)
+
+*/
+
+// a table of rows
+type File_summary_by_instance struct {
+ ps.RelativeStats
+ ps.InitialTime
+ initial file_summary_by_instance_rows
+ current file_summary_by_instance_rows
+ results file_summary_by_instance_rows
+ totals file_summary_by_instance_row
+ global_variables map[string]string
+}
+
+// reset the statistics to current values
+func (t *File_summary_by_instance) UpdateInitialValues() {
+ t.SetNow()
+ t.initial = make(file_summary_by_instance_rows, len(t.current))
+ copy(t.initial, t.current)
+
+ t.results = make(file_summary_by_instance_rows, len(t.current))
+ copy(t.results, t.current)
+
+ if t.WantRelativeStats() {
+ t.results.subtract(t.initial) // should be 0 if relative
+ }
+
+ t.results.sort()
+ t.totals = t.results.totals()
+}
+
+// Collect data from the db, then merge it in.
+func (t *File_summary_by_instance) Collect(dbh *sql.DB) {
+ // UPDATE current from db handle
+ t.current = merge_by_table_name(select_fsbi_rows(dbh), t.global_variables)
+
+ // copy in initial data if it was not there
+ if len(t.initial) == 0 && len(t.current) > 0 {
+ t.initial = make(file_summary_by_instance_rows, len(t.current))
+ copy(t.initial, t.current)
+ }
+
+ // check for reload initial characteristics
+ if t.initial.needs_refresh(t.current) {
+ t.initial = make(file_summary_by_instance_rows, len(t.current))
+ copy(t.initial, t.current)
+ }
+
+ // update results to current value
+ t.results = make(file_summary_by_instance_rows, len(t.current))
+ copy(t.results, t.current)
+
+ // make relative if need be
+ if t.WantRelativeStats() {
+ t.results.subtract(t.initial)
+ }
+
+ // sort the results
+ t.results.sort()
+
+ // setup the totals
+ t.totals = t.results.totals()
+}
+
+// return the headings for a table
+func (t File_summary_by_instance) Headings() string {
+ var r file_summary_by_instance_row
+
+ return r.headings()
+}
+
+// return the rows we need for displaying
+func (t File_summary_by_instance) RowContent(max_rows int) []string {
+ rows := make([]string, 0, max_rows)
+
+ for i := range t.results {
+ if i < max_rows {
+ rows = append(rows, t.results[i].row_content(t.totals))
+ }
+ }
+
+ return rows
+}
+
+// return all the totals
+func (t File_summary_by_instance) TotalRowContent() string {
+ return t.totals.row_content(t.totals)
+}
+
+// return an empty string of data (for filling in)
+func (t File_summary_by_instance) EmptyRowContent() string {
+	var empty file_summary_by_instance_row
+	return empty.row_content(empty)
+}
+
+func (t File_summary_by_instance) Description() string {
+ return "File I/O by filename (file_summary_by_instance)"
+}
+
+// create a new structure and include various variable values:
+// - datadir, relay_log
+// There's no checking that these are actually provided!
+func NewFileSummaryByInstance(global_variables map[string]string) *File_summary_by_instance {
+ n := new(File_summary_by_instance)
+
+ n.global_variables = global_variables
+
+ return n
+}
--- /dev/null
+// This file contains the library routines for managing the
+// file_summary_by_instance table.
+package file_summary_by_instance
+
+import (
+ "database/sql"
+ "fmt"
+ "log"
+ "regexp"
+ "sort"
+
+ "github.com/sjmudd/pstop/lib"
+)
+
+/*
+CREATE TABLE `file_summary_by_instance` (
+ `FILE_NAME` varchar(512) NOT NULL,
+ `EVENT_NAME` varchar(128) NOT NULL, // not collected
+ `OBJECT_INSTANCE_BEGIN` bigint(20) unsigned NOT NULL, // not collected
+ `COUNT_STAR` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `COUNT_READ` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `SUM_NUMBER_OF_BYTES_READ` bigint(20) NOT NULL,
+ `COUNT_WRITE` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `SUM_NUMBER_OF_BYTES_WRITE` bigint(20) NOT NULL,
+ `COUNT_MISC` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_MISC` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_MISC` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_MISC` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_MISC` bigint(20) unsigned NOT NULL
+) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
+1 row in set (0.00 sec)
+*/
+
+type file_summary_by_instance_row struct {
+ FILE_NAME string
+
+ COUNT_STAR int
+ COUNT_READ int
+ COUNT_WRITE int
+ COUNT_MISC int
+
+ SUM_TIMER_WAIT int
+ SUM_TIMER_READ int
+ SUM_TIMER_WRITE int
+ SUM_TIMER_MISC int
+
+ SUM_NUMBER_OF_BYTES_READ int
+ SUM_NUMBER_OF_BYTES_WRITE int
+}
+
+// represents a table or set of rows
+type file_summary_by_instance_rows []file_summary_by_instance_row
+
+// Return the name using the FILE_NAME attribute.
+func (r *file_summary_by_instance_row) name() string {
+ return r.FILE_NAME
+}
+
+// Return a formatted pretty name for the row.
+func (r *file_summary_by_instance_row) pretty_name() string {
+ s := r.name()
+ if len(s) > 30 {
+ s = s[:29]
+ }
+ return fmt.Sprintf("%-30s", s)
+}
+
+func (r *file_summary_by_instance_row) headings() string {
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s|%8s %8s|%8s %6s %6s %6s",
+ "Table Name",
+ "Latency",
+ "%",
+ "Read",
+ "Write",
+ "Misc",
+ "Rd bytes",
+ "Wr bytes",
+ "Ops",
+ "R Ops",
+ "W Ops",
+ "M Ops")
+}
+
+// generate a printable result
+func (row *file_summary_by_instance_row) row_content(totals file_summary_by_instance_row) string {
+ var name string
+
+ // We assume that if COUNT_STAR = 0 then there's no data at all...
+ // when we have no data we really don't want to show the name either.
+ if row.COUNT_STAR == 0 {
+ name = ""
+ } else {
+ name = row.pretty_name()
+ }
+
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s|%8s %8s|%8s %6s %6s %6s",
+ name,
+ lib.FormatTime(row.SUM_TIMER_WAIT),
+ lib.FormatPct(lib.MyDivide(row.SUM_TIMER_WAIT, totals.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(row.SUM_TIMER_READ, row.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(row.SUM_TIMER_WRITE, row.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(row.SUM_TIMER_MISC, row.SUM_TIMER_WAIT)),
+ lib.FormatAmount(row.SUM_NUMBER_OF_BYTES_READ),
+ lib.FormatAmount(row.SUM_NUMBER_OF_BYTES_WRITE),
+ lib.FormatAmount(row.COUNT_STAR),
+ lib.FormatPct(lib.MyDivide(row.COUNT_READ, row.COUNT_STAR)),
+ lib.FormatPct(lib.MyDivide(row.COUNT_WRITE, row.COUNT_STAR)),
+ lib.FormatPct(lib.MyDivide(row.COUNT_MISC, row.COUNT_STAR)))
+}
+
+func (this *file_summary_by_instance_row) add(other file_summary_by_instance_row) {
+ this.COUNT_STAR += other.COUNT_STAR
+ this.COUNT_READ += other.COUNT_READ
+ this.COUNT_WRITE += other.COUNT_WRITE
+ this.COUNT_MISC += other.COUNT_MISC
+
+ this.SUM_TIMER_WAIT += other.SUM_TIMER_WAIT
+ this.SUM_TIMER_READ += other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE += other.SUM_TIMER_WRITE
+ this.SUM_TIMER_MISC += other.SUM_TIMER_MISC
+
+ this.SUM_NUMBER_OF_BYTES_READ += other.SUM_NUMBER_OF_BYTES_READ
+ this.SUM_NUMBER_OF_BYTES_WRITE += other.SUM_NUMBER_OF_BYTES_WRITE
+}
+
+func (this *file_summary_by_instance_row) subtract(other file_summary_by_instance_row) {
+ this.COUNT_STAR -= other.COUNT_STAR
+ this.COUNT_READ -= other.COUNT_READ
+ this.COUNT_WRITE -= other.COUNT_WRITE
+ this.COUNT_MISC -= other.COUNT_MISC
+
+ this.SUM_TIMER_WAIT -= other.SUM_TIMER_WAIT
+ this.SUM_TIMER_READ -= other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE -= other.SUM_TIMER_WRITE
+ this.SUM_TIMER_MISC -= other.SUM_TIMER_MISC
+
+ this.SUM_NUMBER_OF_BYTES_READ -= other.SUM_NUMBER_OF_BYTES_READ
+ this.SUM_NUMBER_OF_BYTES_WRITE -= other.SUM_NUMBER_OF_BYTES_WRITE
+}
+
+// return the totals of a slice of rows
+func (t file_summary_by_instance_rows) totals() file_summary_by_instance_row {
+ var totals file_summary_by_instance_row
+ totals.FILE_NAME = "TOTALS"
+
+ for i := range t {
+ totals.add(t[i])
+ }
+
+ return totals
+}
+
+// clean up the given path reducing redundant stuff and return the clean path
+func cleanup_path(path string) string {
+	// foo/../bar --> bar       perl: $new =~ s{[^/]+/\.\./}{};
+	// foo/./bar  --> foo/bar   perl: $new =~ s{/\./}{/};
+	// //         --> /         perl: $new =~ s{//}{/};
+	double_slash_re := regexp.MustCompile(`//`)
+	slash_dot_slash_re := regexp.MustCompile(`/\./`)
+	slash_dot_dot_slash_re := regexp.MustCompile(`[^/]+/\.\./`)
+	for {
+		orig_path := path
+
+		path = double_slash_re.ReplaceAllString(path, "/")       // replace with "/", not "", or a//b becomes ab
+		path = slash_dot_slash_re.ReplaceAllString(path, "/")    // replace with "/", not "", or a/./b becomes ab
+		path = slash_dot_dot_slash_re.ReplaceAllString(path, "") // remove <dir>/../
+		if orig_path == path { // no change so give up
+			break
+		}
+	}
+
+	return path
+}
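+
+// Example (sketch):
+//	cleanup_path("/data/mysql/../binlogs/./relay.000001") == "/data/binlogs/relay.000001"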
+
+// From the original FILE_NAME we want to generate a simpler name to use.
+// This simpler name may also merge several different filenames into one.
+func (t file_summary_by_instance_row) simple_name(global_variables map[string]string) string {
+ const (
+ auto_cnf_re = `/auto\.cnf$`
+ binlog_re = `/binlog\.(\d{6}|index)$`
+ charset_re = `/share/charsets/Index\.xml$`
+ db_opt_re = `/db\.opt$`
+ error_msg_re = `/share/[^/]+/errmsg\.sys$`
+ ibdata_re = `/ibdata\d+$`
+ redo_log_re = `/ib_logfile\d+$`
+ pid_file_re = `/[^/]+\.pid$`
+ // relay_log_re = `/mysql-relay-bin.(\d{6}|index)$`
+ relative_path_re = `^\.\./`
+ current_dir_re = `^\./`
+ slowlog_re = `/slowlog$`
+ table_file_re = `/([^/]+)/([^/]+)\.(frm|ibd|MYD|MYI|CSM|CSV|par)$`
+ temp_table_re = `#sql-[0-9_]+`
+ part_table_re = `(.+)#P#p\d+`
+ )
+
+ path := t.FILE_NAME
+
+ // this should probably be ordered from most expected regexp to least
+ re := regexp.MustCompile(table_file_re)
+ if m1 := re.FindStringSubmatch(path); m1 != nil {
+ // we may match temporary tables so check for them
+ re2 := regexp.MustCompile(temp_table_re)
+ if m2 := re2.FindStringSubmatch(m1[2]); m2 != nil {
+ return "<temp_table>"
+ }
+
+ // we may match partitioned tables so check for them
+ re3 := regexp.MustCompile(part_table_re)
+ if m3 := re3.FindStringSubmatch(m1[2]); m3 != nil {
+ return m1[1] + "." + m3[1] // <schema>.<table> (less partition info)
+ }
+
+ return m1[1] + "." + m1[2] // <schema>.<table>
+ }
+	if regexp.MustCompile(ibdata_re).MatchString(path) {
+		return "<ibdata>"
+	}
+	if regexp.MustCompile(redo_log_re).MatchString(path) {
+		return "<redo_log>"
+	}
+	if regexp.MustCompile(binlog_re).MatchString(path) {
+		return "<binlog>"
+	}
+	if regexp.MustCompile(db_opt_re).MatchString(path) {
+		return "<db_opt>"
+	}
+	if regexp.MustCompile(slowlog_re).MatchString(path) {
+		return "<slow_log>"
+	}
+	if regexp.MustCompile(auto_cnf_re).MatchString(path) {
+		return "<auto_cnf>"
+	}
+	// relay logs are a bit complicated. If it's a full path it's easy
+	// to identify, but if it's a relative path we may need to prepend
+	// $datadir, and a path like ../blah/somewhere/path needs to be
+	// cleaned up so that it matches too.
+	if len(global_variables["relay_log"]) > 0 {
+		relay_log := global_variables["relay_log"]
+		if relay_log[0] != '/' { // relative path
+			relay_log = cleanup_path(global_variables["datadir"] + relay_log) // datadir always ends in /
+		}
+		relay_log_re := relay_log + `\.(\d{6}|index)$`
+		if regexp.MustCompile(relay_log_re).MatchString(path) {
+			return "<relay_log>"
+		}
+	}
+	if regexp.MustCompile(pid_file_re).MatchString(path) {
+		return "<pid_file>"
+	}
+	if regexp.MustCompile(error_msg_re).MatchString(path) {
+		return "<errmsg>"
+	}
+	if regexp.MustCompile(charset_re).MatchString(path) {
+		return "<charset>"
+	}
+ return path
+}
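+
+// Examples (sketch, assuming datadir is /data/mysql/):
+//	/data/mysql/ibdata1          -> "<ibdata>"
+//	/data/mysql/mydb/mytable.ibd -> "mydb.mytable"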
+
+// Convert the imported rows to a merged set of rows, combining all
+// entries whose FILE_NAME reduces to the same simple_name() by
+// adding their values.
+func merge_by_table_name(orig file_summary_by_instance_rows, global_variables map[string]string) file_summary_by_instance_rows {
+ t := make(file_summary_by_instance_rows, 0, len(orig))
+
+ m := make(map[string]file_summary_by_instance_row)
+
+ // iterate over source table
+ for i := range orig {
+ var file_name string
+ var new_row file_summary_by_instance_row
+ orig_row := orig[i]
+
+ if orig_row.COUNT_STAR > 0 {
+ file_name = orig_row.simple_name(global_variables)
+
+ // check if we have an entry in the map
+ if _, found := m[file_name]; found {
+ new_row = m[file_name]
+ } else {
+ new_row.FILE_NAME = file_name
+ }
+ new_row.add(orig_row)
+ m[file_name] = new_row // update the map with the new value
+ }
+ }
+
+ // add the map contents back into the table
+ for _, row := range m {
+ t = append(t, row)
+ }
+
+ return t
+}
+
+// Select the raw data from the database into file_summary_by_instance_rows.
+// The filtering of empty values, merging of rows with the same name
+// and rewriting of FILE_NAME into a more descriptive value all happen
+// later, in merge_by_table_name().
+func select_fsbi_rows(dbh *sql.DB) file_summary_by_instance_rows {
+ var t file_summary_by_instance_rows
+
+ sql := "SELECT FILE_NAME, COUNT_STAR, SUM_TIMER_WAIT, COUNT_READ, SUM_TIMER_READ, SUM_NUMBER_OF_BYTES_READ, COUNT_WRITE, SUM_TIMER_WRITE, SUM_NUMBER_OF_BYTES_WRITE, COUNT_MISC, SUM_TIMER_MISC FROM file_summary_by_instance"
+
+ rows, err := dbh.Query(sql)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var r file_summary_by_instance_row
+
+ if err := rows.Scan(&r.FILE_NAME, &r.COUNT_STAR, &r.SUM_TIMER_WAIT, &r.COUNT_READ, &r.SUM_TIMER_READ, &r.SUM_NUMBER_OF_BYTES_READ, &r.COUNT_WRITE, &r.SUM_TIMER_WRITE, &r.SUM_NUMBER_OF_BYTES_WRITE, &r.COUNT_MISC, &r.SUM_TIMER_MISC); err != nil {
+ log.Fatal(err)
+ }
+ t = append(t, r)
+ }
+ if err := rows.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ return t
+}
+
+// remove the initial values from those rows where there's a match
+// - if we find a row we can't match ignore it
+func (this *file_summary_by_instance_rows) subtract(initial file_summary_by_instance_rows) {
+ i_by_name := make(map[string]int)
+
+ // iterate over rows by name
+ for i := range initial {
+ i_by_name[initial[i].name()] = i
+ }
+
+ for i := range *this {
+ if _, ok := i_by_name[(*this)[i].name()]; ok {
+ initial_i := i_by_name[(*this)[i].name()]
+ (*this)[i].subtract(initial[initial_i])
+ }
+ }
+}
+
+func (t file_summary_by_instance_rows) Len() int { return len(t) }
+func (t file_summary_by_instance_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t file_summary_by_instance_rows) Less(i, j int) bool {
+ return (t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT) ||
+ ((t[i].SUM_TIMER_WAIT == t[j].SUM_TIMER_WAIT) && (t[i].FILE_NAME < t[j].FILE_NAME))
+}
+
+func (t *file_summary_by_instance_rows) sort() {
+ sort.Sort(t)
+}
+
+// needs_refresh() returns true if the data in t2 has smaller totals
+// than t, which means the server's statistics were truncated or
+// reset, so our saved initial values need refreshing.
+// check this by comparing totals.
+func (t file_summary_by_instance_rows) needs_refresh(t2 file_summary_by_instance_rows) bool {
+ my_totals := t.totals()
+ t2_totals := t2.totals()
+
+ return my_totals.SUM_TIMER_WAIT > t2_totals.SUM_TIMER_WAIT
+}
--- /dev/null
+// This file contains the library routines for managing the
+// table_lock_waits_summary_by_table table.
+package ps_table
+
+import (
+ "database/sql"
+ "time"
+)
+
+// Tabler is the interface for a type representing a table of rows
+// collected from performance_schema.
+type Tabler interface {
+ Collect(dbh *sql.DB)
+ UpdateInitialValues()
+ Headings() string
+ RowContent(max_rows int) []string
+ TotalRowContent() string
+ EmptyRowContent() string
+ Description() string
+ SetNow()
+ Last() time.Time
+ SetWantRelativeStats(want_relative_stats bool)
+}
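+
+// Sketch of the intended use (hypothetical - the real wiring lives in
+// the state package): hold the tables as a slice of Tabler and drive
+// them generically:
+//
+//	var tables []Tabler
+//	for _, t := range tables {
+//		t.Collect(dbh)
+//		fmt.Println(t.Description())
+//		fmt.Println(t.Headings())
+//	}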
--- /dev/null
+// This file contains the library routines for managing the
+// table_lock_waits_summary_by_table table.
+package ps_table
+
+/* *****
+import (
+ "database/sql"
+ "fmt"
+ _ "github.com/go-sql-driver/mysql"
+ "log"
+ "sort"
+ "strings"
+
+ "github.com/sjmudd/pstop/lib"
+)
+
+type table_lock_waits_summary_by_table_row struct {
+ OBJECT_TYPE string // in theory redundant but keep anyway
+ OBJECT_SCHEMA string // in theory redundant but keep anyway
+ OBJECT_NAME string // in theory redundant but keep anyway
+ COUNT_STAR int
+
+ SUM_TIMER_WAIT int
+ SUM_TIMER_READ int
+ SUM_TIMER_WRITE int
+
+ SUM_TIMER_READ_WITH_SHARED_LOCKS int
+ SUM_TIMER_READ_HIGH_PRIORITY int
+ SUM_TIMER_READ_NO_INSERT int
+ SUM_TIMER_READ_NORMAL int
+ SUM_TIMER_READ_EXTERNAL int
+
+ SUM_TIMER_WRITE_ALLOW_WRITE int
+ SUM_TIMER_WRITE_CONCURRENT_INSERT int
+ SUM_TIMER_WRITE_DELAYED int
+ SUM_TIMER_WRITE_LOW_PRIORITY int
+ SUM_TIMER_WRITE_NORMAL int
+ SUM_TIMER_WRITE_EXTERNAL int
+}
+
+type table_lock_waits_summary_by_table_rows []table_lock_waits_summary_by_table_row
+
+// return the table name from the columns as '<schema>.<table>'
+func (r *table_lock_waits_summary_by_table_row) name() string {
+ var n string
+ if len(r.OBJECT_SCHEMA) > 0 {
+ n += r.OBJECT_SCHEMA
+ }
+ if len(n) > 0 {
+ if len(r.OBJECT_NAME) > 0 {
+ n += "." + r.OBJECT_NAME
+ }
+ } else {
+ if len(r.OBJECT_NAME) > 0 {
+ n += r.OBJECT_NAME
+ }
+ }
+ return n
+}
+
+func (r *table_lock_waits_summary_by_table_row) pretty_name() string {
+ s := r.name()
+ if len(s) > 30 {
+ s = s[:29]
+ }
+ return fmt.Sprintf("%-30s", s)
+}
+
+// Table Name Latency %| Read Write|S.Lock High NoIns Normal Extrnl|AlloWr CncIns WrtDly Low Normal Extrnl|
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxx 1234567890 100.0%|xxxxx% xxxxx%|xxxxx% xxxxx% xxxxx% xxxxx% xxxxx%|xxxxx% xxxxx% xxxxx% xxxxx% xxxxx% xxxxx%|
+func (r *table_lock_waits_summary_by_table_row) headings() string {
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s|%6s %6s %6s %6s %6s|%6s %6s %6s %6s %6s %6s",
+ "Table Name", "Latency", "%",
+ "Read", "Write",
+ "S.Lock", "High", "NoIns", "Normal", "Extrnl",
+ "AlloWr", "CncIns", "WrtDly", "Low", "Normal", "Extrnl")
+}
+
+// generate a printable result
+func (r *table_lock_waits_summary_by_table_row) row_content(totals table_lock_waits_summary_by_table_row) string {
+
+ // assume the data is empty so hide it.
+ name := r.pretty_name()
+ if r.COUNT_STAR == 0 {
+ name = ""
+ }
+
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s|%6s %6s %6s %6s %6s|%6s %6s %6s %6s %6s %6s",
+ name,
+ lib.FormatTime(r.SUM_TIMER_WAIT),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WAIT, totals.SUM_TIMER_WAIT)),
+
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE, r.SUM_TIMER_WAIT)),
+
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_WITH_SHARED_LOCKS, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_HIGH_PRIORITY, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_NO_INSERT, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_NORMAL, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_EXTERNAL, r.SUM_TIMER_WAIT)),
+
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_ALLOW_WRITE, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_CONCURRENT_INSERT, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_DELAYED, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_LOW_PRIORITY, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_NORMAL, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_EXTERNAL, r.SUM_TIMER_WAIT)))
+}
+
+func (this *table_lock_waits_summary_by_table_row) add(other table_lock_waits_summary_by_table_row) {
+ this.COUNT_STAR += other.COUNT_STAR
+ this.SUM_TIMER_WAIT += other.SUM_TIMER_WAIT
+ this.SUM_TIMER_READ += other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE += other.SUM_TIMER_WRITE
+ this.SUM_TIMER_READ_WITH_SHARED_LOCKS += other.SUM_TIMER_READ_WITH_SHARED_LOCKS
+ this.SUM_TIMER_READ_HIGH_PRIORITY += other.SUM_TIMER_READ_HIGH_PRIORITY
+ this.SUM_TIMER_READ_NO_INSERT += other.SUM_TIMER_READ_NO_INSERT
+ this.SUM_TIMER_READ_NORMAL += other.SUM_TIMER_READ_NORMAL
+ this.SUM_TIMER_READ_EXTERNAL += other.SUM_TIMER_READ_EXTERNAL
+ this.SUM_TIMER_WRITE_CONCURRENT_INSERT += other.SUM_TIMER_WRITE_CONCURRENT_INSERT
+ this.SUM_TIMER_WRITE_DELAYED += other.SUM_TIMER_WRITE_DELAYED
+ this.SUM_TIMER_WRITE_LOW_PRIORITY += other.SUM_TIMER_WRITE_LOW_PRIORITY
+ this.SUM_TIMER_WRITE_NORMAL += other.SUM_TIMER_WRITE_NORMAL
+ this.SUM_TIMER_WRITE_EXTERNAL += other.SUM_TIMER_WRITE_EXTERNAL
+}
+
+func (this *table_lock_waits_summary_by_table_row) subtract(other table_lock_waits_summary_by_table_row) {
+ this.COUNT_STAR -= other.COUNT_STAR
+ this.SUM_TIMER_WAIT -= other.SUM_TIMER_WAIT
+ this.SUM_TIMER_READ -= other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE -= other.SUM_TIMER_WRITE
+ this.SUM_TIMER_READ_WITH_SHARED_LOCKS -= other.SUM_TIMER_READ_WITH_SHARED_LOCKS
+ this.SUM_TIMER_READ_HIGH_PRIORITY -= other.SUM_TIMER_READ_HIGH_PRIORITY
+ this.SUM_TIMER_READ_NO_INSERT -= other.SUM_TIMER_READ_NO_INSERT
+ this.SUM_TIMER_READ_NORMAL -= other.SUM_TIMER_READ_NORMAL
+ this.SUM_TIMER_READ_EXTERNAL -= other.SUM_TIMER_READ_EXTERNAL
+ this.SUM_TIMER_WRITE_CONCURRENT_INSERT -= other.SUM_TIMER_WRITE_CONCURRENT_INSERT
+ this.SUM_TIMER_WRITE_DELAYED -= other.SUM_TIMER_WRITE_DELAYED
+ this.SUM_TIMER_WRITE_LOW_PRIORITY -= other.SUM_TIMER_WRITE_LOW_PRIORITY
+ this.SUM_TIMER_WRITE_NORMAL -= other.SUM_TIMER_WRITE_NORMAL
+ this.SUM_TIMER_WRITE_EXTERNAL -= other.SUM_TIMER_WRITE_EXTERNAL
+}
+
+// return the totals of a slice of rows
+func (t table_lock_waits_summary_by_table_rows) totals() table_lock_waits_summary_by_table_row {
+ var totals table_lock_waits_summary_by_table_row
+ totals.OBJECT_SCHEMA = "TOTALS"
+
+ for i := range t {
+ totals.add(t[i])
+ }
+
+ return totals
+}
+
+// Select the raw data from the database into file_summary_by_instance_rows
+// - filter out empty values
+// - merge rows with the same name into a single row
+// - change FILE_NAME into a more descriptive value.
+func select_tlwsbt_rows(dbh *sql.DB) table_lock_waits_summary_by_table_rows {
+ var t table_lock_waits_summary_by_table_rows
+
+ sql := "SELECT OBJECT_TYPE, OBJECT_SCHEMA, OBJECT_NAME, COUNT_STAR, SUM_TIMER_WAIT, SUM_TIMER_READ, SUM_TIMER_WRITE, SUM_TIMER_READ_WITH_SHARED_LOCKS, SUM_TIMER_READ_HIGH_PRIORITY, SUM_TIMER_READ_NO_INSERT, SUM_TIMER_READ_NORMAL, SUM_TIMER_READ_EXTERNAL, SUM_TIMER_WRITE_ALLOW_WRITE, SUM_TIMER_WRITE_CONCURRENT_INSERT, SUM_TIMER_WRITE_DELAYED, SUM_TIMER_WRITE_LOW_PRIORITY, SUM_TIMER_WRITE_NORMAL, SUM_TIMER_WRITE_EXTERNAL FROM table_lock_waits_summary_by_table WHERE COUNT_STAR > 0"
+
+ rows, err := dbh.Query(sql)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var r table_lock_waits_summary_by_table_row
+ if err := rows.Scan(
+ &r.OBJECT_TYPE,
+ &r.OBJECT_SCHEMA,
+ &r.OBJECT_NAME,
+ &r.COUNT_STAR,
+ &r.SUM_TIMER_WAIT,
+ &r.SUM_TIMER_READ,
+ &r.SUM_TIMER_WRITE,
+ &r.SUM_TIMER_READ_WITH_SHARED_LOCKS,
+ &r.SUM_TIMER_READ_HIGH_PRIORITY,
+ &r.SUM_TIMER_READ_NO_INSERT,
+ &r.SUM_TIMER_READ_NORMAL,
+ &r.SUM_TIMER_READ_EXTERNAL,
+ &r.SUM_TIMER_WRITE_ALLOW_WRITE,
+ &r.SUM_TIMER_WRITE_CONCURRENT_INSERT,
+ &r.SUM_TIMER_WRITE_DELAYED,
+ &r.SUM_TIMER_WRITE_LOW_PRIORITY,
+ &r.SUM_TIMER_WRITE_NORMAL,
+ &r.SUM_TIMER_WRITE_EXTERNAL); err != nil {
+ log.Fatal(err)
+ }
+ // we collect all data as we may need it later
+ t = append(t, r)
+ }
+ if err := rows.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ return t
+}
+
+func (t table_lock_waits_summary_by_table_rows) Len() int { return len(t) }
+func (t table_lock_waits_summary_by_table_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t table_lock_waits_summary_by_table_rows) Less(i, j int) bool {
+ return (t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT) ||
+ ((t[i].SUM_TIMER_WAIT == t[j].SUM_TIMER_WAIT) &&
+ (t[i].OBJECT_SCHEMA < t[j].OBJECT_SCHEMA) &&
+ (t[i].OBJECT_NAME < t[j].OBJECT_NAME))
+
+}
+
+// sort the data
+func (t *table_lock_waits_summary_by_table_rows) sort() {
+ sort.Sort(t)
+}
+
+// remove the initial values from those rows where there's a match
+// - if we find a row we can't match ignore it
+func (this *table_lock_waits_summary_by_table_rows) subtract(initial table_lock_waits_summary_by_table_rows) {
+ i_by_name := make(map[string]int)
+
+ // iterate over rows by name
+ for i := range initial {
+ i_by_name[initial[i].name()] = i
+ }
+
+ for i := range *this {
+ if _, ok := i_by_name[(*this)[i].name()]; ok {
+ initial_i := i_by_name[(*this)[i].name()]
+ (*this)[i].subtract(initial[initial_i])
+ }
+ }
+}
+
+// if the data in t2 is "newer", "has more values" than t then it needs refreshing.
+// check this by comparing totals.
+func (t table_lock_waits_summary_by_table_rows) needs_refresh(t2 table_lock_waits_summary_by_table_rows) bool {
+ my_totals := t.totals()
+ t2_totals := t2.totals()
+
+ return my_totals.SUM_TIMER_WAIT > t2_totals.SUM_TIMER_WAIT
+}
+
+// describe a whole row
+func (r table_lock_waits_summary_by_table_row) String() string {
+ return fmt.Sprintf("%-30s|%10s %10s %10s|%10s %10s %10s %10s %10s|%10s %10s %10s %10s %10s %10s",
+ r.pretty_name(),
+ lib.FormatTime(r.SUM_TIMER_WAIT),
+ lib.FormatTime(r.SUM_TIMER_READ),
+ lib.FormatTime(r.SUM_TIMER_WRITE),
+
+ lib.FormatTime(r.SUM_TIMER_READ_WITH_SHARED_LOCKS),
+ lib.FormatTime(r.SUM_TIMER_READ_HIGH_PRIORITY),
+ lib.FormatTime(r.SUM_TIMER_READ_NO_INSERT),
+ lib.FormatTime(r.SUM_TIMER_READ_NORMAL),
+ lib.FormatTime(r.SUM_TIMER_READ_EXTERNAL),
+
+ lib.FormatTime(r.SUM_TIMER_WRITE_ALLOW_WRITE),
+ lib.FormatTime(r.SUM_TIMER_WRITE_CONCURRENT_INSERT),
+ lib.FormatTime(r.SUM_TIMER_WRITE_DELAYED),
+ lib.FormatTime(r.SUM_TIMER_WRITE_LOW_PRIORITY),
+ lib.FormatTime(r.SUM_TIMER_WRITE_NORMAL),
+ lib.FormatTime(r.SUM_TIMER_WRITE_EXTERNAL))
+}
+
+// describe a whole table
+func (t table_lock_waits_summary_by_table_rows) String() string {
+ s := make([]string, len(t))
+
+ for i := range t {
+ s = append(s, t[i].String())
+ }
+
+ return strings.Join(s, "\n")
+}
+
+***** */
--- /dev/null
+// performance_schema - library routines for pstop.
+//
+// This file manages the want_relative_stats flag.
+package performance_schema
+
+// RelativeStats records whether we want relative statistics (counted
+// from program start or the last reset) or absolute server values.
+type RelativeStats struct {
+ want_relative_stats bool
+}
+
+func (wrs *RelativeStats) SetWantRelativeStats(want_relative_stats bool) {
+ wrs.want_relative_stats = want_relative_stats
+}
+
+func (wrs RelativeStats) WantRelativeStats() bool {
+ return wrs.want_relative_stats
+}
--- /dev/null
+package performance_schema
+
+import (
+ "time"
+)
+
+type InitialTime struct {
+ initial_time time.Time
+}
+
+func (t *InitialTime) SetNow() {
+ t.initial_time = time.Now()
+}
+
+func (t InitialTime) Last() time.Time {
+ return t.initial_time
+}
--- /dev/null
+package replication_workload
+
+import (
+ "database/sql"
+
+ // "github.com/sjmudd/pstop/lib"
+ ps "github.com/sjmudd/pstop/performance_schema"
+)
+
+// a table of rows
+type Replication_workload struct {
+ ps.RelativeStats
+ ps.InitialTime
+ initial replication_workload_rows
+ current replication_workload_rows
+ results replication_workload_rows
+ totals replication_workload_row
+}
+
+// reset the statistics to current values
+func (t *Replication_workload) UpdateInitialValues() {
+ t.SetNow()
+ t.initial = make(replication_workload_rows, len(t.current))
+ copy(t.initial, t.current)
+
+ t.results = make(replication_workload_rows, len(t.current))
+ copy(t.results, t.current)
+
+ if t.WantRelativeStats() {
+ t.results.subtract(t.initial) // should be 0 if relative
+ }
+
+ t.results.sort()
+ t.totals = t.results.totals()
+}
+
+// Collect data from the db, then merge it in.
+// TODO: not implemented yet; select_rep_workload_rows() in
+// replication_workload_row.go has the query this should use.
+func (t *Replication_workload) Collect(dbh *sql.DB) {
+}
+
+// return the headings for a table
+func (t Replication_workload) Headings() string {
+ var r replication_workload_row
+
+ return r.headings()
+}
+
+// return the rows we need for displaying
+func (t Replication_workload) RowContent(max_rows int) []string {
+ rows := make([]string, 0, max_rows)
+
+ for i := range t.results {
+ if i < max_rows {
+ rows = append(rows, t.results[i].row_content(t.totals))
+ }
+ }
+
+ return rows
+}
+
+// return all the totals
+func (t Replication_workload) TotalRowContent() string {
+ return t.totals.row_content(t.totals)
+}
+
+// return an empty string of data (for filling in)
+func (t Replication_workload) EmptyRowContent() string {
+	var empty replication_workload_row
+	return empty.row_content(empty)
+}
+
+func (t Replication_workload) Description() string {
+	return "Replication workload (replication_workload)"
+}
+
--- /dev/null
+package replication_workload
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"sort"
+
+	"github.com/sjmudd/pstop/lib"
+)
+
+type replication_workload_row struct {
+ NAME string
+ EVENT_NAME string
+ OBJECT_NAME string
+ OPERATION string
+ SUM_TIMER_WAIT int
+ SUM_SPINS int
+ SUM_NUMBER_OF_BYTES int
+}
+
+type replication_workload_rows []replication_workload_row
+
+func select_rep_workload_rows(dbh *sql.DB) replication_workload_rows {
+ var t replication_workload_rows
+
+	sql := "SELECT t.NAME, ewc.EVENT_NAME, ewc.OBJECT_NAME, ewc.OPERATION, SUM(ewc.TIMER_WAIT) AS SUM_TIMER_WAIT, SUM(ewc.SPINS) AS SUM_SPINS, SUM(ewc.NUMBER_OF_BYTES) AS SUM_NUMBER_OF_BYTES FROM events_waits_history ewc JOIN threads t ON (t.THREAD_ID = ewc.thread_id) WHERE t.NAME LIKE '%slave_sql%' GROUP BY t.NAME, ewc.EVENT_NAME, ewc.OBJECT_NAME, ewc.OPERATION"
+
+ rows, err := dbh.Query(sql)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var r replication_workload_row
+
+ if err := rows.Scan(&r.NAME, &r.EVENT_NAME, &r.OBJECT_NAME, &r.OPERATION, &r.SUM_TIMER_WAIT, &r.SUM_SPINS, &r.SUM_NUMBER_OF_BYTES); err != nil {
+ log.Fatal(err)
+ }
+ t = append(t, r)
+ }
+ if err := rows.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ return t
+}
+
+func (this *replication_workload_row) add(other replication_workload_row) {
+ this.SUM_TIMER_WAIT += other.SUM_TIMER_WAIT
+ this.SUM_SPINS += other.SUM_SPINS
+ this.SUM_NUMBER_OF_BYTES += other.SUM_NUMBER_OF_BYTES
+}
+
+func (this *replication_workload_row) subtract(other replication_workload_row) {
+ this.SUM_TIMER_WAIT -= other.SUM_TIMER_WAIT
+ this.SUM_SPINS -= other.SUM_SPINS
+ this.SUM_NUMBER_OF_BYTES -= other.SUM_NUMBER_OF_BYTES
+}
+
+func (t replication_workload_rows) Len() int { return len(t) }
+func (t replication_workload_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+// may need to adjust the ordering here!
+func (t replication_workload_rows) Less(i, j int) bool {
+ return t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT
+}
+
+func (t *replication_workload_rows) sort() {
+ sort.Sort(t)
+}
+
+// needs_refresh() returns true if the data in t2 has smaller totals
+// than t, which means the server's statistics were truncated or
+// reset, so our saved initial values need refreshing.
+// check this by comparing totals.
+func (t replication_workload_rows) needs_refresh(t2 replication_workload_rows) bool {
+ my_totals := t.totals()
+ t2_totals := t2.totals()
+
+ return my_totals.SUM_TIMER_WAIT > t2_totals.SUM_TIMER_WAIT
+}
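+
+// The helpers below are referenced by needs_refresh() above and by
+// replication_workload.go but were missing from this work-in-progress
+// file. They are minimal sketches modelled on the equivalent helpers
+// in the other table packages; the display formats are illustrative.
+
+// name() returns a key identifying a row (sketch: join the columns
+// the query groups by).
+func (r *replication_workload_row) name() string {
+	return r.NAME + "|" + r.EVENT_NAME + "|" + r.OBJECT_NAME + "|" + r.OPERATION
+}
+
+// return the totals of a slice of rows
+func (t replication_workload_rows) totals() replication_workload_row {
+	var totals replication_workload_row
+	totals.NAME = "TOTALS"
+
+	for i := range t {
+		totals.add(t[i])
+	}
+
+	return totals
+}
+
+// remove the initial values from those rows where there's a match
+// - if we find a row we can't match ignore it
+func (this *replication_workload_rows) subtract(initial replication_workload_rows) {
+	i_by_name := make(map[string]int)
+
+	for i := range initial {
+		i_by_name[initial[i].name()] = i
+	}
+
+	for i := range *this {
+		if initial_i, ok := i_by_name[(*this)[i].name()]; ok {
+			(*this)[i].subtract(initial[initial_i])
+		}
+	}
+}
+
+// return the headings for a row (sketch)
+func (r *replication_workload_row) headings() string {
+	return fmt.Sprintf("%-30s %-25s %-10s|%10s %8s %8s",
+		"Name", "Event Name", "Operation", "Latency", "Spins", "Bytes")
+}
+
+// generate a printable result (sketch)
+func (r *replication_workload_row) row_content(totals replication_workload_row) string {
+	return fmt.Sprintf("%-30s %-25s %-10s|%10s %8s %8s",
+		r.NAME, r.EVENT_NAME, r.OPERATION,
+		lib.FormatTime(r.SUM_TIMER_WAIT),
+		lib.FormatAmount(r.SUM_SPINS),
+		lib.FormatAmount(r.SUM_NUMBER_OF_BYTES))
+}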
+
--- /dev/null
+// performance_schema - library routines for pstop.
+//
+// This file contains the library routines for managing the
+// table_io_waits_summary_by_table table.
+package table_io_waits_summary_by_table
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/sjmudd/pstop/lib"
+ ps "github.com/sjmudd/pstop/performance_schema"
+)
+
+// a table of rows
+type Table_io_waits_summary_by_table struct {
+ ps.RelativeStats
+ ps.InitialTime
+ want_latency bool
+ initial table_io_waits_summary_by_table_rows // initial data for relative values
+ current table_io_waits_summary_by_table_rows // last loaded values
+ results table_io_waits_summary_by_table_rows // results (maybe with subtraction)
+ totals table_io_waits_summary_by_table_row // totals of results
+}
+
+func (t *Table_io_waits_summary_by_table) SetWantsLatency(want_latency bool) {
+ t.want_latency = want_latency
+}
+
+func (t Table_io_waits_summary_by_table) WantsLatency() bool {
+ return t.want_latency
+}
+
+// Collect() collects data from the db, updating initial
+// values if needed, and then subtracting initial values if we want
+// relative values, after which it stores totals.
+func (t *Table_io_waits_summary_by_table) Collect(dbh *sql.DB) {
+ lib.Logger.Println("Table_io_waits_summary_by_table.Collect() BEGIN")
+ t.current = select_tiwsbt_rows(dbh)
+ lib.Logger.Println("- t.current set from", len(t.current), "collected row(s) from SELECT")
+
+ if len(t.initial) == 0 && len(t.current) > 0 {
+ // lib.Logger.Println("- setting t.initial to initial value" )
+ t.initial = make(table_io_waits_summary_by_table_rows, len(t.current))
+ copy(t.initial, t.current)
+ }
+
+ // check for reload initial characteristics
+ if t.initial.needs_refresh(t.current) {
+ // lib.Logger.Println( "- t.initial data needs refreshing!" )
+ t.initial = make(table_io_waits_summary_by_table_rows, len(t.current))
+ copy(t.initial, t.current)
+ }
+
+ t.make_results()
+
+ // lib.Logger.Println( "t.initial:", t.initial )
+ // lib.Logger.Println( "t.current:", t.current )
+ lib.Logger.Println("t.results:", t.results)
+ lib.Logger.Println("t.totals:", t.totals)
+ lib.Logger.Println("Table_io_waits_summary_by_table.Collect() END")
+}
+
+func (t *Table_io_waits_summary_by_table) make_results() {
+ // lib.Logger.Println( "- t.results set from t.current" )
+ t.results = make(table_io_waits_summary_by_table_rows, len(t.current))
+ copy(t.results, t.current)
+ if t.WantRelativeStats() {
+ // lib.Logger.Println( "- subtracting t.initial from t.results as WantRelativeStats()" )
+ t.results.subtract(t.initial)
+ }
+
+ // lib.Logger.Println( "- sorting t.results" )
+ t.results.Sort(t.want_latency)
+ // lib.Logger.Println( "- collecting t.totals from t.results" )
+ t.totals = t.results.totals()
+}
+
+// reset the statistics to current values
+func (t *Table_io_waits_summary_by_table) UpdateInitialValues() {
+ // lib.Logger.Println( "Table_io_waits_summary_by_table.UpdateInitialValues() BEGIN" )
+
+ t.initial = make(table_io_waits_summary_by_table_rows, len(t.current))
+ copy(t.initial, t.current)
+
+ t.make_results()
+
+ // lib.Logger.Println( "Table_io_waits_summary_by_table.UpdateInitialValues() END" )
+}
+
+func (t *Table_io_waits_summary_by_table) Headings() string {
+ if t.want_latency {
+ return t.latencyHeadings()
+ } else {
+ return t.opsHeadings()
+ }
+}
+
+func (t Table_io_waits_summary_by_table) RowContent(max_rows int) []string {
+ if t.want_latency {
+ return t.latencyRowContent(max_rows)
+ } else {
+ return t.opsRowContent(max_rows)
+ }
+}
+
+func (t Table_io_waits_summary_by_table) EmptyRowContent() string {
+ if t.want_latency {
+ return t.emptyLatencyRowContent()
+ } else {
+ return t.emptyOpsRowContent()
+ }
+}
+
+func (t Table_io_waits_summary_by_table) TotalRowContent() string {
+ if t.want_latency {
+ return t.totalLatencyRowContent()
+ } else {
+ return t.totalOpsRowContent()
+ }
+}
+
+func (t Table_io_waits_summary_by_table) Description() string {
+ if t.want_latency {
+ return t.latencyDescription()
+ } else {
+ return t.opsDescription()
+ }
+}
+
+func (t *Table_io_waits_summary_by_table) latencyHeadings() string {
+ var r table_io_waits_summary_by_table_row
+
+ return r.latency_headings()
+}
+
+func (t *Table_io_waits_summary_by_table) opsHeadings() string {
+ var r table_io_waits_summary_by_table_row
+
+ return r.ops_headings()
+}
+
+func (t Table_io_waits_summary_by_table) opsRowContent(max_rows int) []string {
+ rows := make([]string, 0, max_rows)
+
+ for i := range t.results {
+ if i < max_rows {
+ rows = append(rows, t.results[i].ops_row_content(t.totals))
+ }
+ }
+
+ return rows
+}
+
+func (t Table_io_waits_summary_by_table) latencyRowContent(max_rows int) []string {
+ rows := make([]string, 0, max_rows)
+
+ for i := range t.results {
+ if i < max_rows {
+ rows = append(rows, t.results[i].latency_row_content(t.totals))
+ }
+ }
+
+ return rows
+}
+
+func (t Table_io_waits_summary_by_table) emptyOpsRowContent() string {
+ var r table_io_waits_summary_by_table_row
+
+ return r.ops_row_content(r)
+}
+
+func (t Table_io_waits_summary_by_table) emptyLatencyRowContent() string {
+ var r table_io_waits_summary_by_table_row
+
+ return r.latency_row_content(r)
+}
+
+func (t Table_io_waits_summary_by_table) totalOpsRowContent() string {
+ return t.totals.ops_row_content(t.totals)
+}
+
+func (t Table_io_waits_summary_by_table) totalLatencyRowContent() string {
+ return t.totals.latency_row_content(t.totals)
+}
+
+func (t Table_io_waits_summary_by_table) latencyDescription() string {
+ count := t.count_rows()
+ return fmt.Sprintf("Latency by Table Name (table_io_waits_summary_by_table) %d rows", count)
+}
+
+func (t Table_io_waits_summary_by_table) opsDescription() string {
+ count := t.count_rows()
+ return fmt.Sprintf("Operations by Table Name (table_io_waits_summary_by_table) %d rows", count)
+}
+
+func (t Table_io_waits_summary_by_table) count_rows() int {
+ var count int
+ for row := range t.results {
+ if t.results[row].SUM_TIMER_WAIT > 0 {
+ count++
+ }
+ }
+ return count
+}
--- /dev/null
+// This file contains the library routines for managing the
+// table_io_waits_summary_by_table table.
+package table_io_waits_summary_by_table
+
+import (
+ "database/sql"
+ "fmt"
+ "log"
+ "sort"
+ "strings"
+
+ "github.com/sjmudd/pstop/lib"
+)
+
+// a row from performance_schema.table_io_waits_summary_by_table
+type table_io_waits_summary_by_table_row struct {
+ // Note: upper case names to match the performance_schema column names
+ // This type is _not_ exported.
+
+ OBJECT_TYPE string // in theory redundant but keep anyway
+ OBJECT_SCHEMA string // in theory redundant but keep anyway
+ OBJECT_NAME string // in theory redundant but keep anyway
+
+ SUM_TIMER_WAIT int
+ SUM_TIMER_READ int
+ SUM_TIMER_WRITE int
+ SUM_TIMER_FETCH int
+ SUM_TIMER_INSERT int
+ SUM_TIMER_UPDATE int
+ SUM_TIMER_DELETE int
+
+ COUNT_STAR int
+ COUNT_READ int
+ COUNT_WRITE int
+ COUNT_FETCH int
+ COUNT_INSERT int
+ COUNT_UPDATE int
+ COUNT_DELETE int
+}
+type table_io_waits_summary_by_table_rows []table_io_waits_summary_by_table_row
+
+// return the table name from the columns as '<schema>.<table>'
+func (r *table_io_waits_summary_by_table_row) name() string {
+ var n string
+ if len(r.OBJECT_SCHEMA) > 0 {
+ n += r.OBJECT_SCHEMA
+ }
+ if len(n) > 0 {
+ if len(r.OBJECT_NAME) > 0 {
+ n += "." + r.OBJECT_NAME
+ }
+ } else {
+ if len(r.OBJECT_NAME) > 0 {
+ n += r.OBJECT_NAME
+ }
+ }
+ return n
+}
+
+func (r *table_io_waits_summary_by_table_row) pretty_name() string {
+ s := r.name()
+ if len(s) > 30 {
+ s = s[:29]
+ }
+ return fmt.Sprintf("%-30s", s)
+}
+
+func (r *table_io_waits_summary_by_table_row) latency_headings() string {
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s %6s", "Table Name", "Latency", "%", "Fetch", "Insert", "Update", "Delete")
+}
+func (r *table_io_waits_summary_by_table_row) ops_headings() string {
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s %6s", "Table Name", "Ops", "%", "Fetch", "Insert", "Update", "Delete")
+}
+
+// generate a printable result
+func (r *table_io_waits_summary_by_table_row) latency_row_content(totals table_io_waits_summary_by_table_row) string {
+ // assume the data is empty so hide it.
+ name := r.pretty_name()
+ if r.COUNT_STAR == 0 {
+ name = ""
+ }
+
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s %6s",
+ name,
+ lib.FormatTime(r.SUM_TIMER_WAIT),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WAIT, totals.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_FETCH, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_INSERT, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_UPDATE, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_DELETE, r.SUM_TIMER_WAIT)))
+}
+
+// generate a printable result for ops
+func (r *table_io_waits_summary_by_table_row) ops_row_content(totals table_io_waits_summary_by_table_row) string {
+ // assume the data is empty so hide it.
+ name := r.pretty_name()
+ if r.COUNT_STAR == 0 {
+ name = ""
+ }
+
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s %6s",
+ name,
+ lib.FormatAmount(r.COUNT_STAR),
+ lib.FormatPct(lib.MyDivide(r.COUNT_STAR, totals.COUNT_STAR)),
+ lib.FormatPct(lib.MyDivide(r.COUNT_FETCH, r.COUNT_STAR)),
+ lib.FormatPct(lib.MyDivide(r.COUNT_INSERT, r.COUNT_STAR)),
+ lib.FormatPct(lib.MyDivide(r.COUNT_UPDATE, r.COUNT_STAR)),
+ lib.FormatPct(lib.MyDivide(r.COUNT_DELETE, r.COUNT_STAR)))
+}
+
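+// add accumulates the values of other into this row (used when building totals)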
+func (this *table_io_waits_summary_by_table_row) add(other table_io_waits_summary_by_table_row) {
+ this.SUM_TIMER_WAIT += other.SUM_TIMER_WAIT
+ this.SUM_TIMER_FETCH += other.SUM_TIMER_FETCH
+ this.SUM_TIMER_INSERT += other.SUM_TIMER_INSERT
+ this.SUM_TIMER_UPDATE += other.SUM_TIMER_UPDATE
+ this.SUM_TIMER_DELETE += other.SUM_TIMER_DELETE
+ this.SUM_TIMER_READ += other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE += other.SUM_TIMER_WRITE
+
+ this.COUNT_STAR += other.COUNT_STAR
+ this.COUNT_FETCH += other.COUNT_FETCH
+ this.COUNT_INSERT += other.COUNT_INSERT
+ this.COUNT_UPDATE += other.COUNT_UPDATE
+ this.COUNT_DELETE += other.COUNT_DELETE
+ this.COUNT_READ += other.COUNT_READ
+ this.COUNT_WRITE += other.COUNT_WRITE
+}
+
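+// subtract removes the values of other from this row (used to produce relative statistics)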
+func (this *table_io_waits_summary_by_table_row) subtract(other table_io_waits_summary_by_table_row) {
+ this.SUM_TIMER_WAIT -= other.SUM_TIMER_WAIT
+ this.SUM_TIMER_FETCH -= other.SUM_TIMER_FETCH
+ this.SUM_TIMER_INSERT -= other.SUM_TIMER_INSERT
+ this.SUM_TIMER_UPDATE -= other.SUM_TIMER_UPDATE
+ this.SUM_TIMER_DELETE -= other.SUM_TIMER_DELETE
+ this.SUM_TIMER_READ -= other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE -= other.SUM_TIMER_WRITE
+
+ this.COUNT_STAR -= other.COUNT_STAR
+ this.COUNT_FETCH -= other.COUNT_FETCH
+ this.COUNT_INSERT -= other.COUNT_INSERT
+ this.COUNT_UPDATE -= other.COUNT_UPDATE
+ this.COUNT_DELETE -= other.COUNT_DELETE
+ this.COUNT_READ -= other.COUNT_READ
+ this.COUNT_WRITE -= other.COUNT_WRITE
+}
+
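+// totals returns a row containing the sums of every row in the slice, labelled TOTALS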
+func (t table_io_waits_summary_by_table_rows) totals() table_io_waits_summary_by_table_row {
+ var totals table_io_waits_summary_by_table_row
+ totals.OBJECT_SCHEMA = "TOTALS"
+
+ for i := range t {
+ totals.add(t[i])
+ }
+
+ return totals
+}
+
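+// select_tiwsbt_rows returns the current contents of
+// performance_schema.table_io_waits_summary_by_table. The query does not
+// qualify the table name, so the connection is assumed to use
+// performance_schema as its default database.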
+func select_tiwsbt_rows(dbh *sql.DB) table_io_waits_summary_by_table_rows {
+ var t table_io_waits_summary_by_table_rows
+
+ // we collect all information even if it's mainly empty as we may reference it later
+ query := "SELECT OBJECT_TYPE, OBJECT_SCHEMA, OBJECT_NAME, COUNT_STAR, SUM_TIMER_WAIT, COUNT_READ, SUM_TIMER_READ, COUNT_WRITE, SUM_TIMER_WRITE, COUNT_FETCH, SUM_TIMER_FETCH, COUNT_INSERT, SUM_TIMER_INSERT, COUNT_UPDATE, SUM_TIMER_UPDATE, COUNT_DELETE, SUM_TIMER_DELETE FROM table_io_waits_summary_by_table WHERE SUM_TIMER_WAIT > 0"
+
+ rows, err := dbh.Query(query)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var r table_io_waits_summary_by_table_row
+ if err := rows.Scan(
+ &r.OBJECT_TYPE,
+ &r.OBJECT_SCHEMA,
+ &r.OBJECT_NAME,
+ &r.COUNT_STAR,
+ &r.SUM_TIMER_WAIT,
+ &r.COUNT_READ,
+ &r.SUM_TIMER_READ,
+ &r.COUNT_WRITE,
+ &r.SUM_TIMER_WRITE,
+ &r.COUNT_FETCH,
+ &r.SUM_TIMER_FETCH,
+ &r.COUNT_INSERT,
+ &r.SUM_TIMER_INSERT,
+ &r.COUNT_UPDATE,
+ &r.SUM_TIMER_UPDATE,
+ &r.COUNT_DELETE,
+ &r.SUM_TIMER_DELETE); err != nil {
+ log.Fatal(err)
+ }
+ t = append(t, r)
+ }
+ if err := rows.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ return t
+}
+
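+// implement sort.Interface so the rows can be ordered by total latency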
+func (t table_io_waits_summary_by_table_rows) Len() int { return len(t) }
+func (t table_io_waits_summary_by_table_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+
+// sort by value (descending) but also by "name" (ascending) if the values are the same
+func (t table_io_waits_summary_by_table_rows) Less(i, j int) bool {
+ return (t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT) ||
+ ((t[i].SUM_TIMER_WAIT == t[j].SUM_TIMER_WAIT) &&
+ (t[i].OBJECT_SCHEMA < t[j].OBJECT_SCHEMA) &&
+ (t[i].OBJECT_NAME < t[j].OBJECT_NAME))
+}
+
+// for sorting
+type ByOps table_io_waits_summary_by_table_rows
+
+func (t ByOps) Len() int { return len(t) }
+func (t ByOps) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t ByOps) Less(i, j int) bool {
+ return (t[i].COUNT_STAR > t[j].COUNT_STAR) ||
+ ((t[i].COUNT_STAR == t[j].COUNT_STAR) &&
+ (t[i].name() < t[j].name()))
+}
+
+func (t table_io_waits_summary_by_table_rows) Sort(want_latency bool) {
+ if want_latency {
+ sort.Sort(t)
+ } else {
+ sort.Sort(ByOps(t))
+ }
+}
+
+// remove the initial values from those rows where there's a match
+// - if we find a row we can't match ignore it
+func (this *table_io_waits_summary_by_table_rows) subtract(initial table_io_waits_summary_by_table_rows) {
+ i_by_name := make(map[string]int)
+
+ // index the initial rows by name
+ for i := range initial {
+ i_by_name[initial[i].name()] = i
+ }
+
+ for i := range *this {
+ if initial_i, ok := i_by_name[(*this)[i].name()]; ok {
+ (*this)[i].subtract(initial[initial_i])
+ }
+ }
+}
+
+// if the data in t2 is "newer", "has more values" than t then it needs refreshing.
+// check this by comparing totals.
+func (t table_io_waits_summary_by_table_rows) needs_refresh(t2 table_io_waits_summary_by_table_rows) bool {
+ my_totals := t.totals()
+ t2_totals := t2.totals()
+
+ return my_totals.SUM_TIMER_WAIT > t2_totals.SUM_TIMER_WAIT
+}
+
+// describe a whole row
+func (r table_io_waits_summary_by_table_row) String() string {
+ return fmt.Sprintf("%-30s|%10s %10s %10s %10s %10s|%10s %10s|%10s %10s %10s %10s %10s|%10s %10s",
+ r.pretty_name(),
+ lib.FormatTime(r.SUM_TIMER_WAIT),
+ lib.FormatTime(r.SUM_TIMER_FETCH),
+ lib.FormatTime(r.SUM_TIMER_INSERT),
+ lib.FormatTime(r.SUM_TIMER_UPDATE),
+ lib.FormatTime(r.SUM_TIMER_DELETE),
+
+ lib.FormatTime(r.SUM_TIMER_READ),
+ lib.FormatTime(r.SUM_TIMER_WRITE),
+
+ lib.FormatAmount(r.COUNT_STAR),
+ lib.FormatAmount(r.COUNT_FETCH),
+ lib.FormatAmount(r.COUNT_INSERT),
+ lib.FormatAmount(r.COUNT_UPDATE),
+ lib.FormatAmount(r.COUNT_DELETE),
+
+ lib.FormatAmount(r.COUNT_READ),
+ lib.FormatAmount(r.COUNT_WRITE))
+}
+
+// describe a whole table
+func (t table_io_waits_summary_by_table_rows) String() string {
+ s := make([]string, 0, len(t))
+
+ for i := range t {
+ s = append(s, t[i].String())
+ }
+
+ return strings.Join(s, "\n")
+}
--- /dev/null
+// This file contains the library routines for managing the
+// table_lock_waits_summary_by_table table.
+package table_lock_waits_summary_by_table
+
+import (
+ "database/sql"
+ _ "github.com/go-sql-driver/mysql"
+
+ // "github.com/sjmudd/pstop/lib"
+ ps "github.com/sjmudd/pstop/performance_schema"
+)
+
+// a table of rows
+type Table_lock_waits_summary_by_table struct {
+ ps.RelativeStats
+ ps.InitialTime
+ initial table_lock_waits_summary_by_table_rows // initial data for relative values
+ current table_lock_waits_summary_by_table_rows // last loaded values
+ results table_lock_waits_summary_by_table_rows // results (maybe with subtraction)
+ totals table_lock_waits_summary_by_table_row // totals of results
+}
+
+// Collect the current data from the db and refresh the initial baseline if needed.
+func (t *Table_lock_waits_summary_by_table) Collect(dbh *sql.DB) {
+ t.current = select_tlwsbt_rows(dbh)
+
+ // reset the baseline if we don't have one yet, or if the table
+ // has been truncated since we last collected data
+ if (len(t.initial) == 0 && len(t.current) > 0) || t.initial.needs_refresh(t.current) {
+ t.initial = make(table_lock_waits_summary_by_table_rows, len(t.current))
+ copy(t.initial, t.current)
+ }
+
+ t.make_results()
+}
+
+// make_results rebuilds t.results from t.current, subtracting the initial
+// values if relative stats are wanted, then sorts them and updates the totals.
+func (t *Table_lock_waits_summary_by_table) make_results() {
+ t.results = make(table_lock_waits_summary_by_table_rows, len(t.current))
+ copy(t.results, t.current)
+ if t.WantRelativeStats() {
+ t.results.subtract(t.initial)
+ }
+
+ t.results.sort()
+ t.totals = t.results.totals()
+}
+
+// reset the statistics to current values
+func (t *Table_lock_waits_summary_by_table) UpdateInitialValues() {
+ t.SetNow()
+ t.initial = make(table_lock_waits_summary_by_table_rows, len(t.current))
+ copy(t.initial, t.current)
+
+ t.make_results()
+}
+
+// return the headings for a table
+func (t Table_lock_waits_summary_by_table) Headings() string {
+ var r table_lock_waits_summary_by_table_row
+
+ return r.headings()
+}
+
+// return the rows we need for displaying
+func (t Table_lock_waits_summary_by_table) RowContent(max_rows int) []string {
+ rows := make([]string, 0, max_rows)
+
+ for i := range t.results {
+ if i < max_rows {
+ rows = append(rows, t.results[i].row_content(t.totals))
+ }
+ }
+
+ return rows
+}
+
+// return all the totals
+func (t Table_lock_waits_summary_by_table) TotalRowContent() string {
+ return t.totals.row_content(t.totals)
+}
+
+// return an empty row (used to pad out the display)
+func (t Table_lock_waits_summary_by_table) EmptyRowContent() string {
+ var empty table_lock_waits_summary_by_table_row
+ return empty.row_content(empty)
+}
+
+func (t Table_lock_waits_summary_by_table) Description() string {
+ return "Locks by Table Name (table_lock_waits_summary_by_table)"
+}
--- /dev/null
+// This file contains the library routines for managing the
+// table_lock_waits_summary_by_table table.
+package table_lock_waits_summary_by_table
+
+import (
+ "database/sql"
+ "fmt"
+ _ "github.com/go-sql-driver/mysql"
+ "log"
+ "sort"
+ "strings"
+
+ "github.com/sjmudd/pstop/lib"
+)
+
+/*
+
+From 5.7.5
+
+*************************** 1. row ***************************
+ Table: table_lock_waits_summary_by_table
+Create Table: CREATE TABLE `table_lock_waits_summary_by_table` (
+ `OBJECT_TYPE` varchar(64) DEFAULT NULL,
+ `OBJECT_SCHEMA` varchar(64) DEFAULT NULL,
+ `OBJECT_NAME` varchar(64) DEFAULT NULL,
+ `COUNT_STAR` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `COUNT_READ` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ` bigint(20) unsigned NOT NULL,
+ `COUNT_WRITE` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE` bigint(20) unsigned NOT NULL,
+ `COUNT_READ_NORMAL` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ_NORMAL` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ_NORMAL` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ_NORMAL` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ_NORMAL` bigint(20) unsigned NOT NULL,
+ `COUNT_READ_WITH_SHARED_LOCKS` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ_WITH_SHARED_LOCKS` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ_WITH_SHARED_LOCKS` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ_WITH_SHARED_LOCKS` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ_WITH_SHARED_LOCKS` bigint(20) unsigned NOT NULL,
+ `COUNT_READ_HIGH_PRIORITY` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ_HIGH_PRIORITY` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ_HIGH_PRIORITY` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ_HIGH_PRIORITY` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ_HIGH_PRIORITY` bigint(20) unsigned NOT NULL,
+ `COUNT_READ_NO_INSERT` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ_NO_INSERT` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ_NO_INSERT` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ_NO_INSERT` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ_NO_INSERT` bigint(20) unsigned NOT NULL,
+ `COUNT_READ_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_READ_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_READ_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_READ_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_READ_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `COUNT_WRITE_ALLOW_WRITE` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE_ALLOW_WRITE` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE_ALLOW_WRITE` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE_ALLOW_WRITE` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE_ALLOW_WRITE` bigint(20) unsigned NOT NULL,
+ `COUNT_WRITE_CONCURRENT_INSERT` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE_CONCURRENT_INSERT` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE_CONCURRENT_INSERT` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE_CONCURRENT_INSERT` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE_CONCURRENT_INSERT` bigint(20) unsigned NOT NULL,
+ `COUNT_WRITE_LOW_PRIORITY` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE_LOW_PRIORITY` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE_LOW_PRIORITY` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE_LOW_PRIORITY` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE_LOW_PRIORITY` bigint(20) unsigned NOT NULL,
+ `COUNT_WRITE_NORMAL` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE_NORMAL` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE_NORMAL` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE_NORMAL` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE_NORMAL` bigint(20) unsigned NOT NULL,
+ `COUNT_WRITE_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WRITE_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WRITE_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WRITE_EXTERNAL` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WRITE_EXTERNAL` bigint(20) unsigned NOT NULL
+) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
+
+*/
+
+type table_lock_waits_summary_by_table_row struct {
+ OBJECT_TYPE string // in theory redundant but keep anyway
+ OBJECT_SCHEMA string // in theory redundant but keep anyway
+ OBJECT_NAME string // in theory redundant but keep anyway
+ COUNT_STAR int
+
+ SUM_TIMER_WAIT int
+ SUM_TIMER_READ int
+ SUM_TIMER_WRITE int
+
+ SUM_TIMER_READ_WITH_SHARED_LOCKS int
+ SUM_TIMER_READ_HIGH_PRIORITY int
+ SUM_TIMER_READ_NO_INSERT int
+ SUM_TIMER_READ_NORMAL int
+ SUM_TIMER_READ_EXTERNAL int
+
+ SUM_TIMER_WRITE_ALLOW_WRITE int
+ SUM_TIMER_WRITE_CONCURRENT_INSERT int
+ SUM_TIMER_WRITE_LOW_PRIORITY int
+ SUM_TIMER_WRITE_NORMAL int
+ SUM_TIMER_WRITE_EXTERNAL int
+}
+
+type table_lock_waits_summary_by_table_rows []table_lock_waits_summary_by_table_row
+
+// return the table name from the columns as '<schema>.<table>'
+func (r *table_lock_waits_summary_by_table_row) name() string {
+ if len(r.OBJECT_SCHEMA) > 0 && len(r.OBJECT_NAME) > 0 {
+ return r.OBJECT_SCHEMA + "." + r.OBJECT_NAME
+ }
+ return r.OBJECT_SCHEMA + r.OBJECT_NAME
+}
+
+func (r *table_lock_waits_summary_by_table_row) pretty_name() string {
+ s := r.name()
+ if len(s) > 30 {
+ s = s[:29]
+ }
+ return fmt.Sprintf("%-30s", s)
+}
+
+// Table Name Latency %| Read Write|S.Lock High NoIns Normal Extrnl|AlloWr CncIns Low Normal Extrnl
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxx 1234567890 100.0%|xxxxx% xxxxx%|xxxxx% xxxxx% xxxxx% xxxxx% xxxxx%|xxxxx% xxxxx% xxxxx% xxxxx% xxxxx%
+func (r *table_lock_waits_summary_by_table_row) headings() string {
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s|%6s %6s %6s %6s %6s|%6s %6s %6s %6s %6s",
+ "Table Name", "Latency", "%",
+ "Read", "Write",
+ "S.Lock", "High", "NoIns", "Normal", "Extrnl",
+ "AlloWr", "CncIns", "Low", "Normal", "Extrnl")
+}
+
+// generate a printable result
+func (r *table_lock_waits_summary_by_table_row) row_content(totals table_lock_waits_summary_by_table_row) string {
+
+ // don't print a name for rows with no data
+ name := r.pretty_name()
+ if r.COUNT_STAR == 0 {
+ name = ""
+ }
+
+ return fmt.Sprintf("%-30s %10s %6s|%6s %6s|%6s %6s %6s %6s %6s|%6s %6s %6s %6s %6s",
+ name,
+ lib.FormatTime(r.SUM_TIMER_WAIT),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WAIT, totals.SUM_TIMER_WAIT)),
+
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE, r.SUM_TIMER_WAIT)),
+
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_WITH_SHARED_LOCKS, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_HIGH_PRIORITY, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_NO_INSERT, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_NORMAL, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_READ_EXTERNAL, r.SUM_TIMER_WAIT)),
+
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_ALLOW_WRITE, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_CONCURRENT_INSERT, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_LOW_PRIORITY, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_NORMAL, r.SUM_TIMER_WAIT)),
+ lib.FormatPct(lib.MyDivide(r.SUM_TIMER_WRITE_EXTERNAL, r.SUM_TIMER_WAIT)))
+}
+
+// add accumulates the values of other into this row (used when building totals)
+func (this *table_lock_waits_summary_by_table_row) add(other table_lock_waits_summary_by_table_row) {
+ this.COUNT_STAR += other.COUNT_STAR
+ this.SUM_TIMER_WAIT += other.SUM_TIMER_WAIT
+ this.SUM_TIMER_READ += other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE += other.SUM_TIMER_WRITE
+ this.SUM_TIMER_READ_WITH_SHARED_LOCKS += other.SUM_TIMER_READ_WITH_SHARED_LOCKS
+ this.SUM_TIMER_READ_HIGH_PRIORITY += other.SUM_TIMER_READ_HIGH_PRIORITY
+ this.SUM_TIMER_READ_NO_INSERT += other.SUM_TIMER_READ_NO_INSERT
+ this.SUM_TIMER_READ_NORMAL += other.SUM_TIMER_READ_NORMAL
+ this.SUM_TIMER_READ_EXTERNAL += other.SUM_TIMER_READ_EXTERNAL
+ this.SUM_TIMER_WRITE_ALLOW_WRITE += other.SUM_TIMER_WRITE_ALLOW_WRITE
+ this.SUM_TIMER_WRITE_CONCURRENT_INSERT += other.SUM_TIMER_WRITE_CONCURRENT_INSERT
+ this.SUM_TIMER_WRITE_LOW_PRIORITY += other.SUM_TIMER_WRITE_LOW_PRIORITY
+ this.SUM_TIMER_WRITE_NORMAL += other.SUM_TIMER_WRITE_NORMAL
+ this.SUM_TIMER_WRITE_EXTERNAL += other.SUM_TIMER_WRITE_EXTERNAL
+}
+
+// subtract removes the values of other from this row (used to produce relative statistics)
+func (this *table_lock_waits_summary_by_table_row) subtract(other table_lock_waits_summary_by_table_row) {
+ this.COUNT_STAR -= other.COUNT_STAR
+ this.SUM_TIMER_WAIT -= other.SUM_TIMER_WAIT
+ this.SUM_TIMER_READ -= other.SUM_TIMER_READ
+ this.SUM_TIMER_WRITE -= other.SUM_TIMER_WRITE
+ this.SUM_TIMER_READ_WITH_SHARED_LOCKS -= other.SUM_TIMER_READ_WITH_SHARED_LOCKS
+ this.SUM_TIMER_READ_HIGH_PRIORITY -= other.SUM_TIMER_READ_HIGH_PRIORITY
+ this.SUM_TIMER_READ_NO_INSERT -= other.SUM_TIMER_READ_NO_INSERT
+ this.SUM_TIMER_READ_NORMAL -= other.SUM_TIMER_READ_NORMAL
+ this.SUM_TIMER_READ_EXTERNAL -= other.SUM_TIMER_READ_EXTERNAL
+ this.SUM_TIMER_WRITE_ALLOW_WRITE -= other.SUM_TIMER_WRITE_ALLOW_WRITE
+ this.SUM_TIMER_WRITE_CONCURRENT_INSERT -= other.SUM_TIMER_WRITE_CONCURRENT_INSERT
+ this.SUM_TIMER_WRITE_LOW_PRIORITY -= other.SUM_TIMER_WRITE_LOW_PRIORITY
+ this.SUM_TIMER_WRITE_NORMAL -= other.SUM_TIMER_WRITE_NORMAL
+ this.SUM_TIMER_WRITE_EXTERNAL -= other.SUM_TIMER_WRITE_EXTERNAL
+}
+
+// return the totals of a slice of rows
+func (t table_lock_waits_summary_by_table_rows) totals() table_lock_waits_summary_by_table_row {
+ var totals table_lock_waits_summary_by_table_row
+ totals.OBJECT_SCHEMA = "TOTALS"
+
+ for i := range t {
+ totals.add(t[i])
+ }
+
+ return totals
+}
+
+// Select the raw data from the database into table_lock_waits_summary_by_table_rows,
+// filtering out tables which have seen no lock activity (COUNT_STAR = 0).
+func select_tlwsbt_rows(dbh *sql.DB) table_lock_waits_summary_by_table_rows {
+ var t table_lock_waits_summary_by_table_rows
+
+ sql := "SELECT OBJECT_TYPE, OBJECT_SCHEMA, OBJECT_NAME, COUNT_STAR, SUM_TIMER_WAIT, SUM_TIMER_READ, SUM_TIMER_WRITE, SUM_TIMER_READ_WITH_SHARED_LOCKS, SUM_TIMER_READ_HIGH_PRIORITY, SUM_TIMER_READ_NO_INSERT, SUM_TIMER_READ_NORMAL, SUM_TIMER_READ_EXTERNAL, SUM_TIMER_WRITE_ALLOW_WRITE, SUM_TIMER_WRITE_CONCURRENT_INSERT, SUM_TIMER_WRITE_LOW_PRIORITY, SUM_TIMER_WRITE_NORMAL, SUM_TIMER_WRITE_EXTERNAL FROM table_lock_waits_summary_by_table WHERE COUNT_STAR > 0"
+
+ rows, err := dbh.Query(sql)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var r table_lock_waits_summary_by_table_row
+ if err := rows.Scan(
+ &r.OBJECT_TYPE,
+ &r.OBJECT_SCHEMA,
+ &r.OBJECT_NAME,
+ &r.COUNT_STAR,
+ &r.SUM_TIMER_WAIT,
+ &r.SUM_TIMER_READ,
+ &r.SUM_TIMER_WRITE,
+ &r.SUM_TIMER_READ_WITH_SHARED_LOCKS,
+ &r.SUM_TIMER_READ_HIGH_PRIORITY,
+ &r.SUM_TIMER_READ_NO_INSERT,
+ &r.SUM_TIMER_READ_NORMAL,
+ &r.SUM_TIMER_READ_EXTERNAL,
+ &r.SUM_TIMER_WRITE_ALLOW_WRITE,
+ &r.SUM_TIMER_WRITE_CONCURRENT_INSERT,
+ &r.SUM_TIMER_WRITE_LOW_PRIORITY,
+ &r.SUM_TIMER_WRITE_NORMAL,
+ &r.SUM_TIMER_WRITE_EXTERNAL); err != nil {
+ log.Fatal(err)
+ }
+ // we collect all data as we may need it later
+ t = append(t, r)
+ }
+ if err := rows.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ return t
+}
+
+func (t table_lock_waits_summary_by_table_rows) Len() int { return len(t) }
+func (t table_lock_waits_summary_by_table_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+// sort by value (descending) but by name (ascending) if the values are the same
+func (t table_lock_waits_summary_by_table_rows) Less(i, j int) bool {
+ return (t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT) ||
+ ((t[i].SUM_TIMER_WAIT == t[j].SUM_TIMER_WAIT) &&
+ (t[i].name() < t[j].name()))
+}
+
+// sort the data
+func (t table_lock_waits_summary_by_table_rows) sort() {
+ sort.Sort(t)
+}
+
+// remove the initial values from those rows where there's a match
+// - if we find a row we can't match ignore it
+func (this *table_lock_waits_summary_by_table_rows) subtract(initial table_lock_waits_summary_by_table_rows) {
+ i_by_name := make(map[string]int)
+
+ // index the initial rows by name
+ for i := range initial {
+ i_by_name[initial[i].name()] = i
+ }
+
+ for i := range *this {
+ if initial_i, ok := i_by_name[(*this)[i].name()]; ok {
+ (*this)[i].subtract(initial[initial_i])
+ }
+ }
+}
+
+// if the data in t2 is "newer", "has more values" than t then it needs refreshing.
+// check this by comparing totals.
+func (t table_lock_waits_summary_by_table_rows) needs_refresh(t2 table_lock_waits_summary_by_table_rows) bool {
+ my_totals := t.totals()
+ t2_totals := t2.totals()
+
+ return my_totals.SUM_TIMER_WAIT > t2_totals.SUM_TIMER_WAIT
+}
+
+// describe a whole row
+func (r table_lock_waits_summary_by_table_row) String() string {
+ return fmt.Sprintf("%-30s|%10s %10s %10s|%10s %10s %10s %10s %10s|%10s %10s %10s %10s %10s",
+ r.pretty_name(),
+ lib.FormatTime(r.SUM_TIMER_WAIT),
+ lib.FormatTime(r.SUM_TIMER_READ),
+ lib.FormatTime(r.SUM_TIMER_WRITE),
+
+ lib.FormatTime(r.SUM_TIMER_READ_WITH_SHARED_LOCKS),
+ lib.FormatTime(r.SUM_TIMER_READ_HIGH_PRIORITY),
+ lib.FormatTime(r.SUM_TIMER_READ_NO_INSERT),
+ lib.FormatTime(r.SUM_TIMER_READ_NORMAL),
+ lib.FormatTime(r.SUM_TIMER_READ_EXTERNAL),
+
+ lib.FormatTime(r.SUM_TIMER_WRITE_ALLOW_WRITE),
+ lib.FormatTime(r.SUM_TIMER_WRITE_CONCURRENT_INSERT),
+ lib.FormatTime(r.SUM_TIMER_WRITE_LOW_PRIORITY),
+ lib.FormatTime(r.SUM_TIMER_WRITE_NORMAL),
+ lib.FormatTime(r.SUM_TIMER_WRITE_EXTERNAL))
+}
+
+// describe a whole table
+func (t table_lock_waits_summary_by_table_rows) String() string {
+ s := make([]string, 0, len(t))
+
+ for i := range t {
+ s = append(s, t[i].String())
+ }
+
+ return strings.Join(s, "\n")
+}
--- /dev/null
+// This file configures the screen, basically remembering the size
+// and foreground and background colours.
+package screen
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ "github.com/nsf/termbox-go"
+
+ "github.com/sjmudd/pstop/lib"
+ "github.com/sjmudd/pstop/version"
+)
+
+// TermboxScreen records the screen dimensions and the default
+// foreground and background colours.
+type TermboxScreen struct {
+ width, height int
+ fg, bg termbox.Attribute
+}
+
+type TermboxAttribute termbox.Attribute
+
+// reset the termbox to a clear screen
+func (s *TermboxScreen) Initialise() {
+ err := termbox.Init()
+ if err != nil {
+ fmt.Println("Could not start termbox for " + lib.MyName() + ". View ~/." + lib.MyName() + ".log for error messages.")
+ log.Printf("Cannot start "+lib.MyName()+", termbox.Init() gave an error:\n%s\n", err)
+ os.Exit(1)
+ }
+
+ s.Clear()
+ s.fg = termbox.ColorDefault
+ s.bg = termbox.ColorDefault
+
+ x, y := termbox.Size()
+ s.SetSize(x, y)
+}
+
+// clear the screen
+func (s *TermboxScreen) Clear() {
+ termbox.Clear(termbox.ColorWhite, termbox.ColorBlack)
+}
+
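+// Flush pushes any pending changes out to the terminal.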
+func (s *TermboxScreen) Flush() {
+ termbox.Flush()
+}
+
+func (s *TermboxScreen) SetSize(width, height int) {
+ // clear the old bottom line in case the screen has grown
+ for x := 0; x < s.width; x++ {
+ termbox.SetCell(x, s.height-1, ' ', s.fg, s.bg)
+ }
+ s.Flush()
+
+ s.width = width
+ s.height = height
+}
+
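+// Size returns the current screen width and height.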
+func (s *TermboxScreen) Size() (int, int) {
+ return s.width, s.height
+}
+
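+// Height returns the current screen height.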
+func (s *TermboxScreen) Height() int {
+ return s.height
+}
+
+// print the characters, but don't print beyond the right-hand edge of the screen
+func (s *TermboxScreen) PrintAt(x int, y int, text string) {
+ offset := 0
+ for _, c := range text { // iterate by rune so multi-byte characters display correctly
+ if (x + offset) < s.width {
+ termbox.SetCell(x+offset, y, c, s.fg, s.bg)
+ offset++
+ }
+ }
+ s.Flush()
+}
+
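+// DisplayHelp shows a simple help page listing the keys pstop responds to.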
+func (s *TermboxScreen) DisplayHelp() {
+ s.PrintAt(0, 0, lib.MyName()+" version "+version.Version()+" (C) 2014 Simon J Mudd <sjmudd@pobox.com>")
+
+ s.PrintAt(0, 2, "Program to show the top I/O information by accessing information from the")
+ s.PrintAt(0, 3, "performance_schema schema. Ideas based on mysql-sys.")
+
+ s.PrintAt(0, 5, "Keys:")
+ s.PrintAt(0, 6, "- - reduce the poll interval by 1 second (minimum 1 second)")
+ s.PrintAt(0, 7, "+ - increase the poll interval by 1 second")
+ s.PrintAt(0, 8, "h - this help screen")
+ s.PrintAt(0, 9, "q - quit")
+ s.PrintAt(0, 10, "t - toggle between showing time since resetting statistics or since P_S data was collected")
+ s.PrintAt(0, 11, "z - reset statistics")
+ s.PrintAt(0, 12, "<tab> - change display modes between: latency, ops, file I/O and lock modes")
+ s.PrintAt(0, 14, "Press h to return to main screen")
+}
+
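+// Close shuts down termbox cleanly.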
+func (s *TermboxScreen) Close() {
+ termbox.Close()
+}
--- /dev/null
+// package state - holds the application state of pstop.
+//
+// This file contains the routines for managing that stored state.
+package state
+
+import (
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/sjmudd/pstop/lib"
+ fsbi "github.com/sjmudd/pstop/performance_schema/file_summary_by_instance"
+ "github.com/sjmudd/pstop/performance_schema/ps_table"
+ tiwsbt "github.com/sjmudd/pstop/performance_schema/table_io_waits_summary_by_table"
+ tlwsbt "github.com/sjmudd/pstop/performance_schema/table_lock_waits_summary_by_table"
+ "github.com/sjmudd/pstop/screen"
+ "github.com/sjmudd/pstop/version"
+)
+
+// what information to show
+type Show int
+
+const (
+ showLatency Show = iota
+ showOps
+ showIO
+ showLocks
+)
+
+type State struct {
+ datadir string
+ dbh *sql.DB
+ help bool
+ hostname string
+ fsbi ps_table.Tabler // ufsbi.File_summary_by_instance
+ tiwsbt tiwsbt.Table_io_waits_summary_by_table
+ tlwsbt ps_table.Tabler // tlwsbt.Table_lock_waits_summary_by_table
+ screen screen.TermboxScreen
+ show Show
+ mysql_version string
+ want_relative_stats bool
+}
+
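+// Setup initialises the screen, sets up the tables we collect from,
+// enables relative statistics and records some server variables
+// (hostname, version, datadir).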
+func (state *State) Setup(dbh *sql.DB) {
+ state.dbh = dbh
+
+ state.screen.Initialise()
+
+ _, variables := lib.SelectAllGlobalVariablesByVariableName(state.dbh)
+ // setup to their initial types/values
+ state.fsbi = fsbi.NewFileSummaryByInstance(variables)
+ state.tlwsbt = new(tlwsbt.Table_lock_waits_summary_by_table)
+
+ state.want_relative_stats = true // we show info from the point we start collecting data
+ state.fsbi.SetWantRelativeStats(state.want_relative_stats)
+ state.fsbi.SetNow()
+ state.tlwsbt.SetWantRelativeStats(state.want_relative_stats)
+ state.tlwsbt.SetNow()
+ state.tiwsbt.SetWantRelativeStats(state.want_relative_stats)
+ state.tiwsbt.SetNow()
+
+ state.ResetDBStatistics()
+
+ state.SetHelp(false)
+ state.show = showLatency
+ state.tiwsbt.SetWantsLatency(true)
+
+ _, hostname := lib.SelectGlobalVariableByVariableName(state.dbh, "HOSTNAME")
+ _, mysql_version := lib.SelectGlobalVariableByVariableName(state.dbh, "VERSION")
+ _, datadir := lib.SelectGlobalVariableByVariableName(state.dbh, "DATADIR")
+ state.SetHostname(hostname)
+ state.SetMySQLVersion(mysql_version)
+ state.SetDatadir(datadir)
+}
+
+// do a fresh collection of data and then update the initial values based on that.
+func (state *State) ResetDBStatistics() {
+ state.fsbi.Collect(state.dbh)
+ state.fsbi.UpdateInitialValues()
+
+ state.tlwsbt.Collect(state.dbh)
+ state.tlwsbt.UpdateInitialValues()
+
+ state.tiwsbt.Collect(state.dbh)
+ state.tiwsbt.UpdateInitialValues()
+}
+
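+// MySQLVersion returns the version of the MySQL server we are connected to.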
+func (state State) MySQLVersion() string {
+ return state.mysql_version
+}
+
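+// Datadir returns the MySQL server's data directory.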
+func (state State) Datadir() string {
+ return state.datadir
+}
+
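+// SetHelp records whether the help screen should be shown and clears the screen ready for redrawing.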
+func (state *State) SetHelp(newHelp bool) {
+ state.help = newHelp
+
+ state.screen.Clear()
+ state.screen.Flush()
+}
+
+func (state *State) SetDatadir(datadir string) {
+ state.datadir = datadir
+}
+
+func (state *State) SetMySQLVersion(mysql_version string) {
+ state.mysql_version = mysql_version
+}
+
+func (state *State) SetHostname(hostname string) {
+ state.hostname = hostname
+}
+
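+// Help returns whether the help screen is currently being shown.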
+func (state State) Help() bool {
+ return state.help
+}
+
+// display the output according to the mode we are in
+func (state *State) Display() {
+ if state.help {
+ state.screen.DisplayHelp()
+ } else {
+ state.displayHeading()
+ switch state.show {
+ case showLatency, showOps:
+ state.displayOpsOrLatency()
+ case showIO:
+ state.displayIO()
+ case showLocks:
+ state.displayLocks()
+ }
+ }
+}
+
+// change to the next display mode
+func (state *State) DisplayNext() {
+ if state.show == showLocks {
+ state.show = showLatency
+ } else {
+ state.show++
+ }
+ // keep the latency/ops setting in step with the new display mode
+ switch state.show {
+ case showLatency:
+ state.tiwsbt.SetWantsLatency(true)
+ case showOps:
+ state.tiwsbt.SetWantsLatency(false)
+ }
+ state.screen.Clear()
+ state.screen.Flush()
+}
+
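+// displayHeading draws the two heading lines at the top of the screen.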
+func (state State) displayHeading() {
+ state.displayLine0()
+ state.displayLine1()
+}
+
+func (state State) displayLine0() {
+ _, uptime := lib.SelectGlobalStatusByVariableName(state.dbh, "UPTIME")
+ top_line := lib.MyName() + " " + version.Version() + " - " + now_hhmmss() + " " + state.hostname + " / " + state.mysql_version + ", up " + fmt.Sprintf("%-16s", lib.Uptime(uptime))
+ if state.want_relative_stats {
+ now := time.Now()
+
+ var initial time.Time
+
+ switch state.show {
+ case showLatency, showOps:
+ initial = state.tiwsbt.Last()
+ case showIO:
+ initial = state.fsbi.Last()
+ case showLocks:
+ initial = state.tlwsbt.Last()
+ default:
+ initial = time.Now() // should not happen: all display modes are handled above
+ }
+
+ d := now.Sub(initial)
+
+ top_line = top_line + " [REL] " + fmt.Sprintf("%.0f seconds", d.Seconds())
+ } else {
+ top_line = top_line + " [ABS] "
+ }
+ state.screen.PrintAt(0, 0, top_line)
+}
+
+func (state State) displayLine1() {
+ switch state.show {
+ case showLatency, showOps:
+ state.screen.PrintAt(0, 1, state.tiwsbt.Description())
+ case showIO:
+ state.screen.PrintAt(0, 1, state.fsbi.Description())
+ case showLocks:
+ state.screen.PrintAt(0, 1, state.tlwsbt.Description())
+ default:
+ state.screen.PrintAt(0, 1, "UNKNOWN")
+ }
+}
+
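+// displayOpsOrLatency collects the table I/O data and draws it, ordered
+// by latency or by operations depending on the current mode.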
+func (state *State) displayOpsOrLatency() {
+ state.tiwsbt.Collect(state.dbh)
+
+ state.screen.PrintAt(0, 2, state.tiwsbt.Headings())
+
+ max_rows := state.screen.Height() - 3
+ row_content := state.tiwsbt.RowContent(max_rows)
+
+ // print out rows
+ for k := range row_content {
+ y := 3 + k
+ state.screen.PrintAt(0, y, row_content[k])
+ }
+ // print out empty rows
+ for k := len(row_content); k < (state.screen.Height() - 3); k++ {
+ y := 3 + k
+ if y < state.screen.Height()-1 {
+ state.screen.PrintAt(0, y, state.tiwsbt.EmptyRowContent())
+ }
+ }
+
+ // print out the totals at the bottom
+ state.screen.PrintAt(0, state.screen.Height()-1, state.tiwsbt.TotalRowContent())
+}
+
+// show actual I/O latency values
+func (state State) displayIO() {
+ state.fsbi.Collect(state.dbh)
+
+ state.screen.PrintAt(0, 2, state.fsbi.Headings())
+
+ // print out the data
+ max_rows := state.screen.Height() - 3
+ row_content := state.fsbi.RowContent(max_rows)
+
+ // print out rows
+ for k := range row_content {
+ y := 3 + k
+ state.screen.PrintAt(0, y, row_content[k])
+ }
+ // print out empty rows
+ for k := len(row_content); k < (state.screen.Height() - 3); k++ {
+ y := 3 + k
+ if y < state.screen.Height()-1 {
+ state.screen.PrintAt(0, y, state.fsbi.EmptyRowContent())
+ }
+ }
+
+ // print out the totals at the bottom
+ state.screen.PrintAt(0, state.screen.Height()-1, state.fsbi.TotalRowContent())
+}
+
+func (state *State) displayLocks() {
+ state.tlwsbt.Collect(state.dbh)
+
+ state.screen.PrintAt(0, 2, state.tlwsbt.Headings())
+
+ // print out the data
+ max_rows := state.screen.Height() - 3
+ row_content := state.tlwsbt.RowContent(max_rows)
+
+ // print out rows
+ for k := range row_content {
+ y := 3 + k
+ state.screen.PrintAt(0, y, row_content[k])
+ }
+ // print out empty rows
+ for k := len(row_content); k < (state.screen.Height() - 3); k++ {
+ y := 3 + k
+ if y < state.screen.Height()-1 {
+ state.screen.PrintAt(0, y, state.tlwsbt.EmptyRowContent())
+ }
+ }
+
+ // print out the totals at the bottom
+ state.screen.PrintAt(0, state.screen.Height()-1, state.tlwsbt.TotalRowContent())
+}
+
+// WantRelativeStats returns whether we are showing values relative to the
+// last reset rather than absolute values.
+ return state.want_relative_stats
+}
+
+// set if we want data from when we started/reset stats.
+func (state *State) SetWantRelativeStats(want_relative_stats bool) {
+ state.want_relative_stats = want_relative_stats
+
+ state.fsbi.SetWantRelativeStats(want_relative_stats)
+ state.tlwsbt.SetWantRelativeStats(state.want_relative_stats)
+ state.tiwsbt.SetWantRelativeStats(state.want_relative_stats)
+
+ state.Display()
+}
+
+// now_hhmmss returns the current time of day as hh:mm:ss.
+func now_hhmmss() string {
+ return time.Now().Format("15:04:05")
+}
+
+// record the latest screen size
+func (state *State) ScreenSetSize(width, height int) {
+ state.screen.SetSize(width, height)
+}
+
+// clean up screen and disconnect database
+func (state *State) Cleanup() {
+ state.screen.Close()
+ if state.dbh != nil {
+ _ = state.dbh.Close()
+ }
+}
--- /dev/null
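+// package version records the current version of pstop.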
+package version
+
+const (
+ version = "0.0.10"
+)
+
+func Version() string {
+ return version
+}