From: Simon J Mudd Date: Tue, 9 Dec 2014 23:01:37 +0000 (+0100) Subject: Move to i_s / p_s and add user info (not quite complete) X-Git-Url: http://git.iain.cx/?p=pstop.git;a=commitdiff_plain;h=52d3c00ce729f908f342e10f48818b112dec0130 Move to i_s / p_s and add user info (not quite complete) --- diff --git a/i_s/output_format.txt b/i_s/output_format.txt new file mode 100644 index 0000000..6df8e20 --- /dev/null +++ b/i_s/output_format.txt @@ -0,0 +1,71 @@ + +8 7 6 5 4 3 2 1 +09876543210987654321098765432109876543210987654321098765432109876543210987654321 + +pstop v0.0.19 uptime .... +Active Query Information by User + --- Queries --- |-- InnoDB --| | +Totl Ins Upd Del|wr MB/s rd|Conn Act|Con/s Abrt/s ../s +9999 9999 9999 9999|111.1 111.1|9999 999|999.9 999.9 + +user_name |QueryTim %age||Select Insert Update Delete Other +xxxxxxxxxx |hh:mm:ss 10.0%|100.00% 100.00% 100.00% 100.00% 100.00% +... +TOTAL |hh:mm:ss 10.0%|100.00% 100.00% 100.00% 100.00% 100.00% + + +root@localhost [(none)]> show create table information_Schema.global_status\G +*************************** 1. row *************************** + Table: GLOBAL_STATUS +Create Table: CREATE TEMPORARY TABLE `GLOBAL_STATUS` ( + `VARIABLE_NAME` varchar(64) NOT NULL DEFAULT '', + `VARIABLE_VALUE` varchar(1024) DEFAULT NULL +) ENGINE=MEMORY DEFAULT CHARSET=utf8 +1 row in set (0.01 sec) + + +Standard output is in the format: + + key read misses -innodb- table qcache ------ theads ----- IO MB/s tmp dsk + qps innodb myisam log wait locks hits conn actv crea abrt rd wr tables +9999 99.99% 99.99% 99999999 99.99% 999.9% 9999 9999 9999 9999 999 999 99.99% + + +# keys used for calculating delta values +my @delta_keys = qw( + Aborted_clients + Com_select + Created_tmp_disk_tables + Created_tmp_tables + Innodb_buffer_pool_read_requests + Innodb_buffer_pool_reads + Innodb_data_read + Innodb_data_written + Innodb_log_waits + Key_read_requests + Key_reads + Qcache_hits + Questions + Table_locks_immediate + Table_locks_waited + Threads_created + Uptime + ); + +# Add more values if using extended output. +push @delta_keys, qw ( + Com_insert + Com_insert_select + Com_delete + Com_delete_multi + Com_update + Com_update_multi + Com_replace + Com_replace_select + Qcache_lowmem_prunes +) if $extended_output; + + + +thread_cache_size +max_connections diff --git a/i_s/pl_by_user.go b/i_s/pl_by_user.go new file mode 100644 index 0000000..9ad2c8d --- /dev/null +++ b/i_s/pl_by_user.go @@ -0,0 +1,135 @@ +package i_s + +import ( + "fmt" + "sort" + + "github.com/sjmudd/pstop/lib" +) + +/* +root@localhost [i_s]> show create table i_s\G +*************************** 1. 
row *************************** +CREATE TEMPORARY TABLE `PROCESSLIST` ( + `ID` bigint(21) unsigned NOT NULL DEFAULT '0', + `USER` varchar(16) NOT NULL DEFAULT '', + `HOST` varchar(64) NOT NULL DEFAULT '', + `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', `TIME` int(7) NOT NULL DEFAULT '0', `STATE` varchar(64) DEFAULT NULL, + `INFO` longtext +) ENGINE=MyISAM DEFAULT CHARSET=utf8 +1 row in set (0.02 sec) +*/ + +// a summary row of information taken from information_schema.processlist +type pl_by_user_row struct { + username string + runtime uint64 + connections uint64 + active uint64 + hosts uint64 + dbs uint64 + selects uint64 + inserts uint64 + updates uint64 + deletes uint64 + other uint64 +} +type pl_by_user_rows []pl_by_user_row + +/* +username |Run Time %age|Conn Actv|Hosts DBs|Select Insert Update Delete Other| +xxxxxxxxxxxxxx|hh:mm:ss 100.0%|9999 9999|9999 999|100.0% 100.0% 100.0% 100.0% 100.0%| +*/ + +func (r *pl_by_user_row) headings() string { + return fmt.Sprintf("%-14s|%10s %6s|%5s %4s|%4s %3s|%6s %6s %6s %6s %6s|", + "username", "Run Time", "%", "Conn", "Actv", "Hosts", "DBs", "Select", "Insert", "Update", "Delete", "Other") +} + +// generate a printable result +func (r *pl_by_user_row) row_content(totals pl_by_user_row) string { + var u string + if len(r.username) == 0 { + u = "" + } else if len(r.username) > 14 { + u = r.username[0:14] + } else { + u = r.username + } + return fmt.Sprintf("%-14s|%10s %6s|%5s %4s|%4s %3s|%6s %6s %6s %6s %6s|", + u, + lib.FormatTime(r.runtime), + lib.FormatPct(lib.MyDivide(r.runtime, totals.runtime)), + lib.FormatAmount(r.connections), + lib.FormatAmount(r.active), + lib.FormatAmount(r.hosts), + lib.FormatAmount(r.dbs), + lib.FormatAmount(r.selects), + lib.FormatAmount(r.inserts), + lib.FormatAmount(r.updates), + lib.FormatAmount(r.deletes), + lib.FormatAmount(r.other)) +} + +// generate a row of totals from a table +func (t pl_by_user_rows) totals() pl_by_user_row { + var totals pl_by_user_row + totals.username = "TOTALS" + + for i := range t { + totals.runtime += t[i].runtime + totals.connections += t[i].connections + totals.active += t[i].active + // totals.hosts += t[i].hosts This needs to be done differently to get the total number of distinct hosts + // totals.dbs += t[i].dbs This needs to be done differently to get the total number of distinct dbs + totals.selects += t[i].selects + totals.inserts += t[i].inserts + totals.updates += t[i].updates + totals.deletes += t[i].deletes + totals.other += t[i].other + } + + return totals +} + +func (t pl_by_user_rows) Headings() string { + var r pl_by_user_row + return r.headings() +} + +// describe a whole row +func (r pl_by_user_row) String() string { + return fmt.Sprintf("%v %v %v %v %v %v %v %v %v", r.username, r.runtime, r.connections, r.active, r.hosts, r.dbs, r.selects, r.inserts, r.updates, r.deletes, r.other) +} + +// describe a whole table +func (t pl_by_user_rows) String() string { + s := "" + for i := range t { + s = s + t[i].String() + "\n" + } + return s +} + +// for sorting +type ByRunTime pl_by_user_rows + +func (t ByRunTime) Len() int { return len(t) } +func (t ByRunTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t ByRunTime) Less(i, j int) bool { + return (t[i].runtime > t[j].runtime) || + ((t[i].runtime == t[j].runtime) && (t[i].connections > t[j].connections)) +} + +func (t pl_by_user_rows) Sort() { + sort.Sort(ByRunTime(t)) +} + +func (r pl_by_user_row) Description() string { + return "no description" +} + +func (t pl_by_user_rows) 
emptyRowContent() string { + var r pl_by_user_row + return r.row_content(r) +} diff --git a/i_s/processlist.go b/i_s/processlist.go new file mode 100644 index 0000000..2be6d33 --- /dev/null +++ b/i_s/processlist.go @@ -0,0 +1,201 @@ +// i_s - library routines for pstop. +// +// This file contains the library routines for managing the +// table_io_waits_by_table table. +package i_s + +import ( + "database/sql" + "fmt" + "github.com/sjmudd/pstop/lib" + "github.com/sjmudd/pstop/p_s" + "regexp" + "strings" + "time" +) + +type map_string_string map[string]string + +// a table of rows +type Processlist struct { + p_s.RelativeStats + p_s.InitialTime + current processlist_rows // processlist + results pl_by_user_rows // results by user + totals pl_by_user_row // totals of results +} + +// Collect() collects data from the db, updating initial +// values if needed, and then subtracting initial values if we want +// relative values, after which it stores totals. +func (t *Processlist) Collect(dbh *sql.DB) { + lib.Logger.Println("Processlist.Collect() - starting collection of data") + start := time.Now() + + t.current = select_processlist(dbh) + lib.Logger.Println("t.current collected", len(t.current), "row(s) from SELECT") + + t.processlist2by_user() + + t.results.Sort() + // lib.Logger.Println( "- collecting t.totals from t.results" ) + t.totals = t.results.totals() + + lib.Logger.Println("Processlist.Collect() END, took:", time.Duration(time.Since(start)).String()) +} + +func (t *Processlist) Headings() string { + return t.results.Headings() +} + +func (t Processlist) EmptyRowContent() string { + return t.results.emptyRowContent() +} + +func (t Processlist) TotalRowContent() string { + return t.totals.row_content(t.totals) +} + +func (t Processlist) RowContent(max_rows int) []string { + rows := make([]string, 0, max_rows) + + for i := range t.results { + if i < max_rows { + rows = append(rows, t.results[i].row_content(t.totals)) + } + } + + return rows +} + +func (t Processlist) Description() string { + count := t.count_rows() + return fmt.Sprintf("User Information (processlist) %d rows", count) +} + +func (t Processlist) count_rows() int { + var count int + for row := range t.results { + if t.results[row].username != "" { + count++ + } + } + return count +} + +// return the hostname without the port part +func get_hostname(h_p string) string { + i := strings.Index(h_p, ":") + if i >= 0 { + return h_p[0:i] + } else { + return h_p // shouldn't happen !!! 
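+		// A couple of illustrative cases (hypothetical HOST values, not
+		// taken from the commit):
+		//   get_hostname("app-01.example.com:58703") -> "app-01.example.com"
+		//   get_hostname("localhost")                -> "localhost"
+		// An empty HOST, as reported for some internal threads, also falls
+		// through here unchanged.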
+ } +} + +// read in processlist and add the appropriate values into a new pl_by_user table +func (t *Processlist) processlist2by_user() { + lib.Logger.Println("Processlist.processlist2by_user() START") + + var re_active_repl_master_thread *regexp.Regexp = regexp.MustCompile("Sending binlog event to slave") + var re_select *regexp.Regexp = regexp.MustCompile(`SELECT(?i)`) // make case insensitive + var re_insert *regexp.Regexp = regexp.MustCompile(`INSERT(?i)`) // make case insensitive + var re_update *regexp.Regexp = regexp.MustCompile(`UPDATE(?i)`) // make case insensitive + var re_delete *regexp.Regexp = regexp.MustCompile(`DELETE(?i)`) // make case insensitive + + var row pl_by_user_row + var results pl_by_user_rows + var my_hosts map_string_string + var my_db map_string_string + var ok bool + // map username to row details + users := make(map[string]pl_by_user_row) + hosts_by_user := make(map[string]map_string_string) + dbs_by_user := make(map[string]map_string_string) + // var dbs map[string]map_string_string + // Command state + // | 3859522915 | m1m1repl | bc210bprdb-01.lhr4.prod.booking.com:58703 | NULL | Binlog Dump | 4165475 | Master has sent all binlog to slave; waiting for binlog to be updated | NULL | + // | 4179949288 | m1m1repl | xc238bprdb-01.lhr4.prod.booking.com:34391 | NULL | Binlog Dump | 3053011 | Master has sent all binlog to slave; waiting for binlog to be updated | NULL | + // | 4336765784 | m1m1repl | bc279bprdb-01.lhr4.prod.booking.com:50991 | NULL | Binlog Dump | 2523403 | Sending binlog event to slave | NULL | + + for i := range t.current { + // munge the username for special purposes (event scheduler, replication threads etc) + id := t.current[i].ID + username := t.current[i].USER // limit size for display + host := get_hostname(t.current[i].HOST) + command := t.current[i].COMMAND + db := t.current[i].DB + info := t.current[i].INFO + state := t.current[i].STATE + + lib.Logger.Println("- id/user/host:", id, username, host) + + if old_row, ok := users[username]; ok { + lib.Logger.Println("- found old row in users") + row = old_row // get old row + } else { + lib.Logger.Println("- NOT found old row in users") + // create new row - RESET THE VALUES !!!! 
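+			// new() returns a pointer to a zero-valued pl_by_user_row, so
+			// every counter below (connections, active, selects, ...) starts
+			// from 0; only username needs to be filled in before the row is
+			// stored back into the users map.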
+ rowp := new(pl_by_user_row) + row = *rowp + row.username = t.current[i].USER + users[username] = row + } + row.connections++ + // ignore system SQL threads (may be more to filter out) + if username != "system user" && host != "" && command != "Sleep" && command != "Binlog Dump" { + row.runtime += t.current[i].TIME + row.active++ + } + if command == "Binlog Dump" && re_active_repl_master_thread.MatchString(state) { + row.active++ + } + + // add the host if not known already + if host != "" { + if my_hosts, ok = hosts_by_user[username]; !ok { + my_hosts = make(map_string_string) + } + my_hosts[host] = host // whatever - value doesn't matter + hosts_by_user[username] = my_hosts + } + row.hosts = uint64(len(hosts_by_user[username])) + + // add the db count if not known already + if db != "" { + if my_db, ok = dbs_by_user[username]; !ok { + my_db = make(map_string_string) + } + my_db[db] = db // whatever - value doesn't matter + dbs_by_user[username] = my_db + } + row.dbs = uint64(len(dbs_by_user[username])) + + // selects + if re_select.MatchString(info) == true { + row.selects++ + } + if re_insert.MatchString(info) == true { + row.inserts++ + } + if re_update.MatchString(info) == true { + row.updates++ + } + if re_delete.MatchString(info) == true { + row.deletes++ + } + + users[username] = row + } + + results = make(pl_by_user_rows, 0, len(users)) + for _, v := range users { + results = append(results, v) + } + t.results = results + t.results.Sort() // sort output + + t.totals = t.results.totals() + + lib.Logger.Println("Processlist.processlist2by_user() END") +} diff --git a/i_s/processlist_row.go b/i_s/processlist_row.go new file mode 100644 index 0000000..60aee47 --- /dev/null +++ b/i_s/processlist_row.go @@ -0,0 +1,101 @@ +// This file contains the library routines for managing the +// table_io_waits_by_table table. +package i_s + +import ( + "database/sql" + "fmt" + "log" +) + +/* +root@localhost [i_s]> show create table i_s\G +*************************** 1. 
row *************************** +CREATE TEMPORARY TABLE `PROCESSLIST` ( + `ID` bigint(21) unsigned NOT NULL DEFAULT '0', + `USER` varchar(16) NOT NULL DEFAULT '', + `HOST` varchar(64) NOT NULL DEFAULT '', + `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', `TIME` int(7) NOT NULL DEFAULT '0', `STATE` varchar(64) DEFAULT NULL, + `INFO` longtext +) ENGINE=MyISAM DEFAULT CHARSET=utf8 +1 row in set (0.02 sec) +*/ + +// a row from information_schema.processlist +type processlist_row struct { + ID uint64 + USER string + HOST string + DB string + COMMAND string + TIME uint64 + STATE string + INFO string +} +type processlist_rows []processlist_row + +// get the output of I_S.PROCESSLIST +func select_processlist(dbh *sql.DB) processlist_rows { + var t processlist_rows + var id sql.NullInt64 + var user sql.NullString + var host sql.NullString + var db sql.NullString + var command sql.NullString + var time sql.NullInt64 + var state sql.NullString + var info sql.NullString + + // we collect all information even if it's mainly empty as we may reference it later + + sql := "SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO FROM INFORMATION_SCHEMA.PROCESSLIST" + + rows, err := dbh.Query(sql) + if err != nil { + log.Fatal(err) + } + defer rows.Close() + + for rows.Next() { + var r processlist_row + if err := rows.Scan( + &id, + &user, + &host, + &db, + &command, + &time, + &state, + &info); err != nil { + log.Fatal(err) + } + r.ID = uint64(id.Int64) + r.USER = user.String + r.HOST = host.String + if db.Valid { + r.DB = db.String + } + r.COMMAND = command.String + r.TIME = uint64(time.Int64 * 1000000000000) + if state.Valid { + r.STATE = state.String + } + r.INFO = info.String + t = append(t, r) + } + if err := rows.Err(); err != nil { + log.Fatal(err) + } + + return t +} + +// describe a whole row +func (r processlist_row) String() string { + return fmt.Sprintf("FIXME otuput of i_s") +} + +// describe a whole table +func (t processlist_rows) String() string { + return fmt.Sprintf("FIXME otuput of i_s") +} diff --git a/performance_schema/file_summary_by_instance/cache.go b/p_s/file_summary_by_instance/cache.go similarity index 100% rename from performance_schema/file_summary_by_instance/cache.go rename to p_s/file_summary_by_instance/cache.go diff --git a/performance_schema/file_summary_by_instance/file_summary_by_instance.go b/p_s/file_summary_by_instance/file_summary_by_instance.go similarity index 97% rename from performance_schema/file_summary_by_instance/file_summary_by_instance.go rename to p_s/file_summary_by_instance/file_summary_by_instance.go index 737caa8..c1e402e 100644 --- a/performance_schema/file_summary_by_instance/file_summary_by_instance.go +++ b/p_s/file_summary_by_instance/file_summary_by_instance.go @@ -1,4 +1,4 @@ -// performance_schema - library routines for pstop. +// p_s - library routines for pstop. // // This file contains the library routines for managing the // file_summary_by_instance table. 
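Every collected table in this commit (Processlist above, and the p_s tables below) embeds p_s.RelativeStats and p_s.InitialTime to pick up the shared relative-stats switch and the snapshot timestamp. The following is only a rough illustration of that embedding pattern: the field names and method bodies are guesses, and the method sets for RelativeStats and InitialTime are inferred from the calls made elsewhere in this commit (SetWantRelativeStats, WantRelativeStats, SetNow, Last), so they may not match the real p_s definitions exactly.

package main

import (
	"fmt"
	"time"
)

// RelativeStats remembers whether relative (delta) or absolute values
// should be shown. (Illustrative only; the real type lives in p_s.)
type RelativeStats struct {
	wantRelativeStats bool
}

func (r *RelativeStats) SetWantRelativeStats(w bool) { r.wantRelativeStats = w }
func (r RelativeStats) WantRelativeStats() bool      { return r.wantRelativeStats }

// InitialTime remembers when the initial snapshot was taken.
type InitialTime struct {
	initial time.Time
}

func (t *InitialTime) SetNow()        { t.initial = time.Now() }
func (t InitialTime) Last() time.Time { return t.initial }

// A collected table embeds both helpers and so gains their methods.
type SomeTable struct {
	RelativeStats
	InitialTime
	// rows, results, totals etc. would live here
}

func main() {
	var t SomeTable
	t.SetWantRelativeStats(true) // promoted from RelativeStats
	t.SetNow()                   // promoted from InitialTime
	fmt.Println(t.WantRelativeStats(), t.Last().IsZero())
}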
@@ -10,7 +10,7 @@ import ( "time" "github.com/sjmudd/pstop/lib" - ps "github.com/sjmudd/pstop/performance_schema" + "github.com/sjmudd/pstop/p_s" ) /* @@ -47,8 +47,8 @@ CREATE TABLE `file_summary_by_instance` ( // a table of rows type File_summary_by_instance struct { - ps.RelativeStats - ps.InitialTime + p_s.RelativeStats + p_s.InitialTime initial file_summary_by_instance_rows current file_summary_by_instance_rows results file_summary_by_instance_rows diff --git a/performance_schema/file_summary_by_instance/file_summary_by_instance_row.go b/p_s/file_summary_by_instance/file_summary_by_instance_row.go similarity index 100% rename from performance_schema/file_summary_by_instance/file_summary_by_instance_row.go rename to p_s/file_summary_by_instance/file_summary_by_instance_row.go diff --git a/performance_schema/ps_table/ps_table.go b/p_s/ps_table/ps_table.go similarity index 100% rename from performance_schema/ps_table/ps_table.go rename to p_s/ps_table/ps_table.go diff --git a/performance_schema/ps_table/ps_table_row.go b/p_s/ps_table/ps_table_row.go similarity index 100% rename from performance_schema/ps_table/ps_table_row.go rename to p_s/ps_table/ps_table_row.go diff --git a/performance_schema/relative_stats.go b/p_s/relative_stats.go similarity index 93% rename from performance_schema/relative_stats.go rename to p_s/relative_stats.go index 2a9ec2e..438fafd 100644 --- a/performance_schema/relative_stats.go +++ b/p_s/relative_stats.go @@ -1,7 +1,7 @@ // performance_schema - library routines for pstop. // // want_relative_stats -package performance_schema +package p_s // a table of rows type RelativeStats struct { diff --git a/performance_schema/relative_time.go b/p_s/relative_time.go similarity index 88% rename from performance_schema/relative_time.go rename to p_s/relative_time.go index d831e6a..2339d28 100644 --- a/performance_schema/relative_time.go +++ b/p_s/relative_time.go @@ -1,4 +1,4 @@ -package performance_schema +package p_s import ( "time" diff --git a/performance_schema/table_io_waits_summary_by_table/table_io_waits_summary_by_table.go b/p_s/table_io_waits_summary_by_table/table_io_waits_summary_by_table.go similarity index 94% rename from performance_schema/table_io_waits_summary_by_table/table_io_waits_summary_by_table.go rename to p_s/table_io_waits_summary_by_table/table_io_waits_summary_by_table.go index 6f8a232..8c9620b 100644 --- a/performance_schema/table_io_waits_summary_by_table/table_io_waits_summary_by_table.go +++ b/p_s/table_io_waits_summary_by_table/table_io_waits_summary_by_table.go @@ -1,4 +1,4 @@ -// performance_schema - library routines for pstop. +// p_s - library routines for pstop. // // This file contains the library routines for managing the // table_io_waits_by_table table. 
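The Collect() pattern shown in these hunks is the same across the p_s tables: take an initial snapshot on the first collection, re-copy it when the counters appear to have been reset, and, when relative stats are wanted, report current minus initial. Below is a minimal, self-contained sketch of just the subtraction step, using a deliberately simplified row type rather than the real *_rows types.

package main

import "fmt"

// A deliberately simplified stand-in for the per-table row types.
type row struct {
	name  string
	count uint64
}

// subtract returns current minus initial, matching rows by name. A counter
// that has gone backwards (for example after a server restart) is reported
// unmodified here; the real Collect() handles that case by re-copying the
// initial snapshot instead.
func subtract(current, initial []row) []row {
	base := make(map[string]uint64, len(initial))
	for _, r := range initial {
		base[r.name] = r.count
	}
	out := make([]row, 0, len(current))
	for _, r := range current {
		if prev, ok := base[r.name]; ok && prev <= r.count {
			r.count -= prev
		}
		out = append(out, r)
	}
	return out
}

func main() {
	initial := []row{{"db.t1", 100}, {"db.t2", 40}}
	current := []row{{"db.t1", 130}, {"db.t2", 40}, {"db.t3", 5}}
	fmt.Println(subtract(current, initial)) // [{db.t1 30} {db.t2 0} {db.t3 5}]
}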
@@ -10,13 +10,13 @@ import ( "time" "github.com/sjmudd/pstop/lib" - ps "github.com/sjmudd/pstop/performance_schema" + "github.com/sjmudd/pstop/p_s" ) // a table of rows type Table_io_waits_summary_by_table struct { - ps.RelativeStats - ps.InitialTime + p_s.RelativeStats + p_s.InitialTime want_latency bool initial table_io_waits_summary_by_table_rows // initial data for relative values current table_io_waits_summary_by_table_rows // last loaded values @@ -42,14 +42,14 @@ func (t *Table_io_waits_summary_by_table) Collect(dbh *sql.DB) { lib.Logger.Println("t.current collected", len(t.current), "row(s) from SELECT") if len(t.initial) == 0 && len(t.current) > 0 { - lib.Logger.Println("t.initial: copying from t.current (initial setup)" ) + lib.Logger.Println("t.initial: copying from t.current (initial setup)") t.initial = make(table_io_waits_summary_by_table_rows, len(t.current)) copy(t.initial, t.current) } // check for reload initial characteristics if t.initial.needs_refresh(t.current) { - lib.Logger.Println( "t.initial: copying from t.current (data needs refreshing)" ) + lib.Logger.Println("t.initial: copying from t.current (data needs refreshing)") t.initial = make(table_io_waits_summary_by_table_rows, len(t.current)) copy(t.initial, t.current) } @@ -58,8 +58,8 @@ func (t *Table_io_waits_summary_by_table) Collect(dbh *sql.DB) { // lib.Logger.Println( "t.initial:", t.initial ) // lib.Logger.Println( "t.current:", t.current ) - lib.Logger.Println("t.initial.totals():", t.initial.totals() ) - lib.Logger.Println("t.current.totals():", t.current.totals() ) + lib.Logger.Println("t.initial.totals():", t.initial.totals()) + lib.Logger.Println("t.current.totals():", t.current.totals()) // lib.Logger.Println("t.results:", t.results) // lib.Logger.Println("t.totals:", t.totals) lib.Logger.Println("Table_io_waits_summary_by_table.Collect() END, took:", time.Duration(time.Since(start)).String()) diff --git a/performance_schema/table_io_waits_summary_by_table/table_io_waits_summary_by_table_row.go b/p_s/table_io_waits_summary_by_table/table_io_waits_summary_by_table_row.go similarity index 100% rename from performance_schema/table_io_waits_summary_by_table/table_io_waits_summary_by_table_row.go rename to p_s/table_io_waits_summary_by_table/table_io_waits_summary_by_table_row.go diff --git a/performance_schema/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table.go b/p_s/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table.go similarity index 97% rename from performance_schema/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table.go rename to p_s/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table.go index 5ae561f..a4aacbd 100644 --- a/performance_schema/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table.go +++ b/p_s/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table.go @@ -8,13 +8,13 @@ import ( "time" "github.com/sjmudd/pstop/lib" - ps "github.com/sjmudd/pstop/performance_schema" + "github.com/sjmudd/pstop/p_s" ) // a table of rows type Table_lock_waits_summary_by_table struct { - ps.RelativeStats - ps.InitialTime + p_s.RelativeStats + p_s.InitialTime initial table_lock_waits_summary_by_table_rows // initial data for relative values current table_lock_waits_summary_by_table_rows // last loaded values results table_lock_waits_summary_by_table_rows // results (maybe with subtraction) diff --git a/performance_schema/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table_row.go 
b/p_s/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table_row.go similarity index 100% rename from performance_schema/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table_row.go rename to p_s/table_lock_waits_summary_by_table/table_lock_waits_summary_by_table_row.go diff --git a/state/state.go b/state/state.go index 0bacb2e..e0056ba 100644 --- a/state/state.go +++ b/state/state.go @@ -9,11 +9,12 @@ import ( "strings" "time" + "github.com/sjmudd/pstop/i_s" "github.com/sjmudd/pstop/lib" - fsbi "github.com/sjmudd/pstop/performance_schema/file_summary_by_instance" - "github.com/sjmudd/pstop/performance_schema/ps_table" - tiwsbt "github.com/sjmudd/pstop/performance_schema/table_io_waits_summary_by_table" - tlwsbt "github.com/sjmudd/pstop/performance_schema/table_lock_waits_summary_by_table" + fsbi "github.com/sjmudd/pstop/p_s/file_summary_by_instance" + "github.com/sjmudd/pstop/p_s/ps_table" + tiwsbt "github.com/sjmudd/pstop/p_s/table_io_waits_summary_by_table" + tlwsbt "github.com/sjmudd/pstop/p_s/table_lock_waits_summary_by_table" "github.com/sjmudd/pstop/screen" "github.com/sjmudd/pstop/version" ) @@ -26,6 +27,7 @@ const ( showOps = iota showIO = iota showLocks = iota + showUsers = iota ) type State struct { @@ -36,6 +38,7 @@ type State struct { fsbi ps_table.Tabler // ufsbi.File_summary_by_instance tiwsbt tiwsbt.Table_io_waits_summary_by_table tlwsbt ps_table.Tabler // tlwsbt.Table_lock_waits_summary_by_table + users i_s.Processlist screen screen.TermboxScreen show Show mysql_version string @@ -59,6 +62,8 @@ func (state *State) Setup(dbh *sql.DB) { state.tlwsbt.SetNow() state.tiwsbt.SetWantRelativeStats(state.want_relative_stats) state.tiwsbt.SetNow() + state.users.SetWantRelativeStats(state.want_relative_stats) // ignored + state.users.SetNow() // ignored state.ResetDBStatistics() @@ -110,6 +115,8 @@ func (state *State) Collect() { state.fsbi.Collect(state.dbh) case showLocks: state.tlwsbt.Collect(state.dbh) + case showUsers: + state.users.Collect(state.dbh) } lib.Logger.Println("state.Collect() took", time.Duration(time.Since(start)).String()) } @@ -158,13 +165,15 @@ func (state *State) Display() { state.displayIO() case showLocks: state.displayLocks() + case showUsers: + state.displayUsers() } } } // change to the next display mode func (state *State) DisplayNext() { - if state.show == showLocks { + if state.show == showUsers { state.show = showLatency } else { state.show++ @@ -200,8 +209,10 @@ func (state State) displayLine0() { initial = state.fsbi.Last() case showLocks: initial = state.tlwsbt.Last() + case showUsers: + initial = state.users.Last() default: - initial = time.Now() // THIS IS WRONG !!! + // should not get here ! 
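+		// With the time.Now() fallback removed, an unexpected show value
+		// would leave initial at whatever it already held (presumably the
+		// zero time.Time; its declaration is outside this hunk), so the
+		// elapsed time printed below would be meaningless but harmless.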
} d := now.Sub(initial) @@ -223,6 +234,8 @@ func (state State) displayDescription() { description = state.fsbi.Description() case showLocks: description = state.tlwsbt.Description() + case showUsers: + description = state.users.Description() } state.screen.PrintAt(0, 1, description) @@ -300,6 +313,30 @@ func (state *State) displayLocks() { state.screen.PrintAt(0, state.screen.Height()-1, state.tlwsbt.TotalRowContent()) } +func (state *State) displayUsers() { + state.screen.PrintAt(0, 2, state.users.Headings()) + + // print out the data + max_rows := state.screen.Height() - 3 + row_content := state.users.RowContent(max_rows) + + // print out rows + for k := range row_content { + y := 3 + k + state.screen.PrintAt(0, y, row_content[k]) + } + // print out empty rows + for k := len(row_content); k < (state.screen.Height() - 3); k++ { + y := 3 + k + if y < state.screen.Height()-1 { + state.screen.PrintAt(0, y, state.users.EmptyRowContent()) + } + } + + // print out the totals at the bottom + state.screen.PrintAt(0, state.screen.Height()-1, state.users.TotalRowContent()) +} + // do we want to show all p_s data? func (state State) WantRelativeStats() bool { return state.want_relative_stats