-package i_s
+package processlist
import (
"fmt"
// This file contains the library routines for managing the
// table_io_waits_by_table table.
-package i_s
+package processlist
import (
"database/sql"
*/
// a row from information_schema.processlist
-type processlist_row struct {
+type table_row struct {
ID uint64
USER string
HOST string
STATE string
INFO string
}
-type processlist_rows []processlist_row
+type table_rows []table_row
// get the output of I_S.PROCESSLIST
-func select_processlist(dbh *sql.DB) processlist_rows {
- var t processlist_rows
+func select_processlist(dbh *sql.DB) table_rows {
+ var t table_rows
var id sql.NullInt64
var user sql.NullString
var host sql.NullString
defer rows.Close()
for rows.Next() {
- var r processlist_row
+ var r table_row
if err := rows.Scan(
&id,
&user,
}
// describe a whole row
-func (r processlist_row) String() string {
+func (r table_row) String() string {
return fmt.Sprintf("FIXME otuput of i_s")
}
// describe a whole table
-func (t processlist_rows) String() string {
+func (t table_rows) String() string {
return fmt.Sprintf("FIXME otuput of i_s")
}
//
// This file contains the library routines for managing the
// table_io_waits_by_table table.
-package i_s
+package processlist
import (
"database/sql"
type map_string_int map[string]int
// a table of rows
-type Processlist struct {
+type Object struct {
p_s.RelativeStats
p_s.InitialTime
- current processlist_rows // processlist
+ current table_rows // processlist
results pl_by_user_rows // results by user
totals pl_by_user_row // totals of results
}
// Collect() collects data from the db, updating initial
// values if needed, and then subtracting initial values if we want
// relative values, after which it stores totals.
-func (t *Processlist) Collect(dbh *sql.DB) {
- lib.Logger.Println("Processlist.Collect() - starting collection of data")
+func (t *Object) Collect(dbh *sql.DB) {
+ lib.Logger.Println("Object.Collect() - starting collection of data")
start := time.Now()
t.current = select_processlist(dbh)
// lib.Logger.Println( "- collecting t.totals from t.results" )
t.totals = t.results.totals()
- lib.Logger.Println("Processlist.Collect() END, took:", time.Duration(time.Since(start)).String())
+ lib.Logger.Println("Object.Collect() END, took:", time.Duration(time.Since(start)).String())
}
-func (t *Processlist) Headings() string {
+func (t *Object) Headings() string {
return t.results.Headings()
}
-func (t Processlist) EmptyRowContent() string {
+func (t Object) EmptyRowContent() string {
return t.results.emptyRowContent()
}
-func (t Processlist) TotalRowContent() string {
+func (t Object) TotalRowContent() string {
return t.totals.row_content(t.totals)
}
-func (t Processlist) RowContent(max_rows int) []string {
+func (t Object) RowContent(max_rows int) []string {
rows := make([]string, 0, max_rows)
for i := range t.results {
return rows
}
-func (t Processlist) Description() string {
+func (t Object) Description() string {
count := t.count_rows()
return fmt.Sprintf("Activity by Username (processlist) %d rows", count)
}
-func (t Processlist) count_rows() int {
+func (t Object) count_rows() int {
var count int
for row := range t.results {
if t.results[row].username != "" {
}
// read in processlist and add the appropriate values into a new pl_by_user table
-func (t *Processlist) processlist2by_user() {
- lib.Logger.Println("Processlist.processlist2by_user() START")
+func (t *Object) processlist2by_user() {
+ lib.Logger.Println("Object.processlist2by_user() START")
var re_active_repl_master_thread *regexp.Regexp = regexp.MustCompile("Sending binlog event to slave")
var re_select *regexp.Regexp = regexp.MustCompile(`(?i)SELECT`) // make case insensitive
t.totals = t.results.totals()
- lib.Logger.Println("Processlist.processlist2by_user() END")
+ lib.Logger.Println("Object.processlist2by_user() END")
}
+++ /dev/null
-// p_s - library routines for pstop.
-//
-// This file contains the library routines for managing the
-// file_summary_by_instance table.
-package file_summary_by_instance
-
-import (
- "database/sql"
- "fmt"
- "time"
-
- "github.com/sjmudd/pstop/lib"
- "github.com/sjmudd/pstop/p_s"
-)
-
-/*
-CREATE TABLE `file_summary_by_instance` (
- `FILE_NAME` varchar(512) NOT NULL,
- `EVENT_NAME` varchar(128) NOT NULL, // not collected
- `OBJECT_INSTANCE_BEGIN` bigint(20) unsigned NOT NULL, // not collected
- `COUNT_STAR` bigint(20) unsigned NOT NULL,
- `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
- `MIN_TIMER_WAIT` bigint(20) unsigned NOT NULL,
- `AVG_TIMER_WAIT` bigint(20) unsigned NOT NULL,
- `MAX_TIMER_WAIT` bigint(20) unsigned NOT NULL,
- `COUNT_READ` bigint(20) unsigned NOT NULL,
- `SUM_TIMER_READ` bigint(20) unsigned NOT NULL,
- `MIN_TIMER_READ` bigint(20) unsigned NOT NULL,
- `AVG_TIMER_READ` bigint(20) unsigned NOT NULL,
- `MAX_TIMER_READ` bigint(20) unsigned NOT NULL,
- `SUM_NUMBER_OF_BYTES_READ` bigint(20) NOT NULL,
- `COUNT_WRITE` bigint(20) unsigned NOT NULL,
- `SUM_TIMER_WRITE` bigint(20) unsigned NOT NULL,
- `MIN_TIMER_WRITE` bigint(20) unsigned NOT NULL,
- `AVG_TIMER_WRITE` bigint(20) unsigned NOT NULL,
- `MAX_TIMER_WRITE` bigint(20) unsigned NOT NULL,
- `SUM_NUMBER_OF_BYTES_WRITE` bigint(20) NOT NULL,
- `COUNT_MISC` bigint(20) unsigned NOT NULL,
- `SUM_TIMER_MISC` bigint(20) unsigned NOT NULL,
- `MIN_TIMER_MISC` bigint(20) unsigned NOT NULL,
- `AVG_TIMER_MISC` bigint(20) unsigned NOT NULL,
- `MAX_TIMER_MISC` bigint(20) unsigned NOT NULL
-) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
-1 row in set (0.00 sec)
-
-*/
-
-// a table of rows
-type File_summary_by_instance struct {
- p_s.RelativeStats
- p_s.InitialTime
- initial file_summary_by_instance_rows
- current file_summary_by_instance_rows
- results file_summary_by_instance_rows
- totals file_summary_by_instance_row
- global_variables map[string]string
-}
-
-// reset the statistics to current values
-func (t *File_summary_by_instance) SyncReferenceValues() {
- t.SetNow()
- t.initial = make(file_summary_by_instance_rows, len(t.current))
- copy(t.initial, t.current)
-
- t.results = make(file_summary_by_instance_rows, len(t.current))
- copy(t.results, t.current)
-
- if t.WantRelativeStats() {
- t.results.subtract(t.initial) // should be 0 if relative
- }
-
- t.results.sort()
- t.totals = t.results.totals()
-}
-
-// Collect data from the db, then merge it in.
-func (t *File_summary_by_instance) Collect(dbh *sql.DB) {
- start := time.Now()
- // UPDATE current from db handle
- t.current = merge_by_table_name(select_fsbi_rows(dbh), t.global_variables)
-
- // copy in initial data if it was not there
- if len(t.initial) == 0 && len(t.current) > 0 {
- t.initial = make(file_summary_by_instance_rows, len(t.current))
- copy(t.initial, t.current)
- }
-
- // check for reload initial characteristics
- if t.initial.needs_refresh(t.current) {
- t.initial = make(file_summary_by_instance_rows, len(t.current))
- copy(t.initial, t.current)
- }
-
- // update results to current value
- t.results = make(file_summary_by_instance_rows, len(t.current))
- copy(t.results, t.current)
-
- // make relative if need be
- if t.WantRelativeStats() {
- t.results.subtract(t.initial)
- }
-
- // sort the results
- t.results.sort()
-
- // setup the totals
- t.totals = t.results.totals()
- lib.Logger.Println("File_summary_by_instance.Collect() took:", time.Duration(time.Since(start)).String())
-}
-
-// return the headings for a table
-func (t File_summary_by_instance) Headings() string {
- var r file_summary_by_instance_row
-
- return r.headings()
-}
-
-// return the rows we need for displaying
-func (t File_summary_by_instance) RowContent(max_rows int) []string {
- rows := make([]string, 0, max_rows)
-
- for i := range t.results {
- if i < max_rows {
- rows = append(rows, t.results[i].row_content(t.totals))
- }
- }
-
- return rows
-}
-
-// return all the totals
-func (t File_summary_by_instance) TotalRowContent() string {
- return t.totals.row_content(t.totals)
-}
-
-// return an empty string of data (for filling in)
-func (t File_summary_by_instance) EmptyRowContent() string {
- var emtpy file_summary_by_instance_row
- return emtpy.row_content(emtpy)
-}
-
-func (t File_summary_by_instance) Description() string {
- count := t.count_rows()
- return fmt.Sprintf("I/O Latency by File (file_summary_by_instance) %4d row(s) ", count)
-}
-
-// create a new structure and include various variable values:
-// - datadir, relay_log
-// There's no checking that these are actually provided!
-func NewFileSummaryByInstance(global_variables map[string]string) *File_summary_by_instance {
- n := new(File_summary_by_instance)
-
- n.global_variables = global_variables
-
- return n
-}
-
-func (t File_summary_by_instance) count_rows() int {
- var count int
- for row := range t.results {
- if t.results[row].SUM_TIMER_WAIT > 0 {
- count++
- }
- }
- return count
-}
cache key_value_cache.KeyValueCache
)
-type file_summary_by_instance_row struct {
+type table_row struct {
FILE_NAME string
COUNT_STAR uint64
}
// represents a table or set of rows
-type file_summary_by_instance_rows []file_summary_by_instance_row
+type table_rows []table_row
// Return the name using the FILE_NAME attribute.
-func (r *file_summary_by_instance_row) name() string {
+func (r *table_row) name() string {
return r.FILE_NAME
}
// Return a formatted pretty name for the row.
-func (r *file_summary_by_instance_row) pretty_name() string {
+func (r *table_row) pretty_name() string {
s := r.name()
if len(s) > 30 {
s = s[:29]
return s
}
-func (r *file_summary_by_instance_row) headings() string {
+func (r *table_row) headings() string {
return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s|%8s %8s|%8s %6s %6s %6s",
"Table Name",
"Latency",
}
// generate a printable result
-func (row *file_summary_by_instance_row) row_content(totals file_summary_by_instance_row) string {
+func (row *table_row) row_content(totals table_row) string {
var name string = row.pretty_name()
// We assume that if COUNT_STAR = 0 then there's no data at all...
lib.FormatPct(lib.MyDivide(row.COUNT_MISC, row.COUNT_STAR)))
}
-func (this *file_summary_by_instance_row) add(other file_summary_by_instance_row) {
+func (this *table_row) add(other table_row) {
this.COUNT_STAR += other.COUNT_STAR
this.COUNT_READ += other.COUNT_READ
this.COUNT_WRITE += other.COUNT_WRITE
this.SUM_NUMBER_OF_BYTES_WRITE += other.SUM_NUMBER_OF_BYTES_WRITE
}
-func (this *file_summary_by_instance_row) subtract(other file_summary_by_instance_row) {
+func (this *table_row) subtract(other table_row) {
this.COUNT_STAR -= other.COUNT_STAR
this.COUNT_READ -= other.COUNT_READ
this.COUNT_WRITE -= other.COUNT_WRITE
}
// return the totals of a slice of rows
-func (t file_summary_by_instance_rows) totals() file_summary_by_instance_row {
- var totals file_summary_by_instance_row
+func (t table_rows) totals() table_row {
+ var totals table_row
totals.FILE_NAME = "Totals"
for i := range t {
// From the original FILE_NAME we want to generate a simpler name to use.
// This simpler name may also merge several different filenames into one.
-func (t file_summary_by_instance_row) simple_name(global_variables map[string]string) string {
+func (t table_row) simple_name(global_variables map[string]string) string {
path := t.FILE_NAME
// Convert the imported "table" to a merged one with merged data.
// Combine all entries with the same "FILE_NAME" by adding their values.
-func merge_by_table_name(orig file_summary_by_instance_rows, global_variables map[string]string) file_summary_by_instance_rows {
+func merge_by_table_name(orig table_rows, global_variables map[string]string) table_rows {
start := time.Now()
- t := make(file_summary_by_instance_rows, 0, len(orig))
+ t := make(table_rows, 0, len(orig))
- m := make(map[string]file_summary_by_instance_row)
+ m := make(map[string]table_row)
// iterate over source table
for i := range orig {
var file_name string
- var new_row file_summary_by_instance_row
+ var new_row table_row
orig_row := orig[i]
if orig_row.COUNT_STAR > 0 {
return t
}
-// Select the raw data from the database into file_summary_by_instance_rows
+// Select the raw data from the database into table_rows
// - filter out empty values
// - merge rows with the same name into a single row
// - change FILE_NAME into a more descriptive value.
-func select_fsbi_rows(dbh *sql.DB) file_summary_by_instance_rows {
- var t file_summary_by_instance_rows
+func select_rows(dbh *sql.DB) table_rows {
+ var t table_rows
start := time.Now()
sql := "SELECT FILE_NAME, COUNT_STAR, SUM_TIMER_WAIT, COUNT_READ, SUM_TIMER_READ, SUM_NUMBER_OF_BYTES_READ, COUNT_WRITE, SUM_TIMER_WRITE, SUM_NUMBER_OF_BYTES_WRITE, COUNT_MISC, SUM_TIMER_MISC FROM file_summary_by_instance"
defer rows.Close()
for rows.Next() {
- var r file_summary_by_instance_row
+ var r table_row
if err := rows.Scan(&r.FILE_NAME, &r.COUNT_STAR, &r.SUM_TIMER_WAIT, &r.COUNT_READ, &r.SUM_TIMER_READ, &r.SUM_NUMBER_OF_BYTES_READ, &r.COUNT_WRITE, &r.SUM_TIMER_WRITE, &r.SUM_NUMBER_OF_BYTES_WRITE, &r.COUNT_MISC, &r.SUM_TIMER_MISC); err != nil {
log.Fatal(err)
if err := rows.Err(); err != nil {
log.Fatal(err)
}
- lib.Logger.Println("select_fsbi_rows() took:", time.Duration(time.Since(start)).String())
+ lib.Logger.Println("select_rows() took:", time.Duration(time.Since(start)).String())
return t
}
// remove the initial values from those rows where there's a match
// - if we find a row we can't match ignore it
-func (this *file_summary_by_instance_rows) subtract(initial file_summary_by_instance_rows) {
+func (this *table_rows) subtract(initial table_rows) {
i_by_name := make(map[string]int)
// iterate over rows by name
}
}
-func (t file_summary_by_instance_rows) Len() int { return len(t) }
-func (t file_summary_by_instance_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t file_summary_by_instance_rows) Less(i, j int) bool {
+func (t table_rows) Len() int { return len(t) }
+func (t table_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t table_rows) Less(i, j int) bool {
return (t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT) ||
((t[i].SUM_TIMER_WAIT == t[j].SUM_TIMER_WAIT) && (t[i].FILE_NAME < t[j].FILE_NAME))
}
-func (t *file_summary_by_instance_rows) sort() {
+func (t *table_rows) sort() {
sort.Sort(t)
}
// if the data in t2 is "newer", "has more values" than t then it needs refreshing.
// check this by comparing totals.
-func (t file_summary_by_instance_rows) needs_refresh(t2 file_summary_by_instance_rows) bool {
+func (t table_rows) needs_refresh(t2 table_rows) bool {
my_totals := t.totals()
t2_totals := t2.totals()
--- /dev/null
+// p_s - library routines for pstop.
+//
+// This file contains the library routines for managing the
+// file_summary_by_instance table.
+package file_summary_by_instance
+
+import (
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/sjmudd/pstop/lib"
+ "github.com/sjmudd/pstop/p_s"
+)
+
+// a table of rows
+type Object struct {
+ p_s.RelativeStats
+ p_s.InitialTime
+ initial table_rows // snapshot taken at the reference point; baseline for relative stats
+ current table_rows // latest rows collected from the database
+ results table_rows // current data, minus initial when relative stats are wanted
+ totals table_row // totals of results
+ global_variables map[string]string // server globals (e.g. datadir, relay_log) used to simplify file names
+}
+
+// reset the statistics to current values
+func (t *Object) SyncReferenceValues() {
+ t.SetNow()
+ t.initial = make(table_rows, len(t.current))
+ copy(t.initial, t.current)
+
+ t.results = make(table_rows, len(t.current))
+ copy(t.results, t.current)
+
+ if t.WantRelativeStats() {
+ t.results.subtract(t.initial) // should be 0 if relative
+ }
+
+ t.results.sort()
+ t.totals = t.results.totals()
+}
+
+// Collect data from the db, then merge it in.
+func (t *Object) Collect(dbh *sql.DB) {
+ start := time.Now()
+ // UPDATE current from db handle
+ t.current = merge_by_table_name(select_rows(dbh), t.global_variables)
+
+ // copy in initial data if it was not there
+ if len(t.initial) == 0 && len(t.current) > 0 {
+ t.initial = make(table_rows, len(t.current))
+ copy(t.initial, t.current)
+ }
+
+ // check for reload initial characteristics
+ if t.initial.needs_refresh(t.current) {
+ t.initial = make(table_rows, len(t.current))
+ copy(t.initial, t.current)
+ }
+
+ // update results to current value
+ t.results = make(table_rows, len(t.current))
+ copy(t.results, t.current)
+
+ // make relative if need be
+ if t.WantRelativeStats() {
+ t.results.subtract(t.initial)
+ }
+
+ // sort the results
+ t.results.sort()
+
+ // setup the totals
+ t.totals = t.results.totals()
+ lib.Logger.Println("Object.Collect() took:", time.Duration(time.Since(start)).String())
+}
+
+// return the headings for a table
+func (t Object) Headings() string {
+ var r table_row
+
+ return r.headings()
+}
+
+// return the rows we need for displaying
+func (t Object) RowContent(max_rows int) []string {
+ rows := make([]string, 0, max_rows)
+
+ for i := range t.results {
+ if i < max_rows {
+ rows = append(rows, t.results[i].row_content(t.totals))
+ }
+ }
+
+ return rows
+}
+
+// return all the totals
+func (t Object) TotalRowContent() string {
+ return t.totals.row_content(t.totals)
+}
+
+// return an empty string of data (for filling in)
+func (t Object) EmptyRowContent() string {
+ // a zero-value row formatted against itself yields an all-empty line
+ var empty table_row
+ return empty.row_content(empty)
+}
+
+func (t Object) Description() string {
+ count := t.count_rows()
+ return fmt.Sprintf("I/O Latency by File (file_summary_by_instance) %4d row(s) ", count)
+}
+
+// create a new structure and include various variable values:
+// - datadir, relay_log
+// There's no checking that these are actually provided!
+func NewFileSummaryByInstance(global_variables map[string]string) *Object {
+ n := new(Object)
+
+ n.global_variables = global_variables
+
+ return n
+}
+
+// count_rows returns the number of result rows whose SUM_TIMER_WAIT is
+// non-zero, i.e. rows that recorded some activity.
+func (t Object) count_rows() int {
+ var count int
+ for row := range t.results {
+ if t.results[row].SUM_TIMER_WAIT > 0 {
+ count++
+ }
+ }
+ return count
+}
want_relative_stats bool
}
+// set if we want relative stats
func (wrs *RelativeStats) SetWantRelativeStats(want_relative_stats bool) {
wrs.want_relative_stats = want_relative_stats
}
+// return if we want relative stats
func (wrs RelativeStats) WantRelativeStats() bool {
return wrs.want_relative_stats
}
+// manage the time the statistics were taken
package p_s
import (
"time"
)
+// object to hold the last collection time
type InitialTime struct {
initial_time time.Time
}
+// reflect we've just collected statistics
func (t *InitialTime) SetNow() {
t.initial_time = time.Now()
}
+// return the last time we collected statistics
func (t InitialTime) Last() time.Time {
return t.initial_time
}
+// manage the configuration of performance_schema.setup_instruments
package setup_instruments
import (
// We only match on the error number
// Error 1142: UPDATE command denied to user
// Error 1290: The MySQL server is running with the --read-only option so it cannot execute this statement
-var EXPECTED_UPDATE_ERRORS = []string { "Error 1142", "Error 1290" }
+var EXPECTED_UPDATE_ERRORS = []string{
+ "Error 1142",
+ "Error 1290",
+}
-type setup_instruments_row struct {
+// one row of performance_schema.setup_instruments
+type table_row struct {
NAME string
ENABLED string
TIMED string
}
+type table_rows []table_row
+
+// SetupInstruments "object"
type SetupInstruments struct {
update_succeeded bool
- rows []setup_instruments_row
+ rows table_rows
}
// Change settings to monitor stage/sql/%
func (si *SetupInstruments) EnableStageMonitoring(dbh *sql.DB) {
sql := "SELECT NAME, ENABLED, TIMED FROM setup_instruments WHERE NAME LIKE 'stage/sql/%' AND ( enabled <> 'YES' OR timed <> 'YES' )"
collecting := "Collecting p_s.setup_instruments stage/sql configuration settings"
- updating := "Updating p_s.setup_instruments to allow stage/sql configuration"
+ updating := "Updating p_s.setup_instruments to allow stage/sql configuration"
si.ConfigureSetupInstruments(dbh, sql, collecting, updating)
}
func (si *SetupInstruments) EnableMutexMonitoring(dbh *sql.DB) {
sql := "SELECT NAME, ENABLED, TIMED FROM setup_instruments WHERE NAME LIKE 'wait/synch/mutex/%' AND ( enabled <> 'YES' OR timed <> 'YES' )"
collecting := "Collecting p_s.setup_instruments wait/synch/mutex configuration settings"
- updating := "Updating p_s.setup_instruments to allow wait/synch/mutex configuration"
+ updating := "Updating p_s.setup_instruments to allow wait/synch/mutex configuration"
si.ConfigureSetupInstruments(dbh, sql, collecting, updating)
}
func (si *SetupInstruments) ConfigureSetupInstruments(dbh *sql.DB, sql string, collecting, updating string) {
// setup the old values in case they're not set
if si.rows == nil {
- si.rows = make([]setup_instruments_row, 0, 500)
+ si.rows = make([]table_row, 0, 500)
}
lib.Logger.Println(collecting)
count := 0
for rows.Next() {
- var r setup_instruments_row
+ var r table_row
if err := rows.Scan(
&r.NAME,
&r.ENABLED,
lib.Logger.Println(updating)
count = 0
+ // Prepare once with placeholders instead of concatenating SQL per row.
+ update_sql := "UPDATE setup_instruments SET enabled = ?, TIMED = ? WHERE NAME = ?"
+ stmt, err := dbh.Prepare(update_sql)
+ if err != nil {
+ log.Fatal(err)
+ }
+ // close the server-side prepared statement when done
+ defer stmt.Close()
for i := range si.rows {
- sql := "UPDATE setup_instruments SET enabled = 'YES', TIMED = 'YES' WHERE NAME = '" + si.rows[i].NAME + "'"
- if _, err := dbh.Exec(sql); err == nil {
+ // Stmt.Exec takes only the bind values for the 3 placeholders;
+ // passing update_sql again would supply 4 args and always fail.
+ if _, err := stmt.Exec("YES", "YES", si.rows[i].NAME); err == nil {
si.update_succeeded = true
} else {
found_expected := false
break
}
}
- if ! found_expected {
+ if !found_expected {
log.Fatal(err)
}
- lib.Logger.Println( "Insufficient privileges to UPDATE setup_instruments: " + err.Error() )
+ lib.Logger.Println("Insufficient privileges to UPDATE setup_instruments: " + err.Error())
break
}
count++
}
}
-
-// restore any changed rows back to their original state
+// restore setup_instruments rows to their previous settings
func (si *SetupInstruments) RestoreConfiguration(dbh *sql.DB) {
// If the previous update didn't work then don't try to restore
- if ! si.update_succeeded {
+ if !si.update_succeeded {
lib.Logger.Println("Not restoring p_s.setup_instruments to its original settings as previous UPDATE had failed")
return
} else {
// update the rows which need to be set - do multiple updates but I don't care
count := 0
+ // Prepare once with placeholders instead of concatenating SQL per row.
+ update_sql := "UPDATE setup_instruments SET enabled = ?, TIMED = ? WHERE NAME = ?"
+ stmt, err := dbh.Prepare(update_sql)
+ if err != nil {
+ log.Fatal(err)
+ }
+ // close the server-side prepared statement when done
+ defer stmt.Close()
for i := range si.rows {
- sql := "UPDATE setup_instruments SET enabled = '" + si.rows[i].ENABLED + "', TIMED = '" + si.rows[i].TIMED + "' WHERE NAME = '" + si.rows[i].NAME + "'"
- if _, err := dbh.Exec(sql); err != nil {
+ // Stmt.Exec takes only the bind values for the 3 placeholders;
+ // passing update_sql again would supply 4 args and always fail.
+ if _, err := stmt.Exec(si.rows[i].ENABLED, si.rows[i].TIMED, si.rows[i].NAME); err != nil {
log.Fatal(err)
}
count++
// - filter out empty values
// - merge rows with the same name into a single row
// - change FILE_NAME into a more descriptive value.
-func select_tlwsbt_rows(dbh *sql.DB) table_rows {
+func select_rows(dbh *sql.DB) table_rows {
var t table_rows
sql := "SELECT OBJECT_TYPE, OBJECT_SCHEMA, OBJECT_NAME, COUNT_STAR, SUM_TIMER_WAIT, SUM_TIMER_READ, SUM_TIMER_WRITE, SUM_TIMER_READ_WITH_SHARED_LOCKS, SUM_TIMER_READ_HIGH_PRIORITY, SUM_TIMER_READ_NO_INSERT, SUM_TIMER_READ_NORMAL, SUM_TIMER_READ_EXTERNAL, SUM_TIMER_WRITE_ALLOW_WRITE, SUM_TIMER_WRITE_CONCURRENT_INSERT, SUM_TIMER_WRITE_LOW_PRIORITY, SUM_TIMER_WRITE_NORMAL, SUM_TIMER_WRITE_EXTERNAL FROM table_lock_waits_summary_by_table WHERE COUNT_STAR > 0"
// Collect data from the db, then merge it in.
func (t *Object) Collect(dbh *sql.DB) {
start := time.Now()
- t.current = select_tlwsbt_rows(dbh)
+ t.current = select_rows(dbh)
if len(t.initial) == 0 && len(t.current) > 0 {
t.initial = make(table_rows, len(t.current))
"strings"
"time"
- "github.com/sjmudd/pstop/i_s"
+ "github.com/sjmudd/pstop/i_s/processlist"
"github.com/sjmudd/pstop/lib"
ewsgben "github.com/sjmudd/pstop/p_s/events_waits_summary_global_by_event_name"
essgben "github.com/sjmudd/pstop/p_s/events_stages_summary_global_by_event_name"
tlwsbt ps_table.Tabler // tlwsbt.Table_lock_waits_summary_by_table
ewsgben ps_table.Tabler // ewsgben.Events_waits_summary_global_by_event_name
essgben ps_table.Tabler // essgben.Events_stages_summary_global_by_event_name
- users i_s.Processlist
+ users processlist.Object
screen screen.TermboxScreen
show Show
mysql_version string