more cleanups
author Simon J Mudd <sjmudd@pobox.com>
Thu, 15 Jan 2015 21:50:05 +0000 (22:50 +0100)
committer Simon J Mudd <sjmudd@pobox.com>
Thu, 15 Jan 2015 21:50:05 +0000 (22:50 +0100)
12 files changed:
i_s/processlist/pl_by_user.go [moved from i_s/pl_by_user.go with 99% similarity]
i_s/processlist/private.go [moved from i_s/processlist_row.go with 87% similarity]
i_s/processlist/public.go [moved from i_s/processlist.go with 86% similarity]
p_s/file_summary_by_instance/file_summary_by_instance.go [deleted file]
p_s/file_summary_by_instance/private.go [moved from p_s/file_summary_by_instance/file_summary_by_instance_row.go with 86% similarity]
p_s/file_summary_by_instance/public.go [new file with mode: 0644]
p_s/relative_stats.go
p_s/relative_time.go
p_s/setup_instruments/setup_instruments.go
p_s/table_lock_waits_summary_by_table/private.go
p_s/table_lock_waits_summary_by_table/public.go
state/state.go

similarity index 99%
rename from i_s/pl_by_user.go
rename to i_s/processlist/pl_by_user.go
index 0098417..3100cc6 100644 (file)
@@ -1,4 +1,4 @@
-package i_s
+package processlist
 
 import (
        "fmt"
similarity index 87%
rename from i_s/processlist_row.go
rename to i_s/processlist/private.go
index 470d545..fe1792a 100644 (file)
@@ -1,6 +1,6 @@
 // This file contains the library routines for managing the
 // table_io_waits_by_table table.
-package i_s
+package processlist
 
 import (
        "database/sql"
@@ -22,7 +22,7 @@ CREATE TEMPORARY TABLE `PROCESSLIST` (
 */
 
 // a row from information_schema.processlist
-type processlist_row struct {
+type table_row struct {
        ID      uint64
        USER    string
        HOST    string
@@ -32,11 +32,11 @@ type processlist_row struct {
        STATE   string
        INFO    string
 }
-type processlist_rows []processlist_row
+type table_rows []table_row
 
 // get the output of I_S.PROCESSLIST
-func select_processlist(dbh *sql.DB) processlist_rows {
-       var t processlist_rows
+func select_processlist(dbh *sql.DB) table_rows {
+       var t table_rows
        var id sql.NullInt64
        var user sql.NullString
        var host sql.NullString
@@ -57,7 +57,7 @@ func select_processlist(dbh *sql.DB) processlist_rows {
        defer rows.Close()
 
        for rows.Next() {
-               var r processlist_row
+               var r table_row
                if err := rows.Scan(
                        &id,
                        &user,
@@ -91,11 +91,11 @@ func select_processlist(dbh *sql.DB) processlist_rows {
 }
 
 // describe a whole row
-func (r processlist_row) String() string {
+func (r table_row) String() string {
        return fmt.Sprintf("FIXME otuput of i_s")
 }
 
 // describe a whole table
-func (t processlist_rows) String() string {
+func (t table_rows) String() string {
        return fmt.Sprintf("FIXME otuput of i_s")
 }
similarity index 86%
rename from i_s/processlist.go
rename to i_s/processlist/public.go
index 81178e9..dcd0a25 100644 (file)
@@ -2,7 +2,7 @@
 //
 // This file contains the library routines for managing the
 // table_io_waits_by_table table.
-package i_s
+package processlist
 
 import (
        "database/sql"
@@ -17,10 +17,10 @@ import (
 type map_string_int map[string]int
 
 // a table of rows
-type Processlist struct {
+type Object struct {
        p_s.RelativeStats
        p_s.InitialTime
-       current processlist_rows // processlist
+       current table_rows // processlist
        results pl_by_user_rows  // results by user
        totals  pl_by_user_row   // totals of results
 }
@@ -28,8 +28,8 @@ type Processlist struct {
 // Collect() collects data from the db, updating initial
 // values if needed, and then subtracting initial values if we want
 // relative values, after which it stores totals.
-func (t *Processlist) Collect(dbh *sql.DB) {
-       lib.Logger.Println("Processlist.Collect() - starting collection of data")
+func (t *Object) Collect(dbh *sql.DB) {
+       lib.Logger.Println("Object.Collect() - starting collection of data")
        start := time.Now()
 
        t.current = select_processlist(dbh)
@@ -41,22 +41,22 @@ func (t *Processlist) Collect(dbh *sql.DB) {
        // lib.Logger.Println( "- collecting t.totals from t.results" )
        t.totals = t.results.totals()
 
-       lib.Logger.Println("Processlist.Collect() END, took:", time.Duration(time.Since(start)).String())
+       lib.Logger.Println("Object.Collect() END, took:", time.Duration(time.Since(start)).String())
 }
 
-func (t *Processlist) Headings() string {
+func (t *Object) Headings() string {
        return t.results.Headings()
 }
 
-func (t Processlist) EmptyRowContent() string {
+func (t Object) EmptyRowContent() string {
        return t.results.emptyRowContent()
 }
 
-func (t Processlist) TotalRowContent() string {
+func (t Object) TotalRowContent() string {
        return t.totals.row_content(t.totals)
 }
 
-func (t Processlist) RowContent(max_rows int) []string {
+func (t Object) RowContent(max_rows int) []string {
        rows := make([]string, 0, max_rows)
 
        for i := range t.results {
@@ -68,12 +68,12 @@ func (t Processlist) RowContent(max_rows int) []string {
        return rows
 }
 
-func (t Processlist) Description() string {
+func (t Object) Description() string {
        count := t.count_rows()
        return fmt.Sprintf("Activity by Username (processlist) %d rows", count)
 }
 
-func (t Processlist) count_rows() int {
+func (t Object) count_rows() int {
        var count int
        for row := range t.results {
                if t.results[row].username != "" {
@@ -94,8 +94,8 @@ func get_hostname(h_p string) string {
 }
 
 // read in processlist and add the appropriate values into a new pl_by_user table
-func (t *Processlist) processlist2by_user() {
-       lib.Logger.Println("Processlist.processlist2by_user() START")
+func (t *Object) processlist2by_user() {
+       lib.Logger.Println("Object.processlist2by_user() START")
 
        var re_active_repl_master_thread *regexp.Regexp = regexp.MustCompile("Sending binlog event to slave")
        var re_select *regexp.Regexp = regexp.MustCompile(`(?i)SELECT`) // make case insensitive
@@ -195,5 +195,5 @@ func (t *Processlist) processlist2by_user() {
 
        t.totals = t.results.totals()
 
-       lib.Logger.Println("Processlist.processlist2by_user() END")
+       lib.Logger.Println("Object.processlist2by_user() END")
 }
diff --git a/p_s/file_summary_by_instance/file_summary_by_instance.go b/p_s/file_summary_by_instance/file_summary_by_instance.go
deleted file mode 100644 (file)
index 7a8f2d8..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-// p_s - library routines for pstop.
-//
-// This file contains the library routines for managing the
-// file_summary_by_instance table.
-package file_summary_by_instance
-
-import (
-       "database/sql"
-       "fmt"
-       "time"
-
-       "github.com/sjmudd/pstop/lib"
-       "github.com/sjmudd/pstop/p_s"
-)
-
-/*
-CREATE TABLE `file_summary_by_instance` (
-  `FILE_NAME` varchar(512) NOT NULL,
-  `EVENT_NAME` varchar(128) NOT NULL,                          // not collected
-  `OBJECT_INSTANCE_BEGIN` bigint(20) unsigned NOT NULL,                // not collected
-  `COUNT_STAR` bigint(20) unsigned NOT NULL,
-  `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
-  `MIN_TIMER_WAIT` bigint(20) unsigned NOT NULL,
-  `AVG_TIMER_WAIT` bigint(20) unsigned NOT NULL,
-  `MAX_TIMER_WAIT` bigint(20) unsigned NOT NULL,
-  `COUNT_READ` bigint(20) unsigned NOT NULL,
-  `SUM_TIMER_READ` bigint(20) unsigned NOT NULL,
-  `MIN_TIMER_READ` bigint(20) unsigned NOT NULL,
-  `AVG_TIMER_READ` bigint(20) unsigned NOT NULL,
-  `MAX_TIMER_READ` bigint(20) unsigned NOT NULL,
-  `SUM_NUMBER_OF_BYTES_READ` bigint(20) NOT NULL,
-  `COUNT_WRITE` bigint(20) unsigned NOT NULL,
-  `SUM_TIMER_WRITE` bigint(20) unsigned NOT NULL,
-  `MIN_TIMER_WRITE` bigint(20) unsigned NOT NULL,
-  `AVG_TIMER_WRITE` bigint(20) unsigned NOT NULL,
-  `MAX_TIMER_WRITE` bigint(20) unsigned NOT NULL,
-  `SUM_NUMBER_OF_BYTES_WRITE` bigint(20) NOT NULL,
-  `COUNT_MISC` bigint(20) unsigned NOT NULL,
-  `SUM_TIMER_MISC` bigint(20) unsigned NOT NULL,
-  `MIN_TIMER_MISC` bigint(20) unsigned NOT NULL,
-  `AVG_TIMER_MISC` bigint(20) unsigned NOT NULL,
-  `MAX_TIMER_MISC` bigint(20) unsigned NOT NULL
-) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
-1 row in set (0.00 sec)
-
-*/
-
-// a table of rows
-type File_summary_by_instance struct {
-       p_s.RelativeStats
-       p_s.InitialTime
-       initial          file_summary_by_instance_rows
-       current          file_summary_by_instance_rows
-       results          file_summary_by_instance_rows
-       totals           file_summary_by_instance_row
-       global_variables map[string]string
-}
-
-// reset the statistics to current values
-func (t *File_summary_by_instance) SyncReferenceValues() {
-       t.SetNow()
-       t.initial = make(file_summary_by_instance_rows, len(t.current))
-       copy(t.initial, t.current)
-
-       t.results = make(file_summary_by_instance_rows, len(t.current))
-       copy(t.results, t.current)
-
-       if t.WantRelativeStats() {
-               t.results.subtract(t.initial) // should be 0 if relative
-       }
-
-       t.results.sort()
-       t.totals = t.results.totals()
-}
-
-// Collect data from the db, then merge it in.
-func (t *File_summary_by_instance) Collect(dbh *sql.DB) {
-       start := time.Now()
-       // UPDATE current from db handle
-       t.current = merge_by_table_name(select_fsbi_rows(dbh), t.global_variables)
-
-       // copy in initial data if it was not there
-       if len(t.initial) == 0 && len(t.current) > 0 {
-               t.initial = make(file_summary_by_instance_rows, len(t.current))
-               copy(t.initial, t.current)
-       }
-
-       // check for reload initial characteristics
-       if t.initial.needs_refresh(t.current) {
-               t.initial = make(file_summary_by_instance_rows, len(t.current))
-               copy(t.initial, t.current)
-       }
-
-       // update results to current value
-       t.results = make(file_summary_by_instance_rows, len(t.current))
-       copy(t.results, t.current)
-
-       // make relative if need be
-       if t.WantRelativeStats() {
-               t.results.subtract(t.initial)
-       }
-
-       // sort the results
-       t.results.sort()
-
-       // setup the totals
-       t.totals = t.results.totals()
-       lib.Logger.Println("File_summary_by_instance.Collect() took:", time.Duration(time.Since(start)).String())
-}
-
-// return the headings for a table
-func (t File_summary_by_instance) Headings() string {
-       var r file_summary_by_instance_row
-
-       return r.headings()
-}
-
-// return the rows we need for displaying
-func (t File_summary_by_instance) RowContent(max_rows int) []string {
-       rows := make([]string, 0, max_rows)
-
-       for i := range t.results {
-               if i < max_rows {
-                       rows = append(rows, t.results[i].row_content(t.totals))
-               }
-       }
-
-       return rows
-}
-
-// return all the totals
-func (t File_summary_by_instance) TotalRowContent() string {
-       return t.totals.row_content(t.totals)
-}
-
-// return an empty string of data (for filling in)
-func (t File_summary_by_instance) EmptyRowContent() string {
-       var emtpy file_summary_by_instance_row
-       return emtpy.row_content(emtpy)
-}
-
-func (t File_summary_by_instance) Description() string {
-       count := t.count_rows()
-       return fmt.Sprintf("I/O Latency by File (file_summary_by_instance) %4d row(s)    ", count)
-}
-
-// create a new structure and include various variable values:
-// - datadir, relay_log
-// There's no checking that these are actually provided!
-func NewFileSummaryByInstance(global_variables map[string]string) *File_summary_by_instance {
-       n := new(File_summary_by_instance)
-
-       n.global_variables = global_variables
-
-       return n
-}
-
-func (t File_summary_by_instance) count_rows() int {
-       var count int
-       for row := range t.results {
-               if t.results[row].SUM_TIMER_WAIT > 0 {
-                       count++
-               }
-       }
-       return count
-}
@@ -72,7 +72,7 @@ var (
        cache key_value_cache.KeyValueCache
 )
 
-type file_summary_by_instance_row struct {
+type table_row struct {
        FILE_NAME string
 
        COUNT_STAR  uint64
@@ -90,15 +90,15 @@ type file_summary_by_instance_row struct {
 }
 
 // represents a table or set of rows
-type file_summary_by_instance_rows []file_summary_by_instance_row
+type table_rows []table_row
 
 // Return the name using the FILE_NAME attribute.
-func (r *file_summary_by_instance_row) name() string {
+func (r *table_row) name() string {
        return r.FILE_NAME
 }
 
 // Return a formatted pretty name for the row.
-func (r *file_summary_by_instance_row) pretty_name() string {
+func (r *table_row) pretty_name() string {
        s := r.name()
        if len(s) > 30 {
                s = s[:29]
@@ -106,7 +106,7 @@ func (r *file_summary_by_instance_row) pretty_name() string {
        return s
 }
 
-func (r *file_summary_by_instance_row) headings() string {
+func (r *table_row) headings() string {
        return fmt.Sprintf("%-30s %10s %6s|%6s %6s %6s|%8s %8s|%8s %6s %6s %6s",
                "Table Name",
                "Latency",
@@ -123,7 +123,7 @@ func (r *file_summary_by_instance_row) headings() string {
 }
 
 // generate a printable result
-func (row *file_summary_by_instance_row) row_content(totals file_summary_by_instance_row) string {
+func (row *table_row) row_content(totals table_row) string {
        var name string = row.pretty_name()
 
        // We assume that if COUNT_STAR = 0 then there's no data at all...
@@ -147,7 +147,7 @@ func (row *file_summary_by_instance_row) row_content(totals file_summary_by_inst
                lib.FormatPct(lib.MyDivide(row.COUNT_MISC, row.COUNT_STAR)))
 }
 
-func (this *file_summary_by_instance_row) add(other file_summary_by_instance_row) {
+func (this *table_row) add(other table_row) {
        this.COUNT_STAR += other.COUNT_STAR
        this.COUNT_READ += other.COUNT_READ
        this.COUNT_WRITE += other.COUNT_WRITE
@@ -162,7 +162,7 @@ func (this *file_summary_by_instance_row) add(other file_summary_by_instance_row
        this.SUM_NUMBER_OF_BYTES_WRITE += other.SUM_NUMBER_OF_BYTES_WRITE
 }
 
-func (this *file_summary_by_instance_row) subtract(other file_summary_by_instance_row) {
+func (this *table_row) subtract(other table_row) {
        this.COUNT_STAR -= other.COUNT_STAR
        this.COUNT_READ -= other.COUNT_READ
        this.COUNT_WRITE -= other.COUNT_WRITE
@@ -178,8 +178,8 @@ func (this *file_summary_by_instance_row) subtract(other file_summary_by_instanc
 }
 
 // return the totals of a slice of rows
-func (t file_summary_by_instance_rows) totals() file_summary_by_instance_row {
-       var totals file_summary_by_instance_row
+func (t table_rows) totals() table_row {
+       var totals table_row
        totals.FILE_NAME = "Totals"
 
        for i := range t {
@@ -206,7 +206,7 @@ func cleanup_path(path string) string {
 
 // From the original FILE_NAME we want to generate a simpler name to use.
 // This simpler name may also merge several different filenames into one.
-func (t file_summary_by_instance_row) simple_name(global_variables map[string]string) string {
+func (t table_row) simple_name(global_variables map[string]string) string {
 
        path := t.FILE_NAME
 
@@ -283,16 +283,16 @@ func (t file_summary_by_instance_row) simple_name(global_variables map[string]st
 
 // Convert the imported "table" to a merged one with merged data.
 // Combine all entries with the same "FILE_NAME" by adding their values.
-func merge_by_table_name(orig file_summary_by_instance_rows, global_variables map[string]string) file_summary_by_instance_rows {
+func merge_by_table_name(orig table_rows, global_variables map[string]string) table_rows {
        start := time.Now()
-       t := make(file_summary_by_instance_rows, 0, len(orig))
+       t := make(table_rows, 0, len(orig))
 
-       m := make(map[string]file_summary_by_instance_row)
+       m := make(map[string]table_row)
 
        // iterate over source table
        for i := range orig {
                var file_name string
-               var new_row file_summary_by_instance_row
+               var new_row table_row
                orig_row := orig[i]
 
                if orig_row.COUNT_STAR > 0 {
@@ -318,12 +318,12 @@ func merge_by_table_name(orig file_summary_by_instance_rows, global_variables ma
        return t
 }
 
-// Select the raw data from the database into file_summary_by_instance_rows
+// Select the raw data from the database into table_rows
 // - filter out empty values
 // - merge rows with the same name into a single row
 // - change FILE_NAME into a more descriptive value.
-func select_fsbi_rows(dbh *sql.DB) file_summary_by_instance_rows {
-       var t file_summary_by_instance_rows
+func select_rows(dbh *sql.DB) table_rows {
+       var t table_rows
        start := time.Now()
 
        sql := "SELECT FILE_NAME, COUNT_STAR, SUM_TIMER_WAIT, COUNT_READ, SUM_TIMER_READ, SUM_NUMBER_OF_BYTES_READ, COUNT_WRITE, SUM_TIMER_WRITE, SUM_NUMBER_OF_BYTES_WRITE, COUNT_MISC, SUM_TIMER_MISC FROM file_summary_by_instance"
@@ -335,7 +335,7 @@ func select_fsbi_rows(dbh *sql.DB) file_summary_by_instance_rows {
        defer rows.Close()
 
        for rows.Next() {
-               var r file_summary_by_instance_row
+               var r table_row
 
                if err := rows.Scan(&r.FILE_NAME, &r.COUNT_STAR, &r.SUM_TIMER_WAIT, &r.COUNT_READ, &r.SUM_TIMER_READ, &r.SUM_NUMBER_OF_BYTES_READ, &r.COUNT_WRITE, &r.SUM_TIMER_WRITE, &r.SUM_NUMBER_OF_BYTES_WRITE, &r.COUNT_MISC, &r.SUM_TIMER_MISC); err != nil {
                        log.Fatal(err)
@@ -345,14 +345,14 @@ func select_fsbi_rows(dbh *sql.DB) file_summary_by_instance_rows {
        if err := rows.Err(); err != nil {
                log.Fatal(err)
        }
-       lib.Logger.Println("select_fsbi_rows() took:", time.Duration(time.Since(start)).String())
+       lib.Logger.Println("select_rows() took:", time.Duration(time.Since(start)).String())
 
        return t
 }
 
 // remove the initial values from those rows where there's a match
 // - if we find a row we can't match ignore it
-func (this *file_summary_by_instance_rows) subtract(initial file_summary_by_instance_rows) {
+func (this *table_rows) subtract(initial table_rows) {
        i_by_name := make(map[string]int)
 
        // iterate over rows by name
@@ -368,20 +368,20 @@ func (this *file_summary_by_instance_rows) subtract(initial file_summary_by_inst
        }
 }
 
-func (t file_summary_by_instance_rows) Len() int      { return len(t) }
-func (t file_summary_by_instance_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t file_summary_by_instance_rows) Less(i, j int) bool {
+func (t table_rows) Len() int      { return len(t) }
+func (t table_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t table_rows) Less(i, j int) bool {
        return (t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT) ||
                ((t[i].SUM_TIMER_WAIT == t[j].SUM_TIMER_WAIT) && (t[i].FILE_NAME < t[j].FILE_NAME))
 }
 
-func (t *file_summary_by_instance_rows) sort() {
+func (t *table_rows) sort() {
        sort.Sort(t)
 }
 
 // if the data in t2 is "newer", "has more values" than t then it needs refreshing.
 // check this by comparing totals.
-func (t file_summary_by_instance_rows) needs_refresh(t2 file_summary_by_instance_rows) bool {
+func (t table_rows) needs_refresh(t2 table_rows) bool {
        my_totals := t.totals()
        t2_totals := t2.totals()
 
diff --git a/p_s/file_summary_by_instance/public.go b/p_s/file_summary_by_instance/public.go
new file mode 100644 (file)
index 0000000..408f445
--- /dev/null
@@ -0,0 +1,134 @@
+// p_s - library routines for pstop.
+//
+// This file contains the library routines for managing the
+// file_summary_by_instance table.
+package file_summary_by_instance
+
+import (
+       "database/sql"
+       "fmt"
+       "time"
+
+       "github.com/sjmudd/pstop/lib"
+       "github.com/sjmudd/pstop/p_s"
+)
+
+// a table of rows
+type Object struct {
+       p_s.RelativeStats
+       p_s.InitialTime
+       initial          table_rows
+       current          table_rows
+       results          table_rows
+       totals           table_row
+       global_variables map[string]string
+}
+
+// reset the statistics to current values
+func (t *Object) SyncReferenceValues() {
+       t.SetNow()
+       t.initial = make(table_rows, len(t.current))
+       copy(t.initial, t.current)
+
+       t.results = make(table_rows, len(t.current))
+       copy(t.results, t.current)
+
+       if t.WantRelativeStats() {
+               t.results.subtract(t.initial) // should be 0 if relative
+       }
+
+       t.results.sort()
+       t.totals = t.results.totals()
+}
+
+// Collect data from the db, then merge it in.
+func (t *Object) Collect(dbh *sql.DB) {
+       start := time.Now()
+       // UPDATE current from db handle
+       t.current = merge_by_table_name(select_rows(dbh), t.global_variables)
+
+       // copy in initial data if it was not there
+       if len(t.initial) == 0 && len(t.current) > 0 {
+               t.initial = make(table_rows, len(t.current))
+               copy(t.initial, t.current)
+       }
+
+       // check for reload initial characteristics
+       if t.initial.needs_refresh(t.current) {
+               t.initial = make(table_rows, len(t.current))
+               copy(t.initial, t.current)
+       }
+
+       // update results to current value
+       t.results = make(table_rows, len(t.current))
+       copy(t.results, t.current)
+
+       // make relative if need be
+       if t.WantRelativeStats() {
+               t.results.subtract(t.initial)
+       }
+
+       // sort the results
+       t.results.sort()
+
+       // setup the totals
+       t.totals = t.results.totals()
+       lib.Logger.Println("Object.Collect() took:", time.Duration(time.Since(start)).String())
+}
+
+// return the headings for a table
+func (t Object) Headings() string {
+       var r table_row
+
+       return r.headings()
+}
+
+// return the rows we need for displaying
+func (t Object) RowContent(max_rows int) []string {
+       rows := make([]string, 0, max_rows)
+
+       for i := range t.results {
+               if i < max_rows {
+                       rows = append(rows, t.results[i].row_content(t.totals))
+               }
+       }
+
+       return rows
+}
+
+// return all the totals
+func (t Object) TotalRowContent() string {
+       return t.totals.row_content(t.totals)
+}
+
+// return an empty string of data (for filling in)
+func (t Object) EmptyRowContent() string {
+       var empty table_row
+       return empty.row_content(empty)
+}
+
+func (t Object) Description() string {
+       count := t.count_rows()
+       return fmt.Sprintf("I/O Latency by File (file_summary_by_instance) %4d row(s)    ", count)
+}
+
+// create a new structure and include various variable values:
+// - datadir, relay_log
+// There's no checking that these are actually provided!
+func NewFileSummaryByInstance(global_variables map[string]string) *Object {
+       n := new(Object)
+
+       n.global_variables = global_variables
+
+       return n
+}
+
+func (t Object) count_rows() int {
+       var count int
+       for row := range t.results {
+               if t.results[row].SUM_TIMER_WAIT > 0 {
+                       count++
+               }
+       }
+       return count
+}
index 438fafd..4740704 100644 (file)
@@ -8,10 +8,12 @@ type RelativeStats struct {
        want_relative_stats bool
 }
 
+// set if we want relative stats
 func (wrs *RelativeStats) SetWantRelativeStats(want_relative_stats bool) {
        wrs.want_relative_stats = want_relative_stats
 }
 
+// return if we want relative stats
 func (wrs RelativeStats) WantRelativeStats() bool {
        return wrs.want_relative_stats
 }
index 2339d28..2cf742b 100644 (file)
@@ -1,17 +1,21 @@
+// manage the time the statistics were taken
 package p_s
 
 import (
        "time"
 )
 
+// object to hold the last collection time
 type InitialTime struct {
        initial_time time.Time
 }
 
+// reflect we've just collected statistics
 func (t *InitialTime) SetNow() {
        t.initial_time = time.Now()
 }
 
+// return the last time we collected statistics
 func (t InitialTime) Last() time.Time {
        return t.initial_time
 }
index 285c86e..cf4e347 100644 (file)
@@ -1,3 +1,4 @@
+// manage the configuration of performance_schema.setup_instruments
 package setup_instruments
 
 import (
@@ -10,24 +11,31 @@ import (
 // We only match on the error number
 // Error 1142: UPDATE command denied to user
 // Error 1290: The MySQL server is running with the --read-only option so it cannot execute this statement
-var EXPECTED_UPDATE_ERRORS = []string { "Error 1142", "Error 1290" }
+var EXPECTED_UPDATE_ERRORS = []string{
+       "Error 1142",
+       "Error 1290",
+}
 
-type setup_instruments_row struct {
+// one row of performance_schema.setup_instruments
+type table_row struct {
        NAME    string
        ENABLED string
        TIMED   string
 }
 
+type table_rows []table_row
+
+// SetupInstruments "object"
 type SetupInstruments struct {
        update_succeeded bool
-       rows []setup_instruments_row
+       rows             table_rows
 }
 
 // Change settings to monitor stage/sql/%
 func (si *SetupInstruments) EnableStageMonitoring(dbh *sql.DB) {
        sql := "SELECT NAME, ENABLED, TIMED FROM setup_instruments WHERE NAME LIKE 'stage/sql/%' AND ( enabled <> 'YES' OR timed <> 'YES' )"
        collecting := "Collecting p_s.setup_instruments stage/sql configuration settings"
-       updating   := "Updating p_s.setup_instruments to allow stage/sql configuration"
+       updating := "Updating p_s.setup_instruments to allow stage/sql configuration"
 
        si.ConfigureSetupInstruments(dbh, sql, collecting, updating)
 }
@@ -36,7 +44,7 @@ func (si *SetupInstruments) EnableStageMonitoring(dbh *sql.DB) {
 func (si *SetupInstruments) EnableMutexMonitoring(dbh *sql.DB) {
        sql := "SELECT NAME, ENABLED, TIMED FROM setup_instruments WHERE NAME LIKE 'wait/synch/mutex/%' AND ( enabled <> 'YES' OR timed <> 'YES' )"
        collecting := "Collecting p_s.setup_instruments wait/synch/mutex configuration settings"
-       updating   := "Updating p_s.setup_instruments to allow wait/synch/mutex configuration"
+       updating := "Updating p_s.setup_instruments to allow wait/synch/mutex configuration"
 
        si.ConfigureSetupInstruments(dbh, sql, collecting, updating)
 }
@@ -45,7 +53,7 @@ func (si *SetupInstruments) EnableMutexMonitoring(dbh *sql.DB) {
 func (si *SetupInstruments) ConfigureSetupInstruments(dbh *sql.DB, sql string, collecting, updating string) {
        // setup the old values in case they're not set
        if si.rows == nil {
-               si.rows = make([]setup_instruments_row, 0, 500)
+               si.rows = make([]table_row, 0, 500)
        }
 
        lib.Logger.Println(collecting)
@@ -58,7 +66,7 @@ func (si *SetupInstruments) ConfigureSetupInstruments(dbh *sql.DB, sql string, c
 
        count := 0
        for rows.Next() {
-               var r setup_instruments_row
+               var r table_row
                if err := rows.Scan(
                        &r.NAME,
                        &r.ENABLED,
@@ -78,9 +86,13 @@ func (si *SetupInstruments) ConfigureSetupInstruments(dbh *sql.DB, sql string, c
        lib.Logger.Println(updating)
 
        count = 0
+       update_sql := "UPDATE setup_instruments SET enabled = ?, TIMED = ? WHERE NAME = ?"
+       stmt, err := dbh.Prepare(update_sql)
+       if err != nil {
+               log.Fatal(err)
+       }
        for i := range si.rows {
-               sql := "UPDATE setup_instruments SET enabled = 'YES', TIMED = 'YES' WHERE NAME = '" + si.rows[i].NAME + "'"
-               if _, err := dbh.Exec(sql); err == nil {
+               if _, err := stmt.Exec("YES", "YES", si.rows[i].NAME); err == nil {
                        si.update_succeeded = true
                } else {
                        found_expected := false
@@ -90,10 +102,10 @@ func (si *SetupInstruments) ConfigureSetupInstruments(dbh *sql.DB, sql string, c
                                        break
                                }
                        }
-                       if ! found_expected {
+                       if !found_expected {
                                log.Fatal(err)
                        }
-                       lib.Logger.Println( "Insufficient privileges to UPDATE setup_instruments: " + err.Error() )
+                       lib.Logger.Println("Insufficient privileges to UPDATE setup_instruments: " + err.Error())
                        break
                }
                count++
@@ -103,11 +115,10 @@ func (si *SetupInstruments) ConfigureSetupInstruments(dbh *sql.DB, sql string, c
        }
 }
 
-
-// restore any changed rows back to their original state
+// restore setup_instruments rows to their previous settings
 func (si *SetupInstruments) RestoreConfiguration(dbh *sql.DB) {
        // If the previous update didn't work then don't try to restore
-       if ! si.update_succeeded {
+       if !si.update_succeeded {
                lib.Logger.Println("Not restoring p_s.setup_instruments to its original settings as previous UPDATE had failed")
                return
        } else {
@@ -116,9 +127,13 @@ func (si *SetupInstruments) RestoreConfiguration(dbh *sql.DB) {
 
        // update the rows which need to be set - do multiple updates but I don't care
        count := 0
+       update_sql := "UPDATE setup_instruments SET enabled = ?, TIMED = ? WHERE NAME = ?"
+       stmt, err := dbh.Prepare(update_sql)
+       if err != nil {
+               log.Fatal(err)
+       }
        for i := range si.rows {
-               sql := "UPDATE setup_instruments SET enabled = '" + si.rows[i].ENABLED + "', TIMED = '" + si.rows[i].TIMED + "' WHERE NAME = '" + si.rows[i].NAME + "'"
-               if _, err := dbh.Exec(sql); err != nil {
+               if _, err := stmt.Exec(si.rows[i].ENABLED, si.rows[i].TIMED, si.rows[i].NAME); err != nil {
                        log.Fatal(err)
                }
                count++
index 60b5b7b..2f8ed67 100644 (file)
@@ -231,7 +231,7 @@ func (t table_rows) totals() table_row {
 // - filter out empty values
 // - merge rows with the same name into a single row
 // - change FILE_NAME into a more descriptive value.
-func select_tlwsbt_rows(dbh *sql.DB) table_rows {
+func select_rows(dbh *sql.DB) table_rows {
        var t table_rows
 
        sql := "SELECT OBJECT_TYPE, OBJECT_SCHEMA, OBJECT_NAME, COUNT_STAR, SUM_TIMER_WAIT, SUM_TIMER_READ, SUM_TIMER_WRITE, SUM_TIMER_READ_WITH_SHARED_LOCKS, SUM_TIMER_READ_HIGH_PRIORITY, SUM_TIMER_READ_NO_INSERT, SUM_TIMER_READ_NORMAL, SUM_TIMER_READ_EXTERNAL, SUM_TIMER_WRITE_ALLOW_WRITE, SUM_TIMER_WRITE_CONCURRENT_INSERT, SUM_TIMER_WRITE_LOW_PRIORITY, SUM_TIMER_WRITE_NORMAL, SUM_TIMER_WRITE_EXTERNAL FROM table_lock_waits_summary_by_table WHERE COUNT_STAR > 0"
index 1ee9f4a..65ef683 100644 (file)
@@ -24,7 +24,7 @@ type Object struct {
 // Collect data from the db, then merge it in.
 func (t *Object) Collect(dbh *sql.DB) {
        start := time.Now()
-       t.current = select_tlwsbt_rows(dbh)
+       t.current = select_rows(dbh)
 
        if len(t.initial) == 0 && len(t.current) > 0 {
                t.initial = make(table_rows, len(t.current))
index a46e33d..c7168ed 100644 (file)
@@ -9,7 +9,7 @@ import (
        "strings"
        "time"
 
-       "github.com/sjmudd/pstop/i_s"
+       "github.com/sjmudd/pstop/i_s/processlist"
        "github.com/sjmudd/pstop/lib"
        ewsgben "github.com/sjmudd/pstop/p_s/events_waits_summary_global_by_event_name"
        essgben "github.com/sjmudd/pstop/p_s/events_stages_summary_global_by_event_name"
@@ -47,7 +47,7 @@ type State struct {
        tlwsbt              ps_table.Tabler // tlwsbt.Table_lock_waits_summary_by_table
        ewsgben             ps_table.Tabler // ewsgben.Events_waits_summary_global_by_event_name
        essgben             ps_table.Tabler // essgben.Events_stages_summary_global_by_event_name
-       users               i_s.Processlist
+       users               processlist.Object
        screen              screen.TermboxScreen
        show                Show
        mysql_version       string