Remove unused directory
author    Simon J Mudd <sjmudd@pobox.com>
          Sat, 15 Nov 2014 07:27:42 +0000 (08:27 +0100)
committer Simon J Mudd <sjmudd@pobox.com>
          Sat, 15 Nov 2014 07:28:18 +0000 (08:28 +0100)
performance_schema/replication_workload/replication_workload.go [deleted file]
performance_schema/replication_workload/replication_workload_row.go [deleted file]

diff --git a/performance_schema/replication_workload/replication_workload.go b/performance_schema/replication_workload/replication_workload.go
deleted file mode 100644 (file)
index 35b048d..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-package replication_workload
-
-import (
-        "database/sql"
-
-        // "github.com/sjmudd/pstop/lib"
-        ps "github.com/sjmudd/pstop/performance_schema"
-)
-
-// Replication_workload holds the initial, current and displayable rows,
-// plus a row of totals.
-type Replication_workload struct {
-        ps.RelativeStats
-        ps.InitialTime
-        initial replication_workload_rows
-        current replication_workload_rows
-        results replication_workload_rows
-        totals  replication_workload_row
-}
-
-// UpdateInitialValues resets the statistics to the current values.
-func (t *Replication_workload) UpdateInitialValues() {
-        t.SetNow()
-        t.initial = make(replication_workload_rows, len(t.current))
-        copy(t.initial, t.current)
-
-        t.results = make(replication_workload_rows, len(t.current))
-        copy(t.results, t.current)
-
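-        // for relative stats the display is relative to the initial snapshot,
-        // so subtracting initial (a copy of current) zeroes the results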
-        if t.WantRelativeStats() {
-                t.results.subtract(t.initial) // should be 0 if relative
-        }
-
-        t.results.sort()
-        t.totals = t.results.totals()
-}
-
-// Collect should collect data from the db and merge it in; the body was
-// never implemented.
-func (t *Replication_workload) Collect(dbh *sql.DB) {
-}
-
-// Headings returns the headings for a table.
-func (t Replication_workload) Headings() string {
-        var r replication_workload_row
-
-        return r.headings()
-}
-
-// RowContent returns the rows to display, at most max_rows of them.
-func (t Replication_workload) RowContent(max_rows int) []string {
-        rows := make([]string, 0, max_rows)
-
-        for i := range t.results {
-                if i < max_rows {
-                        rows = append(rows, t.results[i].row_content(t.totals))
-                }
-        }
-
-        return rows
-}
-
-// TotalRowContent returns the row of totals.
-func (t Replication_workload) TotalRowContent() string {
-        return t.totals.row_content(t.totals)
-}
-
-// EmptyRowContent returns an empty row of data (for padding out a display).
-func (t Replication_workload) EmptyRowContent() string {
-        var empty replication_workload_row
-        return empty.row_content(empty)
-}
-
-func (t Replication_workload) Description() string {
-        return "Replication Workload (replication_workload)"
-}
-
diff --git a/performance_schema/replication_workload/replication_workload_row.go b/performance_schema/replication_workload/replication_workload_row.go
deleted file mode 100644 (file)
index caf70a9..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-package replication_workload
-
-import (
-        "database/sql"
-        "fmt"
-        "log"
-        "sort"
-        "strings"
-
-        "github.com/sjmudd/pstop/lib"
-)
-
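-// replication_workload_row holds one row of wait-event data, aggregated per
-// replication thread, event, object and operation.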
-type replication_workload_row struct {
-        NAME                string
-        EVENT_NAME          string
-        OBJECT_NAME         string
-        OPERATION           string
-        SUM_TIMER_WAIT      int
-        SUM_SPINS           int
-        SUM_NUMBER_OF_BYTES int
-}
-
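-// replication_workload_rows is a slice of rows which implements
-// sort.Interface (see Len, Swap and Less below).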
-type replication_workload_rows []replication_workload_row
-
-func select_rep_workload_rows(dbh *sql.DB) replication_workload_rows {
-        var t replication_workload_rows
-
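-        // aggregate the wait events of the replication (slave_sql) threads,
-        // grouped by thread name, event, object and operation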
-        query := "SELECT t.NAME, ewc.EVENT_NAME, ewc.OBJECT_NAME, ewc.OPERATION, SUM(ewc.TIMER_WAIT) AS SUM_TIMER_WAIT, SUM(ewc.SPINS) AS SUM_SPINS, SUM(ewc.NUMBER_OF_BYTES) AS SUM_NUMBER_OF_BYTES FROM events_waits_history ewc JOIN threads t ON (t.THREAD_ID = ewc.THREAD_ID) WHERE t.NAME LIKE '%slave_sql%' GROUP BY t.NAME, ewc.EVENT_NAME, ewc.OBJECT_NAME, ewc.OPERATION"
-
-        rows, err := dbh.Query(query)
-        if err != nil {
-                log.Fatal(err)
-        }
-        defer rows.Close()
-
-        for rows.Next() {
-                var r replication_workload_row
-
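-                // NB: SUM_SPINS and SUM_NUMBER_OF_BYTES can be NULL (SPINS and
-                // NUMBER_OF_BYTES are nullable columns), and a NULL makes the
-                // Scan into a plain int fail, triggering the log.Fatal below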
-                if err := rows.Scan(&r.NAME, &r.EVENT_NAME, &r.OBJECT_NAME, &r.OPERATION, &r.SUM_TIMER_WAIT, &r.SUM_SPINS, &r.SUM_NUMBER_OF_BYTES); err != nil {
-                        log.Fatal(err)
-                }
-                t = append(t, r)
-        }
-        if err := rows.Err(); err != nil {
-                log.Fatal(err)
-        }
-
-        return t
-}
-
-func (this *replication_workload_row) add(other replication_workload_row) {
-        this.SUM_TIMER_WAIT += other.SUM_TIMER_WAIT
-        this.SUM_SPINS += other.SUM_SPINS
-        this.SUM_NUMBER_OF_BYTES += other.SUM_NUMBER_OF_BYTES
-}
-
-func (this *replication_workload_row) subtract(other replication_workload_row) {
-        this.SUM_TIMER_WAIT -= other.SUM_TIMER_WAIT
-        this.SUM_SPINS -= other.SUM_SPINS
-        this.SUM_NUMBER_OF_BYTES -= other.SUM_NUMBER_OF_BYTES
-}
-
-func (t replication_workload_rows) Len() int      { return len(t) }
-func (t replication_workload_rows) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-// Less orders rows by SUM_TIMER_WAIT, descending. The ordering may need adjusting.
-func (t replication_workload_rows) Less(i, j int) bool {
-        return t[i].SUM_TIMER_WAIT > t[j].SUM_TIMER_WAIT
-}
-
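-// sort sorts the rows in place, largest SUM_TIMER_WAIT first.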
-func (t *replication_workload_rows) sort() {
-        sort.Sort(t)
-}
-
-// needs_refresh returns true if t's totals are larger than those of t2 (the
-// newer data): that means the counters have gone backwards (been reset) and
-// the stored data needs refreshing. Check this by comparing totals.
-func (t replication_workload_rows) needs_refresh(t2 replication_workload_rows) bool {
-        my_totals := t.totals()
-        t2_totals := t2.totals()
-
-        return my_totals.SUM_TIMER_WAIT > t2_totals.SUM_TIMER_WAIT
-}
-