diff --git a/.gitignore b/.gitignore
@@ -0,0 +1,26 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+elastop
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+vendor/
+
+# Go workspace file
+go.work
+
+# IDE specific files
+.idea/
+.vscode/
+*.swp
+*.swo
+.DS_Store
+\ No newline at end of file
diff --git a/.screens/preview.png b/.screens/preview.png
Binary files differ.
diff --git a/LICENSE b/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2025, acidvegas <acid.vegas@acid.vegas>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/README.md b/README.md
@@ -0,0 +1,111 @@
+# Elastop - Elasticsearch Terminal Dashboard
+
+Elastop is a terminal-based dashboard for monitoring Elasticsearch clusters in real-time. It provides a comprehensive view of cluster health, node status, indices, and various performance metrics in an easy-to-read terminal interface. This tool was designed to look visually similar to HTOP.
+
+
+
+## Features
+
+- Real-time cluster monitoring
+- Node status and resource usage
+- Index statistics and write rates
+- Search and indexing performance metrics
+- Memory usage and garbage collection stats
+- Network and disk I/O monitoring
+- Color-coded health status indicators
+- Role-based node classification
+- Version compatibility checking
+
+## Installation
+
+```bash
+# Clone the repository
+git clone https://github.com/yourusername/elastop.git
+cd elastop
+go build
+```
+
+## Usage
+
+```bash
+./elastop [flags]
+```
+
+### Command Line Flags
+| Flag | Description | Default |
+| ----------- | ---------------------- | ------------- |
+| `-host` | Elasticsearch host | `localhost` |
+| `-port` | Elasticsearch port | `9200` |
+| `-user` | Elasticsearch username | `elastic` |
+| `-password` | Elasticsearch password | `$ES_PASSWORD` environment variable |
+
+## Dashboard Layout
+
+### Header Section
+- Displays cluster name and health status
+- Shows total number of nodes (successful/failed)
+- Indicates version compatibility with latest Elasticsearch release
+
+### Nodes Panel
+- Lists all nodes with their roles and status
+- Shows real-time resource usage:
+ - CPU utilization
+ - Memory usage
+ - Heap usage
+ - Disk space
+ - Load average
+- Displays node version and OS information
+
+### Indices Panel
+- Lists all indices with health status
+- Shows document counts and storage size
+- Displays primary shards and replica configuration
+- Real-time ingestion monitoring with:
+ - Document count changes
+ - Ingestion rates (docs/second)
+ - Active write indicators
+
+### Metrics Panel
+- Search performance:
+ - Query counts and rates
+ - Average query latency
+- Indexing metrics:
+ - Operation counts
+ - Indexing rates
+ - Average indexing latency
+- Memory statistics:
+ - System memory usage
+ - JVM heap utilization
+- GC metrics:
+ - Collection counts
+ - GC timing statistics
+- I/O metrics:
+ - Network traffic (TX/RX)
+ - Disk operations
+ - Open file descriptors
+
+### Role Legend
+Shows all possible node roles with their corresponding colors:
+- M: Master
+- D: Data
+- C: Content
+- H: Hot
+- W: Warm
+- K: Cold
+- F: Frozen
+- I: Ingest
+- L: Machine Learning
+- R: Remote Cluster Client
+- T: Transform
+- V: Voting Only
+- O: Coordinating Only
+
+## Controls
+
+- Press `q` or `ESC` to quit
+- Mouse scrolling supported in all panels
+- Auto-refreshes every 5 seconds
+
+---
+
+###### Mirrors: [acid.vegas](https://git.acid.vegas/elastop) • [SuperNETs](https://git.supernets.org/acidvegas/elastop) • [GitHub](https://github.com/acidvegas/elastop) • [GitLab](https://gitlab.com/acidvegas/elastop) • [Codeberg](https://codeberg.org/acidvegas/elastop)
diff --git a/elastop.go b/elastop.go
@@ -0,0 +1,1028 @@
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type ClusterStats struct {
+ ClusterName string `json:"cluster_name"`
+ Status string `json:"status"`
+ Indices struct {
+ Count int `json:"count"`
+ Shards struct {
+ Total int `json:"total"`
+ } `json:"shards"`
+ Docs struct {
+ Count int `json:"count"`
+ } `json:"docs"`
+ Store struct {
+ SizeInBytes int64 `json:"size_in_bytes"`
+ TotalSizeInBytes int64 `json:"total_size_in_bytes"`
+ } `json:"store"`
+ } `json:"indices"`
+ Nodes struct {
+ Total int `json:"total"`
+ Successful int `json:"successful"`
+ Failed int `json:"failed"`
+ } `json:"_nodes"`
+}
+
+type NodesInfo struct {
+ Nodes map[string]struct {
+ Name string `json:"name"`
+ TransportAddress string `json:"transport_address"`
+ Version string `json:"version"`
+ Roles []string `json:"roles"`
+ OS struct {
+ AvailableProcessors int `json:"available_processors"`
+ Name string `json:"name"`
+ Arch string `json:"arch"`
+ Version string `json:"version"`
+ PrettyName string `json:"pretty_name"`
+ } `json:"os"`
+ Process struct {
+ ID int `json:"id"`
+ Mlockall bool `json:"mlockall"`
+ } `json:"process"`
+ Settings struct {
+ Node struct {
+ Attr struct {
+ ML struct {
+ MachineMem string `json:"machine_memory"`
+ } `json:"ml"`
+ } `json:"attr"`
+ } `json:"node"`
+ } `json:"settings"`
+ } `json:"nodes"`
+}
+
+type IndexStats []struct {
+ Index string `json:"index"`
+ Health string `json:"health"`
+ DocsCount string `json:"docs.count"`
+ StoreSize string `json:"store.size"`
+ PriShards string `json:"pri"`
+ Replicas string `json:"rep"`
+}
+
+type IndexActivity struct {
+ LastDocsCount int
+ IsActive bool
+ InitialDocsCount int
+ StartTime time.Time
+}
+
+type IndexWriteStats struct {
+ Indices map[string]struct {
+ Total struct {
+ Indexing struct {
+ IndexTotal int64 `json:"index_total"`
+ } `json:"indexing"`
+ } `json:"total"`
+ } `json:"indices"`
+}
+
+type ClusterHealth struct {
+ ActiveShards int `json:"active_shards"`
+ ActivePrimaryShards int `json:"active_primary_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ InitializingShards int `json:"initializing_shards"`
+ UnassignedShards int `json:"unassigned_shards"`
+ DelayedUnassignedShards int `json:"delayed_unassigned_shards"`
+ NumberOfPendingTasks int `json:"number_of_pending_tasks"`
+ TaskMaxWaitingTime string `json:"task_max_waiting_time"`
+ ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
+}
+
+type NodesStats struct {
+ Nodes map[string]struct {
+ Indices struct {
+ Store struct {
+ SizeInBytes int64 `json:"size_in_bytes"`
+ } `json:"store"`
+ Search struct {
+ QueryTotal int64 `json:"query_total"`
+ QueryTimeInMillis int64 `json:"query_time_in_millis"`
+ } `json:"search"`
+ Indexing struct {
+ IndexTotal int64 `json:"index_total"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis"`
+ } `json:"indexing"`
+ Segments struct {
+ Count int64 `json:"count"`
+ } `json:"segments"`
+ } `json:"indices"`
+ OS struct {
+ CPU struct {
+ Percent int `json:"percent"`
+ } `json:"cpu"`
+ Memory struct {
+ UsedInBytes int64 `json:"used_in_bytes"`
+ FreeInBytes int64 `json:"free_in_bytes"`
+ TotalInBytes int64 `json:"total_in_bytes"`
+ } `json:"mem"`
+ LoadAverage map[string]float64 `json:"load_average"`
+ } `json:"os"`
+ JVM struct {
+ Memory struct {
+ HeapUsedInBytes int64 `json:"heap_used_in_bytes"`
+ HeapMaxInBytes int64 `json:"heap_max_in_bytes"`
+ } `json:"mem"`
+ GC struct {
+ Collectors struct {
+ Young struct {
+ CollectionCount int64 `json:"collection_count"`
+ CollectionTimeInMillis int64 `json:"collection_time_in_millis"`
+ } `json:"young"`
+ Old struct {
+ CollectionCount int64 `json:"collection_count"`
+ CollectionTimeInMillis int64 `json:"collection_time_in_millis"`
+ } `json:"old"`
+ } `json:"collectors"`
+ } `json:"gc"`
+ } `json:"jvm"`
+ Transport struct {
+ RxSizeInBytes int64 `json:"rx_size_in_bytes"`
+ TxSizeInBytes int64 `json:"tx_size_in_bytes"`
+ RxCount int64 `json:"rx_count"`
+ TxCount int64 `json:"tx_count"`
+ } `json:"transport"`
+ HTTP struct {
+ CurrentOpen int64 `json:"current_open"`
+ } `json:"http"`
+ Process struct {
+ OpenFileDescriptors int64 `json:"open_file_descriptors"`
+ } `json:"process"`
+ FS struct {
+ DiskReads int64 `json:"disk_reads"`
+ DiskWrites int64 `json:"disk_writes"`
+ Total struct {
+ TotalInBytes int64 `json:"total_in_bytes"`
+ FreeInBytes int64 `json:"free_in_bytes"`
+ AvailableInBytes int64 `json:"available_in_bytes"`
+ } `json:"total"`
+ Data []struct {
+ Path string `json:"path"`
+ TotalInBytes int64 `json:"total_in_bytes"`
+ FreeInBytes int64 `json:"free_in_bytes"`
+ AvailableInBytes int64 `json:"available_in_bytes"`
+ } `json:"data"`
+ } `json:"fs"`
+ } `json:"nodes"`
+}
+
+type GitHubRelease struct {
+ TagName string `json:"tag_name"`
+}
+
+var (
+ latestVersion string
+ versionCache time.Time
+)
+
+var indexActivities = make(map[string]*IndexActivity)
+
+type IngestionEvent struct {
+ Index string
+ DocCount int
+ Timestamp time.Time
+}
+
+type CatNodesStats struct {
+ Load1m string `json:"load_1m"`
+ Name string `json:"name"`
+}
+
+func bytesToHuman(bytes int64) string {
+ const unit = 1024
+ if bytes < unit {
+ return fmt.Sprintf("%d B", bytes)
+ }
+
+ units := []string{"B", "K", "M", "G", "T", "P", "E", "Z"}
+ exp := 0
+ val := float64(bytes)
+
+ for val >= unit && exp < len(units)-1 {
+ val /= unit
+ exp++
+ }
+
+ return fmt.Sprintf("%.1f%s", val, units[exp])
+}
+
+// Helpers for formatting values shown in the indices panel.
+
+// formatNumber formats an integer with comma thousands separators (e.g. 1234567 -> "1,234,567").
+func formatNumber(n int) string {
+ // Convert number to string
+ str := fmt.Sprintf("%d", n)
+
+ // Add commas
+ var result []rune
+ for i, r := range str {
+ if i > 0 && (len(str)-i)%3 == 0 {
+ result = append(result, ',')
+ }
+ result = append(result, r)
+ }
+ return string(result)
+}
+
+// convertSizeFormat converts Elasticsearch size strings such as "1.5gb" into a compact form like "1G" (truncating decimals).
+func convertSizeFormat(sizeStr string) string {
+ var size float64
+ var unit string
+ fmt.Sscanf(sizeStr, "%f%s", &size, &unit)
+
+ // Convert units like "gb" to "G"
+ unit = strings.ToUpper(strings.TrimSuffix(unit, "b"))
+
+ // Return without decimal points
+ return fmt.Sprintf("%d%s", int(size), unit)
+}
+
+// formatResourceSize scales a byte count down by powers of 1024 and appends targetUnit. NOTE(review): the label is always targetUnit regardless of how many divisions occurred, so the printed unit can mismatch the scaled value — confirm callers always pass the unit matching the expected magnitude.
+func formatResourceSize(bytes int64, targetUnit string) string {
+ const unit = 1024
+ if bytes < unit {
+ return fmt.Sprintf("%3d%s", bytes, targetUnit)
+ }
+
+ units := []string{"B", "K", "M", "G", "T", "P"}
+ exp := 0
+ val := float64(bytes)
+
+ for val >= unit && exp < len(units)-1 {
+ val /= unit
+ exp++
+ }
+
+ return fmt.Sprintf("%3d%s", int(val), targetUnit)
+}
+
+// getPercentageColor maps a usage percentage to a severity color (green/cyan/yellow/red).
+func getPercentageColor(percent float64) string {
+ switch {
+ case percent < 30:
+ return "green"
+ case percent < 70:
+ return "#00ffff" // cyan
+ case percent < 85:
+ return "#ffff00" // yellow
+ default:
+ return "#ff5555" // light red
+ }
+}
+
+func getLatestVersion() string {
+ // Only fetch every hour
+ if time.Since(versionCache) < time.Hour && latestVersion != "" {
+ return latestVersion
+ }
+
+ client := &http.Client{Timeout: 5 * time.Second}
+ resp, err := client.Get("https://api.github.com/repos/elastic/elasticsearch/releases/latest")
+ if err != nil {
+ return ""
+ }
+ defer resp.Body.Close()
+
+ var release GitHubRelease
+ if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
+ return ""
+ }
+
+ // Clean up version string (remove 'v' prefix if present)
+ latestVersion = strings.TrimPrefix(release.TagName, "v")
+ versionCache = time.Now()
+ return latestVersion
+}
+
+func compareVersions(current, latest string) bool {
+ if latest == "" {
+ return true // If we can't get latest version, assume current is ok
+ }
+
+ // Clean up version strings
+ current = strings.TrimPrefix(current, "v")
+ latest = strings.TrimPrefix(latest, "v")
+
+ // Split versions into parts
+ currentParts := strings.Split(current, ".")
+ latestParts := strings.Split(latest, ".")
+
+ // Compare each part
+ for i := 0; i < len(currentParts) && i < len(latestParts); i++ {
+ curr, _ := strconv.Atoi(currentParts[i])
+ lat, _ := strconv.Atoi(latestParts[i])
+ if curr != lat {
+ return curr >= lat
+ }
+ }
+ return len(currentParts) >= len(latestParts)
+}
+
+// roleColors maps Elasticsearch node roles to their display colors.
+var roleColors = map[string]string{
+ "master": "#ff5555", // red
+ "data": "#50fa7b", // green
+ "data_content": "#8be9fd", // cyan
+ "data_hot": "#ffb86c", // orange
+ "data_warm": "#bd93f9", // purple
+ "data_cold": "#f1fa8c", // yellow
+ "data_frozen": "#ff79c6", // pink
+ "ingest": "#87cefa", // light sky blue (was gray)
+ "ml": "#6272a4", // blue gray
+ "remote_cluster_client": "#dda0dd", // plum (was burgundy)
+ "transform": "#689d6a", // forest green
+ "voting_only": "#458588", // teal
+ "coordinating_only": "#d65d0e", // burnt orange
+}
+
+// legendLabels maps node roles to human-readable names for the roles legend panel.
+var legendLabels = map[string]string{
+ "master": "Master",
+ "data": "Data",
+ "data_content": "Data Content",
+ "data_hot": "Data Hot",
+ "data_warm": "Data Warm",
+ "data_cold": "Data Cold",
+ "data_frozen": "Data Frozen",
+ "ingest": "Ingest",
+ "ml": "Machine Learning",
+ "remote_cluster_client": "Remote Cluster Client",
+ "transform": "Transform",
+ "voting_only": "Voting Only",
+ "coordinating_only": "Coordinating Only",
+}
+
+// formatNodeRoles renders a fixed-width (13-slot), sorted, color-coded string of single-letter role codes.
+func formatNodeRoles(roles []string) string {
+ roleMap := map[string]string{
+ "master": "M",
+ "data": "D",
+ "data_content": "C",
+ "data_hot": "H",
+ "data_warm": "W",
+ "data_cold": "K",
+ "data_frozen": "F",
+ "ingest": "I",
+ "ml": "L",
+ "remote_cluster_client": "R",
+ "transform": "T",
+ "voting_only": "V",
+ "coordinating_only": "O",
+ }
+
+ // Get the role letters and sort them
+ var letters []string
+ for _, role := range roles {
+ if letter, exists := roleMap[role]; exists {
+ letters = append(letters, letter)
+ }
+ }
+ sort.Strings(letters)
+
+ // Create a fixed-width string of 13 spaces (one for each possible role)
+ formattedRoles := " " // 13 spaces
+ runeRoles := []rune(formattedRoles)
+
+ // Fill in the sorted letters
+ for i, letter := range letters {
+ if i < 13 { // Now we can accommodate all possible roles
+ runeRoles[i] = []rune(letter)[0]
+ }
+ }
+
+ // Build the final string with colors
+ var result string
+ for _, r := range runeRoles {
+ if r == ' ' {
+ result += " "
+ } else {
+ // Find the role that corresponds to this letter
+ for role, shortRole := range roleMap {
+ if string(r) == shortRole {
+ result += fmt.Sprintf("[%s]%s[white]", roleColors[role], string(r))
+ break
+ }
+ }
+ }
+ }
+
+ return result
+}
+
+// getHealthColor maps an index health status (green/yellow/red) to a display color.
+func getHealthColor(health string) string {
+ switch health {
+ case "green":
+ return "green"
+ case "yellow":
+ return "#ffff00" // yellow
+ case "red":
+ return "#ff5555" // light red
+ default:
+ return "white"
+ }
+}
+
+// indexInfo holds per-index display data, including health, for the indices panel.
+type indexInfo struct {
+ index string
+ health string
+ docs int
+ storeSize string
+ priShards string
+ replicas string
+ writeOps int64
+ indexingRate float64
+}
+
+// startTime records process start; used to derive average rates since launch.
+var startTime = time.Now()
+
+func main() {
+ host := flag.String("host", "localhost", "Elasticsearch host")
+ port := flag.Int("port", 9200, "Elasticsearch port")
+ user := flag.String("user", "elastic", "Elasticsearch username")
+ password := flag.String("password", os.Getenv("ES_PASSWORD"), "Elasticsearch password")
+ flag.Parse()
+
+ app := tview.NewApplication()
+
+ // Update the grid layout to use three columns for the bottom section
+ grid := tview.NewGrid().
+ SetRows(3, 0, 0). // Three rows: header, nodes, bottom panels
+ SetColumns(-1, -2, -1). // Three columns for bottom row: roles (1), indices (2), metrics (1)
+ SetBorders(true)
+
+ // Create the individual panels
+ header := tview.NewTextView().
+ SetDynamicColors(true).
+ SetTextAlign(tview.AlignLeft)
+
+ nodesPanel := tview.NewTextView().
+ SetDynamicColors(true)
+
+ rolesPanel := tview.NewTextView(). // New panel for roles
+ SetDynamicColors(true)
+
+ indicesPanel := tview.NewTextView().
+ SetDynamicColors(true)
+
+ metricsPanel := tview.NewTextView().
+ SetDynamicColors(true)
+
+ // Add panels to grid
+ grid.AddItem(header, 0, 0, 1, 3, 0, 0, false). // Header spans all columns
+ AddItem(nodesPanel, 1, 0, 1, 3, 0, 0, false). // Nodes panel spans all columns
+ AddItem(rolesPanel, 2, 0, 1, 1, 0, 0, false). // Roles panel in left column
+ AddItem(indicesPanel, 2, 1, 1, 1, 0, 0, false). // Indices panel in middle column
+ AddItem(metricsPanel, 2, 2, 1, 1, 0, 0, false) // Metrics panel in right column
+
+ // Update function
+ update := func() {
+ baseURL := fmt.Sprintf("http://%s:%d", *host, *port)
+ client := &http.Client{}
+
+ // Helper function for ES requests
+ makeRequest := func(path string, target interface{}) error {
+ req, err := http.NewRequest("GET", baseURL+path, nil)
+ if err != nil {
+ return err
+ }
+ req.SetBasicAuth(*user, *password)
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(body, target)
+ }
+
+ // Get cluster stats
+ var clusterStats ClusterStats
+ if err := makeRequest("/_cluster/stats", &clusterStats); err != nil {
+ header.SetText(fmt.Sprintf("[red]Error: %v", err))
+ return
+ }
+
+ // Get nodes info
+ var nodesInfo NodesInfo
+ if err := makeRequest("/_nodes", &nodesInfo); err != nil {
+ nodesPanel.SetText(fmt.Sprintf("[red]Error: %v", err))
+ return
+ }
+
+ // Get indices stats
+ var indicesStats IndexStats
+ if err := makeRequest("/_cat/indices?format=json", &indicesStats); err != nil {
+ indicesPanel.SetText(fmt.Sprintf("[red]Error: %v", err))
+ return
+ }
+
+ // Get cluster health
+ var clusterHealth ClusterHealth
+ if err := makeRequest("/_cluster/health", &clusterHealth); err != nil {
+ indicesPanel.SetText(fmt.Sprintf("[red]Error: %v", err))
+ return
+ }
+
+ // Get nodes stats
+ var nodesStats NodesStats
+ if err := makeRequest("/_nodes/stats", &nodesStats); err != nil {
+ indicesPanel.SetText(fmt.Sprintf("[red]Error: %v", err))
+ return
+ }
+
+ // Calculate aggregate metrics
+ var (
+ totalQueries int64
+ totalQueryTime float64
+ totalIndexing int64
+ totalIndexingTime float64
+ totalCPUPercent int
+ totalMemoryUsed int64
+ totalMemoryTotal int64
+ totalHeapUsed int64
+ totalHeapMax int64
+ totalGCCollections int64
+ totalGCTime float64
+ nodeCount int
+ )
+
+ for _, node := range nodesStats.Nodes {
+ totalQueries += node.Indices.Search.QueryTotal
+ totalQueryTime += float64(node.Indices.Search.QueryTimeInMillis) / 1000
+ totalIndexing += node.Indices.Indexing.IndexTotal
+ totalIndexingTime += float64(node.Indices.Indexing.IndexTimeInMillis) / 1000
+ totalCPUPercent += node.OS.CPU.Percent
+ totalMemoryUsed += node.OS.Memory.UsedInBytes
+ totalMemoryTotal += node.OS.Memory.TotalInBytes
+ totalHeapUsed += node.JVM.Memory.HeapUsedInBytes
+ totalHeapMax += node.JVM.Memory.HeapMaxInBytes
+ totalGCCollections += node.JVM.GC.Collectors.Young.CollectionCount + node.JVM.GC.Collectors.Old.CollectionCount
+ totalGCTime += float64(node.JVM.GC.Collectors.Young.CollectionTimeInMillis+node.JVM.GC.Collectors.Old.CollectionTimeInMillis) / 1000
+ nodeCount++
+ }
+
+ // Update header
+ statusColor := map[string]string{
+ "green": "green",
+ "yellow": "yellow",
+ "red": "red",
+ }[clusterStats.Status]
+
+ // Calculate maxNodeNameLen first
+ maxNodeNameLen := 20 // default minimum length
+ for _, nodeInfo := range nodesInfo.Nodes {
+ if len(nodeInfo.Name) > maxNodeNameLen {
+ maxNodeNameLen = len(nodeInfo.Name)
+ }
+ }
+
+ // Then use it in header formatting
+ header.Clear()
+ latestVer := getLatestVersion()
+ fmt.Fprintf(header, "[#00ffff]Cluster:[white] %s [%s]%s[-]%s[#00ffff]Latest: [white]%s\n",
+ clusterStats.ClusterName,
+ statusColor,
+ strings.ToUpper(clusterStats.Status),
+ strings.Repeat(" ", maxNodeNameLen-len(clusterStats.ClusterName)), // Add padding
+ latestVer)
+ fmt.Fprintf(header, "[#00ffff]Nodes:[white] %d Total, [green]%d[white] Successful, [#ff5555]%d[white] Failed\n",
+ clusterStats.Nodes.Total,
+ clusterStats.Nodes.Successful,
+ clusterStats.Nodes.Failed)
+
+ // Update nodes panel
+ nodesPanel.Clear()
+ fmt.Fprintf(nodesPanel, "[::b][#00ffff]Nodes Information[::-]\n\n")
+ fmt.Fprintf(nodesPanel, "[::b]%-*s [#444444]│[#00ffff] %-13s [#444444]│[#00ffff] %-20s [#444444]│[#00ffff] %-7s [#444444]│[#00ffff] %4s [#444444]│[#00ffff] %4s [#444444]│[#00ffff] %-16s [#444444]│[#00ffff] %-16s [#444444]│[#00ffff] %-16s [#444444]│[#00ffff] %-25s[white]\n",
+ maxNodeNameLen,
+ "Node Name",
+ "Roles",
+ "Transport Address",
+ "Version",
+ "CPU",
+ "Load",
+ "Memory",
+ "Heap",
+ "Disk ",
+ "OS")
+
+ // Display nodes with resource usage
+ for id, nodeInfo := range nodesInfo.Nodes {
+ nodeStats, exists := nodesStats.Nodes[id]
+ if !exists {
+ continue
+ }
+
+ // Calculate resource percentages and format memory values
+ cpuPercent := nodeStats.OS.CPU.Percent
+ memPercent := float64(nodeStats.OS.Memory.UsedInBytes) / float64(nodeStats.OS.Memory.TotalInBytes) * 100
+ heapPercent := float64(nodeStats.JVM.Memory.HeapUsedInBytes) / float64(nodeStats.JVM.Memory.HeapMaxInBytes) * 100
+
+ // Calculate disk usage - use the data path stats
+ diskTotal := int64(0)
+ diskAvailable := int64(0)
+ if len(nodeStats.FS.Data) > 0 {
+ // Use the first data path's stats - this is the Elasticsearch data directory
+ diskTotal = nodeStats.FS.Data[0].TotalInBytes // e.g. 5.6TB for r320-1
+ diskAvailable = nodeStats.FS.Data[0].AvailableInBytes // e.g. 5.0TB available
+ } else {
+ // Fallback to total stats if data path stats aren't available
+ diskTotal = nodeStats.FS.Total.TotalInBytes
+ diskAvailable = nodeStats.FS.Total.AvailableInBytes
+ }
+ diskUsed := diskTotal - diskAvailable
+ diskPercent := float64(diskUsed) / float64(diskTotal) * 100
+
+ versionColor := "yellow"
+ if compareVersions(nodeInfo.Version, latestVer) {
+ versionColor = "green"
+ }
+
+		// NOTE(review): this /_cat/nodes request runs once per node iteration; hoist it (and the nodeLoads map below) above the loop to avoid N redundant HTTP calls per refresh.
+ var catNodesStats []CatNodesStats
+ if err := makeRequest("/_cat/nodes?format=json&h=name,load_1m", &catNodesStats); err != nil {
+ nodesPanel.SetText(fmt.Sprintf("[red]Error getting cat nodes stats: %v", err))
+ return
+ }
+
+ // Create a map for quick lookup of load averages by node name
+ nodeLoads := make(map[string]string)
+ for _, node := range catNodesStats {
+ nodeLoads[node.Name] = node.Load1m
+ }
+
+ fmt.Fprintf(nodesPanel, "[#5555ff]%-*s[white] [#444444]│[white] %s [#444444]│[white] [white]%-20s[white] [#444444]│[white] [%s]%-7s[white] [#444444]│[white] [%s]%3d%% [#444444](%d)[white] [#444444]│[white] %4s [#444444]│[white] %4s / %4s [%s]%3d%%[white] [#444444]│[white] %4s / %4s [%s]%3d%%[white] [#444444]│[white] %4s / %4s [%s]%3d%%[white] [#444444]│[white] %s [#bd93f9]%s[white] [#444444](%s)[white]\n",
+ maxNodeNameLen,
+ nodeInfo.Name,
+ formatNodeRoles(nodeInfo.Roles),
+ nodeInfo.TransportAddress,
+ versionColor,
+ nodeInfo.Version,
+ getPercentageColor(float64(cpuPercent)),
+ cpuPercent,
+ nodeInfo.OS.AvailableProcessors,
+ nodeLoads[nodeInfo.Name],
+ formatResourceSize(nodeStats.OS.Memory.UsedInBytes, "G"),
+ formatResourceSize(nodeStats.OS.Memory.TotalInBytes, "G"),
+ getPercentageColor(memPercent),
+ int(memPercent),
+ formatResourceSize(nodeStats.JVM.Memory.HeapUsedInBytes, "G"),
+ formatResourceSize(nodeStats.JVM.Memory.HeapMaxInBytes, "G"),
+ getPercentageColor(heapPercent),
+ int(heapPercent),
+ formatResourceSize(diskUsed, "G"),
+ formatResourceSize(diskTotal, "T"),
+ getPercentageColor(diskPercent),
+ int(diskPercent),
+ nodeInfo.OS.PrettyName,
+ nodeInfo.OS.Version,
+ nodeInfo.OS.Arch)
+ }
+
+ // Update indices panel
+ indicesPanel.Clear()
+ fmt.Fprintf(indicesPanel, "[::b][#00ffff]Indices Information[::-]\n\n")
+ fmt.Fprintf(indicesPanel, " [::b]%-20s %15s %12s %8s %8s %-12s %-10s[white]\n",
+ "Index Name",
+ "Documents",
+ "Size",
+ "Shards",
+ "Replicas",
+ "Ingested",
+ "Rate")
+
+ totalDocs := 0
+ totalSize := int64(0)
+ for _, node := range nodesStats.Nodes {
+ totalSize += node.FS.Total.TotalInBytes - node.FS.Total.AvailableInBytes
+ }
+
+ // Get detailed index stats for write operations
+ var indexWriteStats IndexWriteStats
+ if err := makeRequest("/_stats", &indexWriteStats); err != nil {
+ indicesPanel.SetText(fmt.Sprintf("[red]Error getting write stats: %v", err))
+ return
+ }
+
+ // Create a slice to hold indices for sorting
+ var indices []indexInfo
+
+ // Collect index information
+ for _, index := range indicesStats {
+ // Skip hidden indices
+ if !strings.HasPrefix(index.Index, ".") && index.DocsCount != "0" {
+ docs := 0
+ fmt.Sscanf(index.DocsCount, "%d", &docs)
+ totalDocs += docs
+
+ // Track document changes
+ activity, exists := indexActivities[index.Index]
+ if !exists {
+ indexActivities[index.Index] = &IndexActivity{
+ LastDocsCount: docs,
+ InitialDocsCount: docs,
+ StartTime: time.Now(),
+ }
+ } else {
+ activity.LastDocsCount = docs
+ }
+
+ // Get write operations count and calculate rate
+ writeOps := int64(0)
+ indexingRate := float64(0)
+ if stats, exists := indexWriteStats.Indices[index.Index]; exists {
+ writeOps = stats.Total.Indexing.IndexTotal
+ if activity, ok := indexActivities[index.Index]; ok {
+ timeDiff := time.Since(activity.StartTime).Seconds()
+ if timeDiff > 0 {
+ indexingRate = float64(docs-activity.InitialDocsCount) / timeDiff
+ }
+ }
+ }
+
+ indices = append(indices, indexInfo{
+ index: index.Index,
+ health: index.Health,
+ docs: docs,
+ storeSize: index.StoreSize,
+ priShards: index.PriShards,
+ replicas: index.Replicas,
+ writeOps: writeOps,
+ indexingRate: indexingRate,
+ })
+ }
+ }
+
+ // Sort indices by document count (descending)
+ sort.Slice(indices, func(i, j int) bool {
+ return indices[i].docs > indices[j].docs
+ })
+
+ // Display sorted indices
+ for _, idx := range indices {
+ // Only show purple dot if there's actual indexing happening
+ writeIcon := "[#444444]⚪"
+ if idx.indexingRate > 0 {
+ writeIcon = "[#5555ff]⚫"
+ }
+
+ // Calculate document changes
+ activity := indexActivities[idx.index]
+ ingestedStr := ""
+ if activity != nil && activity.InitialDocsCount < idx.docs {
+ docChange := idx.docs - activity.InitialDocsCount
+ ingestedStr = fmt.Sprintf("[green]+%-11s", formatNumber(docChange))
+ } else {
+ ingestedStr = fmt.Sprintf("%-12s", "") // Empty space if no changes
+ }
+
+ // Format indexing rate
+ rateStr := ""
+ if idx.indexingRate > 0 {
+ if idx.indexingRate >= 1000 {
+ rateStr = fmt.Sprintf("[#50fa7b]%.1fk/s", idx.indexingRate/1000)
+ } else {
+ rateStr = fmt.Sprintf("[#50fa7b]%.1f/s", idx.indexingRate)
+ }
+ } else {
+ rateStr = "[#444444]0/s"
+ }
+
+ // Convert the size format before display
+ sizeStr := convertSizeFormat(idx.storeSize)
+
+ fmt.Fprintf(indicesPanel, "%s [%s]%-20s[white] %15s %12s %8s %8s %s %-10s\n",
+ writeIcon,
+ getHealthColor(idx.health),
+ idx.index,
+ formatNumber(idx.docs),
+ sizeStr,
+ idx.priShards,
+ idx.replicas,
+ ingestedStr,
+ rateStr)
+ }
+
+ // Calculate total indexing rate for the cluster
+ totalIndexingRate := float64(0)
+ for _, idx := range indices {
+ totalIndexingRate += idx.indexingRate
+ }
+
+ // Format cluster indexing rate
+ clusterRateStr := ""
+ if totalIndexingRate > 0 {
+ if totalIndexingRate >= 1000000 {
+ clusterRateStr = fmt.Sprintf("[#50fa7b]%.1fM/s", totalIndexingRate/1000000)
+ } else if totalIndexingRate >= 1000 {
+ clusterRateStr = fmt.Sprintf("[#50fa7b]%.1fK/s", totalIndexingRate/1000)
+ } else {
+ clusterRateStr = fmt.Sprintf("[#50fa7b]%.1f/s", totalIndexingRate)
+ }
+ } else {
+ clusterRateStr = "[#444444]0/s"
+ }
+
+ // Display the totals with indexing rate
+ fmt.Fprintf(indicesPanel, "\n[#00ffff]Total Documents:[white] %s, [#00ffff]Total Size:[white] %s, [#00ffff]Indexing Rate:[white] %s\n",
+ formatNumber(totalDocs),
+ bytesToHuman(totalSize),
+ clusterRateStr)
+
+ // Move shard stats to bottom of indices panel
+ fmt.Fprintf(indicesPanel, "\n[#00ffff]Shard Status:[white] Active: %d (%.1f%%), Primary: %d, Relocating: %d, Initializing: %d, Unassigned: %d\n",
+ clusterHealth.ActiveShards,
+ clusterHealth.ActiveShardsPercentAsNumber,
+ clusterHealth.ActivePrimaryShards,
+ clusterHealth.RelocatingShards,
+ clusterHealth.InitializingShards,
+ clusterHealth.UnassignedShards)
+
+ // Update metrics panel
+ metricsPanel.Clear()
+ fmt.Fprintf(metricsPanel, "[::b][#00ffff]Cluster Metrics[::-]\n\n")
+
+ // Helper function to format metric lines with consistent alignment
+ formatMetric := func(name string, value string) string {
+ return fmt.Sprintf("[#00ffff]%-25s[white] %s\n", name+":", value)
+ }
+
+ // Search metrics
+ fmt.Fprint(metricsPanel, formatMetric("Search Queries", formatNumber(int(totalQueries))))
+ fmt.Fprint(metricsPanel, formatMetric("Query Rate", fmt.Sprintf("%s/s", formatNumber(int(float64(totalQueries)/time.Since(startTime).Seconds())))))
+ fmt.Fprint(metricsPanel, formatMetric("Total Query Time", fmt.Sprintf("%.1fs", totalQueryTime)))
+ fmt.Fprint(metricsPanel, formatMetric("Avg Query Latency", fmt.Sprintf("%.2fms", totalQueryTime*1000/float64(totalQueries+1))))
+
+ // Indexing metrics
+ fmt.Fprint(metricsPanel, formatMetric("Index Operations", formatNumber(int(totalIndexing))))
+ fmt.Fprint(metricsPanel, formatMetric("Indexing Rate", fmt.Sprintf("%s/s", formatNumber(int(float64(totalIndexing)/time.Since(startTime).Seconds())))))
+ fmt.Fprint(metricsPanel, formatMetric("Total Index Time", fmt.Sprintf("%.1fs", totalIndexingTime)))
+ fmt.Fprint(metricsPanel, formatMetric("Avg Index Latency", fmt.Sprintf("%.2fms", totalIndexingTime*1000/float64(totalIndexing+1))))
+
+ // GC metrics
+ fmt.Fprint(metricsPanel, formatMetric("GC Collections", formatNumber(int(totalGCCollections))))
+ fmt.Fprint(metricsPanel, formatMetric("Total GC Time", fmt.Sprintf("%.1fs", totalGCTime)))
+ fmt.Fprint(metricsPanel, formatMetric("Avg GC Time", fmt.Sprintf("%.2fms", totalGCTime*1000/float64(totalGCCollections+1))))
+
+ // Memory metrics
+ totalMemoryPercent := float64(totalMemoryUsed) / float64(totalMemoryTotal) * 100
+ totalHeapPercent := float64(totalHeapUsed) / float64(totalHeapMax) * 100
+ fmt.Fprint(metricsPanel, formatMetric("Memory Usage", fmt.Sprintf("%s / %s (%.1f%%)", bytesToHuman(totalMemoryUsed), bytesToHuman(totalMemoryTotal), totalMemoryPercent)))
+ fmt.Fprint(metricsPanel, formatMetric("Heap Usage", fmt.Sprintf("%s / %s (%.1f%%)", bytesToHuman(totalHeapUsed), bytesToHuman(totalHeapMax), totalHeapPercent)))
+
+ // Segment metrics
+ fmt.Fprint(metricsPanel, formatMetric("Total Segments", formatNumber(int(getTotalSegments(nodesStats)))))
+ fmt.Fprint(metricsPanel, formatMetric("Open File Descriptors", formatNumber(int(getTotalOpenFiles(nodesStats)))))
+
+ // Network metrics
+ fmt.Fprint(metricsPanel, formatMetric("Network TX", bytesToHuman(getTotalNetworkTX(nodesStats))))
+ fmt.Fprint(metricsPanel, formatMetric("Network RX", bytesToHuman(getTotalNetworkRX(nodesStats))))
+
+ // Disk I/O metrics
+ totalDiskReads := int64(0)
+ totalDiskWrites := int64(0)
+ for _, node := range nodesStats.Nodes {
+ totalDiskReads += node.FS.DiskReads
+ totalDiskWrites += node.FS.DiskWrites
+ }
+ fmt.Fprint(metricsPanel, formatMetric("Disk Reads", formatNumber(int(totalDiskReads))))
+ fmt.Fprint(metricsPanel, formatMetric("Disk Writes", formatNumber(int(totalDiskWrites))))
+
+ // HTTP connections
+ totalHTTPConnections := int64(0)
+ for _, node := range nodesStats.Nodes {
+ totalHTTPConnections += node.HTTP.CurrentOpen
+ }
+ fmt.Fprint(metricsPanel, formatMetric("HTTP Connections", formatNumber(int(totalHTTPConnections))))
+
+ // Average CPU usage across nodes
+ avgCPUPercent := float64(totalCPUPercent) / float64(nodeCount)
+ fmt.Fprint(metricsPanel, formatMetric("Average CPU Usage", fmt.Sprintf("%.1f%%", avgCPUPercent)))
+
+ // Pending tasks
+ fmt.Fprint(metricsPanel, formatMetric("Pending Tasks", formatNumber(clusterHealth.NumberOfPendingTasks)))
+ if clusterHealth.TaskMaxWaitingTime != "" && clusterHealth.TaskMaxWaitingTime != "0s" {
+ fmt.Fprint(metricsPanel, formatMetric("Max Task Wait Time", clusterHealth.TaskMaxWaitingTime))
+ }
+
+ // Update roles panel
+ rolesPanel.Clear()
+ fmt.Fprintf(rolesPanel, "[::b][#00ffff]Node Roles[::-]\n\n")
+
+ // Create a map of used roles
+ usedRoles := make(map[string]bool)
+ for _, nodeInfo := range nodesInfo.Nodes {
+ for _, role := range nodeInfo.Roles {
+ usedRoles[role] = true
+ }
+ }
+
+ // Display roles in the roles panel
+ roleLegend := [][2]string{
+ {"C", "data_content"},
+ {"D", "data"},
+ {"F", "data_frozen"},
+ {"H", "data_hot"},
+ {"I", "ingest"},
+ {"K", "data_cold"},
+ {"L", "ml"},
+ {"M", "master"},
+ {"O", "coordinating_only"},
+ {"R", "remote_cluster_client"},
+ {"T", "transform"},
+ {"V", "voting_only"},
+ {"W", "data_warm"},
+ }
+
+ for _, role := range roleLegend {
+ if usedRoles[role[1]] {
+ fmt.Fprintf(rolesPanel, "[%s]%s[white] %s\n",
+ roleColors[role[1]],
+ role[0],
+ legendLabels[role[1]])
+ } else {
+ fmt.Fprintf(rolesPanel, "[#444444]%s %s\n",
+ role[0],
+ legendLabels[role[1]])
+ }
+ }
+ }
+
+ // Set up periodic updates
+ go func() {
+ for {
+ app.QueueUpdateDraw(func() {
+ update()
+ })
+ time.Sleep(5 * time.Second)
+ }
+ }()
+
+ // Handle quit
+ app.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {
+ if event.Key() == tcell.KeyEsc || event.Rune() == 'q' {
+ app.Stop()
+ }
+ return event
+ })
+
+ if err := app.SetRoot(grid, true).EnableMouse(true).Run(); err != nil {
+ panic(err)
+ }
+}
+
+// Aggregation helpers: each sums a per-node statistic across all nodes in the cluster.
+func getTotalSegments(stats NodesStats) int64 {
+ var total int64
+ for _, node := range stats.Nodes {
+ total += node.Indices.Segments.Count
+ }
+ return total
+}
+
+func getTotalOpenFiles(stats NodesStats) int64 {
+ var total int64
+ for _, node := range stats.Nodes {
+ total += node.Process.OpenFileDescriptors
+ }
+ return total
+}
+
+func getTotalNetworkTX(stats NodesStats) int64 {
+ var total int64
+ for _, node := range stats.Nodes {
+ total += node.Transport.TxSizeInBytes
+ }
+ return total
+}
+
+func getTotalNetworkRX(stats NodesStats) int64 {
+ var total int64
+ for _, node := range stats.Nodes {
+ total += node.Transport.RxSizeInBytes
+ }
+ return total
+}
diff --git a/go.mod b/go.mod
@@ -0,0 +1,18 @@
+module elastop
+
+go 1.23.2
+
+require (
+ github.com/gdamore/tcell/v2 v2.7.4
+ github.com/rivo/tview v0.0.0-20241103174730-c76f7879f592
+)
+
+require (
+ github.com/gdamore/encoding v1.0.0 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ golang.org/x/sys v0.17.0 // indirect
+ golang.org/x/term v0.17.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+)
diff --git a/go.sum b/go.sum
@@ -0,0 +1,50 @@
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU=
+github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/rivo/tview v0.0.0-20241103174730-c76f7879f592 h1:YIJ+B1hePP6AgynC5TcqpO0H9k3SSoZa2BGyL6vDUzM=
+github.com/rivo/tview v0.0.0-20241103174730-c76f7879f592/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
| | | | | | |