Skip to content
This repository was archived by the owner on Mar 8, 2023. It is now read-only.

Commit 9fee681

Browse files
vitorarins authored and grobie committed
Export stats about response time (#38)
* Added stats about response time from HAProxy v1.5.10 * Refactor tests to add new fields of response time; Change version of HAProxy stats. * Change new metrics names to add average and units. Signed-off-by: Vitor Arins <[email protected]> * Refactor minimum csv field count; Check if the field index is valid. Signed-off-by: Vitor Arins <[email protected]> * Correct value for minimum csv field count. Signed-off-by: Vitor Arins <[email protected]> * Add test for older Haproxy versions. Signed-off-by: Vitor Arins <[email protected]>
1 parent a56e4d5 commit 9fee681

File tree

2 files changed

+53
-10
lines changed

2 files changed

+53
-10
lines changed

haproxy_exporter.go

Lines changed: 13 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,10 @@ const (
2929
// # pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,
3030
// HAProxy 1.5
3131
// pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,
32-
expectedCsvFieldCount = 52
33-
statusField = 17
32+
// HAProxy 1.5.19
33+
// pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,check_status,check_code,check_duration,hrsp_1xx,hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,comp_in,comp_out,comp_byp,comp_rsp,lastsess,last_chk,last_agt,qtime,ctime,rtime,ttime,
34+
minimumCsvFieldCount = 33
35+
statusField = 17
3436
)
3537

3638
var (
@@ -210,6 +212,10 @@ func NewExporter(uri string, selectedServerMetrics map[int]*prometheus.GaugeVec,
210212
42: newBackendMetric("http_responses_total", "Total of HTTP responses.", prometheus.Labels{"code": "4xx"}),
211213
43: newBackendMetric("http_responses_total", "Total of HTTP responses.", prometheus.Labels{"code": "5xx"}),
212214
44: newBackendMetric("http_responses_total", "Total of HTTP responses.", prometheus.Labels{"code": "other"}),
215+
58: newBackendMetric("http_queue_time_average_seconds", "Avg. HTTP queue time for last 1024 successful connections.", nil),
216+
59: newBackendMetric("http_connect_time_average_seconds", "Avg. HTTP connect time for last 1024 successful connections.", nil),
217+
60: newBackendMetric("http_response_time_average_seconds", "Avg. HTTP response time for last 1024 successful connections.", nil),
218+
61: newBackendMetric("http_total_time_average_seconds", "Avg. HTTP total time for last 1024 successful connections.", nil),
213219
},
214220
serverMetrics: selectedServerMetrics,
215221
}, nil
@@ -351,8 +357,8 @@ func (e *Exporter) collectMetrics(metrics chan<- prometheus.Metric) {
351357
}
352358

353359
func (e *Exporter) parseRow(csvRow []string) {
354-
if len(csvRow) < expectedCsvFieldCount {
355-
log.Errorf("Wrong CSV field count: %d vs. %d", len(csvRow), expectedCsvFieldCount)
360+
if len(csvRow) < minimumCsvFieldCount {
361+
log.Errorf("Parser expected at least %d CSV fields, but got: %d", minimumCsvFieldCount, len(csvRow))
356362
e.csvParseFailures.Inc()
357363
return
358364
}
@@ -388,6 +394,9 @@ func parseStatusField(value string) int64 {
388394

389395
func (e *Exporter) exportCsvFields(metrics map[int]*prometheus.GaugeVec, csvRow []string, labels ...string) {
390396
for fieldIdx, metric := range metrics {
397+
if fieldIdx > len(csvRow)-1 {
398+
break
399+
}
391400
valueStr := csvRow[fieldIdx]
392401
if valueStr == "" {
393402
continue

haproxy_exporter_test.go

Lines changed: 40 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ func TestInvalidConfig(t *testing.T) {
8787
}
8888

8989
func TestServerWithoutChecks(t *testing.T) {
90-
h := newHaproxy([]byte("test,127.0.0.1:8080,0,0,0,0,0,0,0,0,,0,,0,0,0,0,no check,1,1,0,0,,,0,,1,1,1,,0,,2,0,,0,,,,0,0,0,0,0,0,0,,,,0,0,"))
90+
h := newHaproxy([]byte("test,127.0.0.1:8080,0,0,0,0,0,0,0,0,,0,,0,0,0,0,no check,1,1,0,0,,,0,,1,1,1,,0,,2,0,,0,,,,0,0,0,0,0,0,0,,,,0,0,,,,,,,,,,,"))
9191
defer h.Close()
9292

9393
e, _ := NewExporter(h.URL, serverMetrics, 5*time.Second)
@@ -125,10 +125,10 @@ func TestServerWithoutChecks(t *testing.T) {
125125
// * http://permalink.gmane.org/gmane.comp.web.haproxy/26561
126126
//
127127
func TestServerBrokenCSV(t *testing.T) {
128-
const data = `foo,FRONTEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,
129-
foo,bug-missing-comma,0,0,0,0,,0,0,0,,0,,0,0,0,0,DRAIN (agent)1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,
130-
foo,foo-instance-0,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,
131-
foo,BACKEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,
128+
const data = `foo,FRONTEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,,,,,,,,,,,
129+
foo,bug-missing-comma,0,0,0,0,,0,0,0,,0,,0,0,0,0,DRAIN (agent)1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,,,,,,,,,,,
130+
foo,foo-instance-0,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,,,,,,,,,,,
131+
foo,BACKEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4OK,,0,,,,,,,0,,,,0,0,,,,,,,,,,,
132132
`
133133
h := newHaproxy([]byte(data))
134134
defer h.Close()
@@ -163,6 +163,40 @@ foo,BACKEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,0,,0,L4O
163163
}
164164
}
165165

166+
func TestOlderHaproxyVersions(t *testing.T) {
167+
const data = `foo,FRONTEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,
168+
foo,foo-instance-0,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,
169+
foo,BACKEND,0,0,0,0,,0,0,0,,0,,0,0,0,0,UP,1,1,0,0,0,5007,0,,1,8,1,,0,,2,
170+
`
171+
h := newHaproxy([]byte(data))
172+
defer h.Close()
173+
174+
e, _ := NewExporter(h.URL, serverMetrics, 5*time.Second)
175+
ch := make(chan prometheus.Metric)
176+
177+
go func() {
178+
defer close(ch)
179+
e.Collect(ch)
180+
}()
181+
182+
if expect, got := 1., readGauge((<-ch).(prometheus.Gauge)); expect != got {
183+
// up
184+
t.Errorf("expected %f up, got %f", expect, got)
185+
}
186+
if expect, got := 1., readCounter((<-ch).(prometheus.Counter)); expect != got {
187+
// totalScrapes
188+
t.Errorf("expected %f recorded scrape, got %f", expect, got)
189+
}
190+
if expect, got := 0., readCounter((<-ch).(prometheus.Counter)); expect != got {
191+
// csvParseFailures
192+
t.Errorf("expected %f csv parse failures, got %f", expect, got)
193+
}
194+
195+
// Suck up the remaining metrics.
196+
for range ch {
197+
}
198+
}
199+
166200
func TestConfigChangeDetection(t *testing.T) {
167201
h := newHaproxy([]byte(""))
168202
defer h.Close()
@@ -285,7 +319,7 @@ func TestUnixDomain(t *testing.T) {
285319
t.Skip("not on windows")
286320
return
287321
}
288-
srv, err := newHaproxyUnix(testSocket, "test,127.0.0.1:8080,0,0,0,0,0,0,0,0,,0,,0,0,0,0,no check,1,1,0,0,,,0,,1,1,1,,0,,2,0,,0,,,,0,0,0,0,0,0,0,,,,0,0,\n")
322+
srv, err := newHaproxyUnix(testSocket, "test,127.0.0.1:8080,0,0,0,0,0,0,0,0,,0,,0,0,0,0,no check,1,1,0,0,,,0,,1,1,1,,0,,2,0,,0,,,,0,0,0,0,0,0,0,,,,0,0,,,,,,,,,,,\n")
289323
if err != nil {
290324
t.Fatalf("can't start test server: %v", err)
291325
}

0 commit comments

Comments (0)