mirror of https://github.com/goharbor/harbor (synced 2025-05-21 20:00:19 +00:00)
Add test case for exporter

add e2e test to verify exporter and jobservice metrics exist

Signed-off-by: DQ <dengq@vmware.com>

commit dcb28d8e30, parent 7eebbeebdf
@@ -14,11 +14,11 @@ var (
         valueType: prometheus.GaugeValue,
     }
     jobServiceTaskQueueLatency = typedDesc{
-        desc:      newDescWithLables("", "task_queue_latency", "the time last taks processed", "type"),
+        desc:      newDescWithLables("", "task_queue_latency", "how long ago the next job to be processed was enqueued", "type"),
         valueType: prometheus.GaugeValue,
     }
     jobServiceConcurrency = typedDesc{
-        desc:      newDescWithLables("", "task_concurrecy", "Total number of concurrency on a pool", "type", "pool"),
+        desc:      newDescWithLables("", "task_concurrency", "Total number of concurrency on a pool", "type", "pool"),
         valueType: prometheus.GaugeValue,
     }
     jobServiceScheduledJobTotal = typedDesc{
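For orientation, here is a minimal sketch of what one of these typedDesc gauges corresponds to in plain client_golang terms. The "harbor" namespace prefix and the "GARBAGE_COLLECTION" label value are assumptions for illustration; newDescWithLables and typedDesc are Harbor's own wrappers and are not reproduced here.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// taskQueueLatency approximates the descriptor behind
// harbor_task_queue_latency: a gauge with a single "type" label.
var taskQueueLatency = prometheus.NewDesc(
	prometheus.BuildFQName("harbor", "", "task_queue_latency"),
	"how long ago the next job to be processed was enqueued",
	[]string{"type"}, // one series per job queue type
	nil,
)

func main() {
	// Emit one constant gauge sample, the same way the collector uses
	// MustNewConstMetric for jobServiceConcurrency. The label value is an
	// example job type, not taken from Harbor.
	m := prometheus.MustNewConstMetric(taskQueueLatency, prometheus.GaugeValue, 12.5, "GARBAGE_COLLECTION")
	fmt.Println(m.Desc())
}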
@@ -43,10 +43,9 @@ type JobServiceCollector struct {
 
 // Describe implements prometheus.Collector
 func (hc *JobServiceCollector) Describe(c chan<- *prometheus.Desc) {
-    c <- jobServiceTaskQueueSize.Desc()
-    c <- jobServiceTaskQueueLatency.Desc()
-    c <- jobServiceConcurrency.Desc()
-    c <- jobServiceScheduledJobFails.Desc()
+    for _, jd := range hc.getDescribeInfo() {
+        c <- jd
+    }
 }
 
 // Collect implements prometheus.Collector
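The refactored Describe simply ranges over a slice of descriptors. A hedged, self-contained sketch of that collector shape (the descriptor name and help text below are illustrative stand-ins, not Harbor's fields):

package main

import "github.com/prometheus/client_golang/prometheus"

// sketchCollector mirrors the shape the refactor moves toward: Describe
// ranges over one slice of descriptors instead of sending each by hand.
type sketchCollector struct {
	descs []*prometheus.Desc
}

func (c *sketchCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, d := range c.descs {
		ch <- d
	}
}

func (c *sketchCollector) Collect(ch chan<- prometheus.Metric) {
	// A real collector gathers live values here; a constant stands in.
	ch <- prometheus.MustNewConstMetric(c.descs[0], prometheus.GaugeValue, 42)
}

func main() {
	c := &sketchCollector{descs: []*prometheus.Desc{
		prometheus.NewDesc("harbor_task_queue_size", "Total number of tasks in the queue", nil, nil),
	}}
	prometheus.MustRegister(c)
}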
@@ -56,6 +55,16 @@ func (hc *JobServiceCollector) Collect(c chan<- prometheus.Metric) {
     }
 }
 
+func (hc *JobServiceCollector) getDescribeInfo() []*prometheus.Desc {
+    return []*prometheus.Desc{
+        jobServiceTaskQueueSize.Desc(),
+        jobServiceTaskQueueLatency.Desc(),
+        jobServiceConcurrency.Desc(),
+        jobServiceScheduledJobTotal.Desc(),
+        jobServiceScheduledJobFails.Desc(),
+    }
+}
+
 func (hc *JobServiceCollector) getJobserviceInfo() []prometheus.Metric {
     if CacheEnabled() {
         value, ok := CacheGet(JobServiceCollectorName)
@@ -63,26 +72,9 @@ func (hc *JobServiceCollector) getJobserviceInfo() []prometheus.Metric {
             return value.([]prometheus.Metric)
         }
     }
-    result := []prometheus.Metric{}
-
-    // Get concurrency info via raw redis client
-    rdsConn := GetRedisPool().Get()
-    values, err := redis.Values(rdsConn.Do("SMEMBERS", redisKeyKnownJobs(jsNamespace)))
-    checkErr(err, "err when get known jobs")
-    var jobs []string
-    for _, v := range values {
-        jobs = append(jobs, string(v.([]byte)))
-    }
-    for _, job := range jobs {
-        values, err := redis.Values(rdsConn.Do("HGETALL", redisKeyJobsLockInfo(jsNamespace, job)))
-        checkErr(err, "err when get job lock info")
-        for i := 0; i < len(values); i += 2 {
-            key, _ := redis.String(values[i], nil)
-            value, _ := redis.Float64(values[i+1], nil)
-            result = append(result, jobServiceConcurrency.MustNewConstMetric(value, job, key))
-        }
-    }
-    rdsConn.Close()
+    result := getConccurrentInfo()
 
     // get info via jobservice client
     cli := GetBackendWorker()
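getJobserviceInfo first consults Harbor's metric cache (CacheEnabled / CacheGet) before touching Redis or the backend worker. A rough, assumed-equivalent of that cache-aside step, using a mutex-guarded map with a TTL rather than Harbor's actual cache helpers:

package main

import (
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// metricCache is a stand-in for Harbor's metric cache helpers: a
// mutex-guarded map with a TTL. It is not Harbor's implementation.
type metricCache struct {
	mu      sync.Mutex
	entries map[string]cacheEntry
	ttl     time.Duration
}

type cacheEntry struct {
	metrics []prometheus.Metric
	at      time.Time
}

// get returns the cached metrics for key if they are still fresh.
func (c *metricCache) get(key string) ([]prometheus.Metric, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.entries[key]
	if !ok || time.Since(e.at) > c.ttl {
		return nil, false
	}
	return e.metrics, true
}

// put stores freshly collected metrics under key.
func (c *metricCache) put(key string, ms []prometheus.Metric) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[key] = cacheEntry{metrics: ms, at: time.Now()}
}

func main() {
	c := &metricCache{entries: map[string]cacheEntry{}, ttl: 30 * time.Second}
	if _, ok := c.get("JobServiceCollector"); !ok {
		// On a miss the collector would scrape Redis and the worker pool,
		// then store the result for the next scrape.
		c.put("JobServiceCollector", nil)
	}
}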
@@ -104,3 +96,22 @@ func (hc *JobServiceCollector) getJobserviceInfo() []prometheus.Metric {
     }
     return result
 }
+
+func getConccurrentInfo() []prometheus.Metric {
+    rdsConn := GetRedisPool().Get()
+    defer rdsConn.Close()
+    result := []prometheus.Metric{}
+    knownJobvalues, err := redis.Values(rdsConn.Do("SMEMBERS", redisKeyKnownJobs(jsNamespace)))
+    checkErr(err, "err when get known jobs")
+    for _, v := range knownJobvalues {
+        job := string(v.([]byte))
+        lockInfovalues, err := redis.Values(rdsConn.Do("HGETALL", redisKeyJobsLockInfo(jsNamespace, job)))
+        checkErr(err, "err when get job lock info")
+        for i := 0; i < len(lockInfovalues); i += 2 {
+            key, _ := redis.String(lockInfovalues[i], nil)
+            value, _ := redis.Float64(lockInfovalues[i+1], nil)
+            result = append(result, jobServiceConcurrency.MustNewConstMetric(value, job, key))
+        }
+    }
+    return result
+}
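The new getConccurrentInfo helper is the SMEMBERS + HGETALL walk pulled into one place with a deferred connection close. A standalone redigo sketch of the same walk; the key layout ("<ns>:known_jobs", "<ns>:jobs:<job>:lock_info") mirrors what redisKeyKnownJobs / redisKeyJobsLockInfo appear to build but is an assumption, as is the local Redis address.

package main

import (
	"fmt"
	"strconv"

	"github.com/gomodule/redigo/redis"
)

// concurrencyByPool reads the known-jobs set, then each job's lock-info hash,
// yielding a count per (job, worker pool) pair.
func concurrencyByPool(conn redis.Conn, ns string) (map[string]map[string]float64, error) {
	out := map[string]map[string]float64{}
	jobs, err := redis.Strings(conn.Do("SMEMBERS", ns+":known_jobs"))
	if err != nil {
		return nil, err
	}
	for _, job := range jobs {
		// HGETALL replies alternate field/value; StringMap folds them into a map.
		lock, err := redis.StringMap(conn.Do("HGETALL", ns+":jobs:"+job+":lock_info"))
		if err != nil {
			return nil, err
		}
		out[job] = map[string]float64{}
		for poolID, v := range lock {
			f, _ := strconv.ParseFloat(v, 64) // lock counts are stored as numbers
			out[job][poolID] = f
		}
	}
	return out, nil
}

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379") // assumed local Redis
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println(concurrencyByPool(conn, "{harbor_job_service_namespace}"))
}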
@@ -29,21 +29,6 @@ type RedisPoolConfig struct {
     IdleTimeoutSecond int
 }
 
-// InitRedisCli ...
-func InitRedisCli(rdsCfg *RedisPoolConfig) {
-    pool, err := redislib.GetRedisPool("JobService", rdsCfg.URL, &redislib.PoolParam{
-        PoolMaxIdle:           6,
-        PoolIdleTimeout:       time.Duration(rdsCfg.IdleTimeoutSecond) * time.Second,
-        DialConnectionTimeout: dialConnectionTimeout,
-        DialReadTimeout:       dialReadTimeout,
-        DialWriteTimeout:      dialWriteTimeout,
-    })
-    if err != nil {
-        panic(err)
-    }
-    redisPool = pool
-}
-
 // InitBackendWorker initiate backend worker
 func InitBackendWorker(redisPoolConfig *RedisPoolConfig) {
     pool, err := redislib.GetRedisPool("JobService", redisPoolConfig.URL, &redislib.PoolParam{
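The removed InitRedisCli duplicated the pool that InitBackendWorker already builds from the same RedisPoolConfig. For reference, a plain redigo pool with the same knobs (max idle, idle timeout, dial timeouts); the address and timeout values below are placeholders, and redislib.GetRedisPool is Harbor's own wrapper, not shown here.

package main

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

// newJobServicePool builds a connection pool roughly equivalent to the one
// the removed initializer created.
func newJobServicePool(addr string, idleTimeout time.Duration) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     6,
		IdleTimeout: idleTimeout,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", addr,
				redis.DialConnectTimeout(time.Second),
				redis.DialReadTimeout(10*time.Second),
				redis.DialWriteTimeout(10*time.Second),
			)
		},
	}
}

func main() {
	pool := newJobServicePool("localhost:6379", 30*time.Second)
	defer pool.Close()
}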
@@ -10,12 +10,27 @@ class TestMetricsExist(unittest.TestCase):
     golang_basic_metrics = ["go_gc_duration_seconds", "go_goroutines", "go_info", "go_memstats_alloc_bytes"]
 
     metrics = {
-        'core': golang_basic_metrics + ["harbor_core_http_request_total", "harbor_core_http_request_duration_seconds",
-        "harbor_core_http_inflight_requests"],
+        'core': golang_basic_metrics + [
+            "harbor_core_http_request_total",
+            "harbor_core_http_request_duration_seconds",
+            "harbor_core_http_inflight_requests"],
         'registry': golang_basic_metrics + ["registry_http_in_flight_requests"],
-        'exporter': golang_basic_metrics + ["artifact_pulled",
-        "harbor_project_artifact_total", "harbor_project_member_total", "harbor_project_quota_byte",
-        "harbor_project_repo_total", "harbor_project_total", "project_quota_usage_byte"]
+        'exporter': golang_basic_metrics + [
+            "artifact_pulled",
+            "harbor_project_artifact_total",
+            "harbor_project_member_total",
+            "harbor_project_quota_byte",
+            "harbor_project_repo_total",
+            "harbor_project_total",
+            "project_quota_usage_byte",
+            "harbor_task_concurrency",
+            "harbor_task_queue_latency",
+            "harbor_task_queue_size",
+            "harbor_task_scheduled_total"],
+        'jobservice': golang_basic_metrics + [
+            "harbor_jobservice_info",
+            "harbor_jobservice_task_process_time_seconds",
+            "harbor_jobservice_task_total"]
     }
 
     def get_metrics(self):
@@ -23,10 +38,14 @@ class TestMetricsExist(unittest.TestCase):
         exporter_res = requests.get(metrics_url)
         core_res = requests.get(metrics_url, params={'comp': 'core'})
         reg_res = requests.get(metrics_url, params={'comp': 'registry'})
-        return [('exporter', exporter_res.text), ('core', core_res.text), ('registry', reg_res.text)]
+        js_res = requests.get(metrics_url, params={'comp': 'jobservice'})
+        return [('exporter', exporter_res.text), ('core', core_res.text), ('registry', reg_res.text), ('jobservice', js_res.text)]
 
     def testMetricsExist(self):
         for k, metric_text in self.get_metrics():
+
+
+
             for metric_name in self.metrics[k]:
                 print("Metric {} should exist in {} ".format(metric_name, k))
                 self.assertTrue(metric_name in metric_text)
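The e2e test scrapes each component through the exporter endpoint with a comp query parameter and asserts that every expected metric family name appears in the text exposition. The same check expressed in Go, with an assumed exporter address standing in for the test's metrics_url:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// metricsExist scrapes one component through the exporter and checks that
// each expected metric family name shows up in the Prometheus text format.
func metricsExist(baseURL, comp string, names []string) error {
	resp, err := http.Get(baseURL + "?comp=" + comp)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	for _, n := range names {
		if !strings.Contains(string(body), n) {
			return fmt.Errorf("metric %s missing from %s", n, comp)
		}
	}
	return nil
}

func main() {
	// The address is an assumption; the test builds metrics_url from its
	// own environment instead.
	err := metricsExist("http://localhost:8001/metrics", "jobservice", []string{
		"harbor_jobservice_info",
		"harbor_jobservice_task_process_time_seconds",
		"harbor_jobservice_task_total",
	})
	fmt.Println(err)
}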