作者:BobbW
项目:bosu
// Register metadata for the counters tracking email notifications Bosun
// sends (and fails to send).
func init() {
	emailMetrics := []struct {
		name string
		desc string
	}{
		{"bosun.email.sent", "The number of email notifications sent by Bosun."},
		{"bosun.email.sent_failed", "The number of email notifications that Bosun failed to send."},
	}
	for _, m := range emailMetrics {
		metadata.AddMetricMeta(m.name, metadata.Counter, metadata.PerSecond, m.desc)
	}
}
作者:bridgewel
项目:bosu
// Register metadata for the schedule-lock instrumentation counters.
func init() {
	lockMetrics := []struct {
		name string
		unit metadata.Unit
		desc string
	}{
		{"bosun.schedule.lock_time", metadata.MilliSecond, "Length of time spent waiting for or holding the schedule lock."},
		{"bosun.schedule.lock_count", metadata.Count, "Number of times the given caller acquired the lock."},
	}
	for _, m := range lockMetrics {
		metadata.AddMetricMeta(m.name, metadata.Counter, m.unit, m.desc)
	}
}
作者:nicolle
项目:bosu
// Register metadata for the gauges produced by the ping collector.
func init() {
	pingMetrics := []struct {
		name string
		unit metadata.Unit
		desc string
	}{
		{"bosun.ping.resolved", metadata.Bool,
			"1=Ping resolved to an IP Address. 0=Ping failed to resolve to an IP Address."},
		{"bosun.ping.rtt", metadata.MilliSecond,
			"The number of milliseconds for the echo reply to be received. Also known as Round Trip Time."},
		{"bosun.ping.timeout", metadata.Ok,
			"0=Ping responded before timeout. 1=Ping did not respond before 5 second timeout."},
	}
	for _, m := range pingMetrics {
		metadata.AddMetricMeta(m.name, metadata.Gauge, m.unit, m.desc)
	}
}
作者:pd
项目:bosu
// AggregateMeta registers metric metadata for each aggregation suffix
// ("avg", "count", "min", "median", "max", "95", "99") of the given base
// metric. The "count" aggregation always uses the Count unit and a fixed
// description, since it reports the number of samples rather than the
// measured quantity itself; every other aggregation uses the caller's
// unit and description.
func AggregateMeta(metric string, unit metadata.Unit, desc string) {
	for _, suffix := range []string{"avg", "count", "min", "median", "max", "95", "99"} {
		name := metric + "_" + suffix
		if suffix == "count" {
			metadata.AddMetricMeta(name, metadata.Gauge, metadata.Count, "The number of samples per aggregation.")
		} else {
			metadata.AddMetricMeta(name, metadata.Gauge, unit, desc)
		}
	}
}
作者:nicolle
项目:bosu
// Register metadata for Bosun's self-monitoring gauges: state file size,
// alert-check timing and errors, and user action counts.
func init() {
	selfMetrics := []struct {
		name string
		unit metadata.Unit
		desc string
	}{
		{"bosun.statefile.size", metadata.Bytes,
			"The total size of the Bosun state file."},
		{"bosun.check.duration", metadata.Second,
			"The number of seconds it took Bosun to check each alert rule."},
		{"bosun.check.err", metadata.Error,
			"The running count of the number of errors Bosun has received while trying to evaluate an alert expression."},
		{"bosun.actions", metadata.Count,
			"The running count of actions performed by individual users (Closed alert, Acknowledged alert, etc)."},
	}
	for _, m := range selfMetrics {
		metadata.AddMetricMeta(m.name, metadata.Gauge, m.unit, m.desc)
	}
}
作者:wenxiaoy
项目:bosu
// Register metadata for the open-alert breakdown gauges; all share the
// Gauge rate type and the Alert unit, differing only in name and description.
func init() {
	alertGauges := []struct{ name, desc string }{
		{"bosun.alerts.current_severity", "The number of open alerts by current severity."},
		{"bosun.alerts.last_abnormal_severity", "The number of open alerts by last abnormal severity."},
		{"bosun.alerts.acknowledgement_status", "The number of open alerts by acknowledgement status."},
		{"bosun.alerts.active_status", "The number of open alerts by active status."},
	}
	for _, g := range alertGauges {
		metadata.AddMetricMeta(g.name, metadata.Gauge, metadata.Alert, g.desc)
	}
}
作者:jm
项目:bosu
// Configure miniprofiler defaults and register metadata for the metrics
// emitted while relaying put requests to the backend server.
func init() {
	// Profiler UI placement and default visibility.
	miniprofiler.Position = "bottomleft"
	miniprofiler.StartHidden = true
	// Profiling is opt-in per request: enabled only when the client sends
	// the miniprofiler header.
	miniprofiler.Enable = func(r *http.Request) bool {
		return r.Header.Get(miniprofilerHeader) != ""
	}
	metadata.AddMetricMeta("bosun.search.puts_relayed", metadata.Counter, metadata.Request,
		"The count of api put requests sent to Bosun for relaying to the backend server.")
	metadata.AddMetricMeta("bosun.search.datapoints_relayed", metadata.Counter, metadata.Item,
		"The count of data points sent to Bosun for relaying to the backend server.")
	metadata.AddMetricMeta("bosun.relay.bytes", metadata.Counter, metadata.BytesPerSecond,
		"Bytes per second relayed from Bosun to the backend server.")
	// Description grammar fixed: "for request relayed" -> "for requests relayed".
	metadata.AddMetricMeta("bosun.relay.response", metadata.Counter, metadata.PerSecond,
		"HTTP response codes from the backend server for requests relayed through Bosun.")
}
作者:snowsnai
项目:bosu
// Register metadata for the open-alert breakdown gauges and for the
// template-render timing aggregates.
func init() {
	alertGauges := []struct{ name, desc string }{
		{"bosun.alerts.current_severity", "The number of open alerts by current severity."},
		{"bosun.alerts.last_abnormal_severity", "The number of open alerts by last abnormal severity."},
		{"bosun.alerts.acknowledgement_status", "The number of open alerts by acknowledgement status."},
		{"bosun.alerts.active_status", "The number of open alerts by active status."},
	}
	for _, g := range alertGauges {
		metadata.AddMetricMeta(g.name, metadata.Gauge, metadata.Alert, g.desc)
	}
	// Template rendering gets the full set of aggregate metrics (avg, count, ...).
	collect.AggregateMeta("bosun.template.render", metadata.MilliSecond, "The amount of time it takes to render the specified alert template.")
}
作者:Skyscanne
项目:bosu
// Register metadata for the open-alert breakdown gauges, the
// per-notification acknowledgement metrics, and the template-render
// timing aggregates.
func init() {
	metadata.AddMetricMeta(
		"bosun.alerts.current_severity", metadata.Gauge, metadata.Alert,
		"The number of open alerts by current severity.")
	metadata.AddMetricMeta(
		"bosun.alerts.last_abnormal_severity", metadata.Gauge, metadata.Alert,
		"The number of open alerts by last abnormal severity.")
	metadata.AddMetricMeta(
		"bosun.alerts.acknowledgement_status", metadata.Gauge, metadata.Alert,
		"The number of open alerts by acknowledgement status.")
	metadata.AddMetricMeta(
		"bosun.alerts.active_status", metadata.Gauge, metadata.Alert,
		"The number of open alerts by active status.")
	metadata.AddMetricMeta("alerts.acknowledgement_status_by_notification", metadata.Gauge, metadata.Alert,
		"The number of alerts by acknowledgement status and notification. Does not reflect escalation chains.")
	// Description typo fixed: stray double period ("notification..") removed.
	metadata.AddMetricMeta("alerts.oldest_unacked_by_notification", metadata.Gauge, metadata.Second,
		"How old the oldest unacknowledged notification is by notification. Does not reflect escalation chains.")
	// Template rendering gets the full set of aggregate metrics (avg, count, ...).
	collect.AggregateMeta("bosun.template.render", metadata.MilliSecond, "The amount of time it takes to render the specified alert template.")
}
作者:pd
项目:bosu
// InitChan is similar to Init, but uses the given channel instead of creating a
// new one.
//
// It wires up the collect package's global state — the OpenTSDB put URL
// derived from tsdbhost, the metric-name prefix (root + "."), and the
// datapoint channel — then starts the queuer/send/collect goroutines.
// Unless DisableDefaultCollectors is set, it also registers the package's
// self-monitoring gauges and their metric metadata, and zeroes a few
// counters so they report 0 after restart rather than going unknown.
//
// It returns an error if called more than once, if root is not a clean
// metric name, or if the put URL cannot be built from tsdbhost.
func InitChan(tsdbhost *url.URL, root string, ch chan *opentsdb.DataPoint) error {
	// tchan doubles as the "already initialized" flag.
	if tchan != nil {
		return fmt.Errorf("cannot init twice")
	}
	// root prefixes every metric name, so it must itself be a valid metric part.
	if err := checkClean(root, "metric root"); err != nil {
		return err
	}
	// Build the OpenTSDB put endpoint from the supplied host URL.
	u, err := tsdbhost.Parse("/api/put")
	if err != nil {
		return err
	}
	// A port-only host like ":4242" is treated as localhost.
	if strings.HasPrefix(u.Host, ":") {
		u.Host = "localhost" + u.Host
	}
	tsdbURL = u.String()
	metricRoot = root + "."
	tchan = ch
	// Background workers (defined elsewhere in this package): presumably
	// queuer drains tchan, send posts batches to tsdbURL, and collect
	// aggregates in-process samples — confirm against their definitions.
	go queuer()
	go send()
	go collect()
	if DisableDefaultCollectors {
		return nil
	}
	// Self-monitoring gauges. Each closure snapshots shared state under the
	// appropriate lock so readers don't race the worker goroutines.
	Set("collect.dropped", Tags, func() (i interface{}) {
		slock.Lock()
		i = dropped
		slock.Unlock()
		return
	})
	Set("collect.sent", Tags, func() (i interface{}) {
		slock.Lock()
		i = sent
		slock.Unlock()
		return
	})
	Set("collect.queued", Tags, func() (i interface{}) {
		qlock.Lock()
		i = len(queue)
		qlock.Unlock()
		return
	})
	Set("collect.alloc", Tags, func() interface{} {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		return ms.Alloc
	})
	Set("collect.goroutines", Tags, func() interface{} {
		return runtime.NumGoroutine()
	})
	// Metadata for all of the metrics registered above plus the post metrics
	// emitted by the send path.
	AggregateMeta(metricRoot+"collect.post.batchsize", metadata.Count, descCollectPostBatchSize)
	AggregateMeta(metricRoot+"collect.post.duration", metadata.MilliSecond, descCollectPostDuration)
	metadata.AddMetricMeta(metricRoot+"collect.alloc", metadata.Gauge, metadata.Bytes, descCollectAlloc)
	metadata.AddMetricMeta(metricRoot+"collect.goroutines", metadata.Gauge, metadata.Count, descCollectGoRoutines)
	metadata.AddMetricMeta(metricRoot+"collect.post.bad_status", metadata.Counter, metadata.PerSecond, descCollectPostBad)
	metadata.AddMetricMeta(metricRoot+"collect.post.count", metadata.Counter, metadata.PerSecond, descCollectPostCount)
	metadata.AddMetricMeta(metricRoot+"collect.post.error", metadata.Counter, metadata.PerSecond, descCollectPostError)
	metadata.AddMetricMeta(metricRoot+"collect.post.restore", metadata.Counter, metadata.PerSecond, descCollectPostRestore)
	metadata.AddMetricMeta(metricRoot+"collect.post.total_bytes", metadata.Counter, metadata.Bytes, descCollectPostTotalBytes)
	metadata.AddMetricMeta(metricRoot+"collect.post.total_duration", metadata.Counter, metadata.MilliSecond, descCollectPostTotalDuration)
	metadata.AddMetricMeta(metricRoot+"collect.queued", metadata.Gauge, metadata.Item, descCollectQueued)
	metadata.AddMetricMeta(metricRoot+"collect.sent", metadata.Counter, metadata.PerSecond, descCollectSent)
	metadata.AddMetricMeta(metricRoot+"collect.dropped", metadata.Counter, metadata.PerSecond, descCollectDropped)
	// Make sure these get zeroed out instead of going unknown on restart
	Add("collect.post.error", Tags, 0)
	Add("collect.post.bad_status", Tags, 0)
	Add("collect.post.restore", Tags, 0)
	return nil
}
作者:BobbW
项目:bosu
// Register metadata for tsdbrelay's relay counters.
func init() {
	for _, m := range []struct{ name, desc string }{
		{"tsdbrelay.udp.packets", "Number of valid udp packets received"},
		{"tsdbrelay.puts.relayed", "Number of successful puts relayed"},
		{"tsdbrelay.metadata.relayed", "Number of successful metadata puts relayed"},
	} {
		metadata.AddMetricMeta(m.name, metadata.Counter, metadata.Count, m.desc)
	}
}
作者:noblehn
项目:bosu
// Register metadata for tsdbrelay's put and metadata relay counters.
func init() {
	addRelayed := func(name, desc string) {
		metadata.AddMetricMeta(name, metadata.Counter, metadata.Count, desc)
	}
	addRelayed("tsdbrelay.puts.relayed", "Number of successful puts relayed")
	addRelayed("tsdbrelay.metadata.relayed", "Number of successful metadata puts relayed")
}
作者:eswd
项目:bosu
//......... part of the code is omitted here .........
	// Start an ExtraHop collector for each configured appliance.
	for _, x := range conf.ExtraHop {
		check(collectors.ExtraHop(x.Host, x.APIKey, x.FilterBy, x.FilterPercent))
	}
	// err presumably carries the first failure recorded by the check() calls
	// above — the head of this function is not visible here; confirm there.
	if err != nil {
		slog.Fatal(err)
	}
	collectors.KeepalivedCommunity = conf.KeepalivedCommunity
	// Add all process collectors. This is platform specific.
	collectors.WatchProcesses()
	collectors.WatchProcessesDotNet()
	if *flagFake > 0 {
		collectors.InitFake(*flagFake)
	}
	collect.Debug = *flagDebug
	util.Debug = *flagDebug
	collect.DisableDefaultCollectors = conf.DisableSelf
	// Narrow the collector set to those matching the configured filter.
	c := collectors.Search(conf.Filter)
	if len(c) == 0 {
		slog.Fatalf("Filter %v matches no collectors.", conf.Filter)
	}
	for _, col := range c {
		col.Init()
	}
	u, err := parseHost(conf.Host)
	// -list exits after listing collectors; -print substitutes a dummy host
	// URL (and tolerates a host parse error); otherwise a bad host is fatal.
	if *flagList {
		list(c)
		return
	} else if *flagPrint {
		u = &url.URL{Scheme: "http", Host: "localhost:0"}
	} else if err != nil {
		slog.Fatalf("invalid host %v: %v", conf.Host, err)
	}
	freq := time.Second * time.Duration(conf.Freq)
	if freq <= 0 {
		slog.Fatal("freq must be > 0")
	}
	collectors.DefaultFreq = freq
	collect.Freq = freq
	if conf.BatchSize < 0 {
		slog.Fatal("BatchSize must be > 0")
	}
	if conf.BatchSize != 0 {
		collect.BatchSize = conf.BatchSize
	}
	// Tag every datapoint with the OS in addition to the configured tags.
	collect.Tags = conf.Tags.Copy().Merge(opentsdb.TagSet{"os": runtime.GOOS})
	if *flagPrint {
		collect.Print = true
	}
	if !*flagDisableMetadata {
		if err := metadata.Init(u, *flagDebug); err != nil {
			slog.Fatal(err)
		}
	}
	// Run the collectors: cdp carries datapoints, cquit signals shutdown.
	cdp, cquit := collectors.Run(c)
	if u != nil {
		slog.Infoln("OpenTSDB host:", u)
	}
	if err := collect.InitChan(u, "scollector", cdp); err != nil {
		slog.Fatal(err)
	}
	// Report the build date hourly as a version gauge when it parses as an int.
	if version.VersionDate != "" {
		v, err := strconv.ParseInt(version.VersionDate, 10, 64)
		if err == nil {
			go func() {
				metadata.AddMetricMeta("scollector.version", metadata.Gauge, metadata.None,
					"Scollector version number, which indicates when scollector was built.")
				for {
					if err := collect.Put("version", collect.Tags, v); err != nil {
						slog.Error(err)
					}
					time.Sleep(time.Hour)
				}
			}()
		}
	}
	if *flagBatchSize > 0 {
		collect.BatchSize = *flagBatchSize
	}
	// Watchdog: panic (crashing the process) if runtime allocations exceed 500MB.
	go func() {
		const maxMem = 500 * 1024 * 1024 // 500MB
		var m runtime.MemStats
		for range time.Tick(time.Minute) {
			runtime.ReadMemStats(&m)
			if m.Alloc > maxMem {
				panic("memory max reached")
			}
		}
	}()
	// Block until interrupted, then stop collectors and flush pending points.
	sChan := make(chan os.Signal)
	signal.Notify(sChan, os.Interrupt)
	<-sChan
	close(cquit)
	// try to flush all datapoints on sigterm, but quit after 5 seconds no matter what.
	time.AfterFunc(5*time.Second, func() {
		os.Exit(0)
	})
	collect.Flush()
}
作者:nicolle
项目:bosu
//......... part of the code is omitted here .........
	if err != nil {
		slog.Fatalf("Error adding tag overrides: %s", err)
	}
	u, err := parseHost(conf.Host)
	// -list exits after listing collectors; -print substitutes a dummy host
	// URL (and tolerates a host parse error); otherwise a bad host is fatal.
	if *flagList {
		list(c)
		return
	} else if *flagPrint {
		u = &url.URL{Scheme: "http", Host: "localhost:0"}
	} else if err != nil {
		slog.Fatalf("invalid host %v: %v", conf.Host, err)
	}
	freq := time.Second * time.Duration(conf.Freq)
	if freq <= 0 {
		slog.Fatal("freq must be > 0")
	}
	collectors.DefaultFreq = freq
	collect.Freq = freq
	if conf.BatchSize < 0 {
		slog.Fatal("BatchSize must be > 0")
	}
	if conf.BatchSize != 0 {
		collect.BatchSize = conf.BatchSize
	}
	// Tag every datapoint with the OS in addition to the configured tags.
	collect.Tags = conf.Tags.Copy().Merge(opentsdb.TagSet{"os": runtime.GOOS})
	if *flagPrint {
		collect.Print = true
	}
	if !*flagDisableMetadata {
		if err := metadata.Init(u, *flagDebug); err != nil {
			slog.Fatal(err)
		}
	}
	// Run the collectors: cdp carries datapoints, cquit signals shutdown.
	cdp, cquit := collectors.Run(c)
	if u != nil {
		slog.Infoln("OpenTSDB host:", u)
	}
	collect.UseNtlm = conf.UseNtlm
	if err := collect.InitChan(u, "scollector", cdp); err != nil {
		slog.Fatal(err)
	}
	// Report the build date hourly as a version gauge, but only when
	// self-collection is enabled and the date parses as an int.
	if collect.DisableDefaultCollectors == false && version.VersionDate != "" {
		v, err := strconv.ParseInt(version.VersionDate, 10, 64)
		if err == nil {
			go func() {
				metadata.AddMetricMeta("scollector.version", metadata.Gauge, metadata.None,
					"Scollector version number, which indicates when scollector was built.")
				for {
					if err := collect.Put("version", collect.Tags, v); err != nil {
						slog.Error(err)
					}
					time.Sleep(time.Hour)
				}
			}()
		}
	}
	if *flagBatchSize > 0 {
		collect.BatchSize = *flagBatchSize
	}
	// MaxQueueLen must hold at least one full batch.
	if conf.MaxQueueLen != 0 {
		if conf.MaxQueueLen < collect.BatchSize {
			slog.Fatalf("MaxQueueLen must be >= %d (BatchSize)", collect.BatchSize)
		}
		collect.MaxQueueLen = conf.MaxQueueLen
	}
	// Memory watchdog limit, in megabytes; defaults to 500 unless configured.
	maxMemMB := uint64(500)
	if conf.MaxMem != 0 {
		maxMemMB = conf.MaxMem
	}
	go func() {
		var m runtime.MemStats
		for range time.Tick(time.Second * 30) {
			runtime.ReadMemStats(&m)
			allocMB := m.Alloc / 1024 / 1024
			if allocMB > maxMemMB {
				slog.Fatalf("memory max runtime reached: (current alloc: %v megabytes, max: %v megabytes)", allocMB, maxMemMB)
			}
			// See process_windows.go and process_linux.go for total process memory usage.
			// Note that in linux the rss metric includes shared pages, whereas in
			// Windows the private working set does not include shared memory.
			// Total memory used seems to scale linearly with m.Alloc.
			// But we want this to catch a memory leak outside the runtime (WMI/CGO).
			// So for now just add any runtime allocations to the allowed total limit.
			maxMemTotalMB := maxMemMB + allocMB
			if collectors.TotalScollectorMemoryMB > maxMemTotalMB {
				slog.Fatalf("memory max total reached: (current total: %v megabytes, current runtime alloc: %v megabytes, max: %v megabytes)", collectors.TotalScollectorMemoryMB, allocMB, maxMemTotalMB)
			}
		}
	}()
	// Block until interrupted, then stop collectors and flush pending points.
	sChan := make(chan os.Signal)
	signal.Notify(sChan, os.Interrupt)
	<-sChan
	close(cquit)
	// try to flush all datapoints on sigterm, but quit after 5 seconds no matter what.
	time.AfterFunc(5*time.Second, func() {
		os.Exit(0)
	})
	collect.Flush()
}
作者:uro
项目:bosu
// Register metadata for the redis-backed search index metrics.
func init() {
	const (
		queuedDesc  = "Number of datapoints queued for indexing to redis"
		droppedDesc = "Number of datapoints discarded without being saved to redis"
	)
	metadata.AddMetricMeta("bosun.search.index_queue", metadata.Gauge, metadata.Count, queuedDesc)
	metadata.AddMetricMeta("bosun.search.dropped", metadata.Counter, metadata.Count, droppedDesc)
}
作者:couchan
项目:bosu
//......... part of the code is omitted here .........
	// Start collectors for each configured ICMP, AWS, vSphere, process, and
	// httpunit target; check() presumably records the first failure in err —
	// the head of this function is not visible here; confirm there.
	check(collectors.ICMP(i.Host))
	}
	for _, a := range conf.AWS {
		check(collectors.AWS(a.AccessKey, a.SecretKey, a.Region))
	}
	for _, v := range conf.Vsphere {
		check(collectors.Vsphere(v.User, v.Password, v.Host))
	}
	for _, p := range conf.Process {
		check(collectors.AddProcessConfig(p))
	}
	for _, h := range conf.HTTPUnit {
		if h.TOML != "" {
			check(collectors.HTTPUnitTOML(h.TOML))
		}
		if h.Hiera != "" {
			check(collectors.HTTPUnitHiera(h.Hiera))
		}
	}
	if err != nil {
		slog.Fatal(err)
	}
	collectors.KeepalivedCommunity = conf.KeepalivedCommunity
	// Add all process collectors. This is platform specific.
	collectors.WatchProcesses()
	collectors.WatchProcessesDotNet()
	if *flagFake > 0 {
		collectors.InitFake(*flagFake)
	}
	collect.Debug = *flagDebug
	util.Debug = *flagDebug
	collect.DisableDefaultCollectors = conf.DisableSelf
	// Narrow the collector set to those matching the configured filter.
	c := collectors.Search(conf.Filter)
	if len(c) == 0 {
		slog.Fatalf("Filter %v matches no collectors.", conf.Filter)
	}
	for _, col := range c {
		col.Init()
	}
	u, err := parseHost(conf.Host)
	// -list exits after listing collectors; otherwise a bad host is fatal.
	if *flagList {
		list(c)
		return
	} else if err != nil {
		slog.Fatalf("invalid host %v: %v", conf.Host, err)
	}
	freq := time.Second * time.Duration(conf.Freq)
	if freq <= 0 {
		slog.Fatal("freq must be > 0")
	}
	collectors.DefaultFreq = freq
	collect.Freq = freq
	// Tag every datapoint with the OS.
	collect.Tags = opentsdb.TagSet{"os": runtime.GOOS}
	if *flagPrint {
		collect.Print = true
	}
	if !*flagDisableMetadata {
		if err := metadata.Init(u, *flagDebug); err != nil {
			slog.Fatal(err)
		}
	}
	// Run the collectors; cdp carries the produced datapoints.
	cdp := collectors.Run(c)
	if u != nil {
		slog.Infoln("OpenTSDB host:", u)
	}
	if err := collect.InitChan(u, "scollector", cdp); err != nil {
		slog.Fatal(err)
	}
	// Report the build date hourly as a version gauge when it parses as an int.
	if version.VersionDate != "" {
		v, err := strconv.ParseInt(version.VersionDate, 10, 64)
		if err == nil {
			go func() {
				metadata.AddMetricMeta("scollector.version", metadata.Gauge, metadata.None,
					"Scollector version number, which indicates when scollector was built.")
				for {
					if err := collect.Put("version", collect.Tags, v); err != nil {
						slog.Error(err)
					}
					time.Sleep(time.Hour)
				}
			}()
		}
	}
	if *flagBatchSize > 0 {
		collect.BatchSize = *flagBatchSize
	}
	// Watchdog: panic (crashing the process) if runtime allocations exceed 500MB.
	go func() {
		const maxMem = 500 * 1024 * 1024 // 500MB
		var m runtime.MemStats
		for range time.Tick(time.Minute) {
			runtime.ReadMemStats(&m)
			if m.Alloc > maxMem {
				panic("memory max reached")
			}
		}
	}()
	// Block forever; this variant has no signal-based shutdown path.
	select {}
}