Base searches used by the panels below:
- splunkd events: (index=_internal OR index=core_splunk_internal) sourcetype=splunkd
- metrics events: (index=_internal OR index=core_splunk_internal) (sourcetype=metrics OR sourcetype=splunkd) METRICS
- generated search: $generated_search$ (written at runtime by the drilldown in panel 7)
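core_splunk_internal appears in the base searches above but is not a default Splunk index. A quick way to confirm whether it exists in your environment (a minimal sketch reusing the same eventcount pattern as the index input below):

| eventcount index=_internal index=core_splunk_internal summarize=false
| stats sum(count) as events by index

If core_splunk_internal is absent it simply returns no row, and the OR clause in the base searches is harmless.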
Input: Indexers — multiselect, token $selected_indexers$ (fieldForLabel=label, fieldForValue=search, time range -24h@h to now, default choice None). The list is built from CMMaster events on the cluster manager and offers each indexer individually, each site, and each whole cluster; the chosen host filters are referenced in the searches below as $selected_targets$:

$splunkd$ CMMaster status=success site* earliest=-4hr latest=now source=*splunkd.log*
| rex field=_raw max_match=64 "(?<site_pair>site\d+,\"?[^\",]+)"
| rex field=_raw "peer_name=(?<single_site>[^\s]+)"
| eval site_pair=if(isnull(site_pair),"site,".single_site,site_pair)
| eval cluster_master=host
| fields + site_pair cluster_master
| fields - _*
| dedup site_pair
| mvexpand site_pair
| dedup site_pair
| rex field=site_pair "^(?<site_id>site[^,]*),\"?(?<indexer>.*)"
| rex field=cluster_master "^(?<short_name_cm>[^\.]+)"
| eval search="host=".indexer, host_count=1
| appendpipe
    [| stats values(indexer) as indexers by site_id short_name_cm
     | eval host_count=mvcount(indexers), search="host IN (".mvjoin(mvfilter(indexers!=""), ", ").")"
     | eval label=site_id." (".host_count." idxs @ ".short_name_cm.")"]
| appendpipe
    [| stats values(indexer) as indexers dc(site_id) as site_count by short_name_cm
     | eval host_count=mvcount(indexers), search="host IN (".mvjoin(mvfilter(indexers!=""), ", ").")"
     | eval label=short_name_cm." (".host_count." idx ".site_count." sites)"]
| rex field=indexer "^(?<short_name_idx>[^\.]+)"
| eval label=if(isnull(label), short_name_idx." (".site_id."@".short_name_cm.")", label)
| stats max(host_count) as count by label search
| sort 0 - count

Input: Index — multiselect, token $selected_index$ (fieldForLabel=label, fieldForValue=index, time range -24h@h to now, default main, valuePrefix index=, delimiter OR, wrapped in parentheses):

| eventcount index=* index=_* summarize=false
| rename server as splunk_server
| search $selected_targets$
| stats sum(count) as size by index
| sort - size
| eval label=index."(".size.")"

Numeric inputs follow with extracted defaults 1, 2.8, 60 and 60; these appear to supply the $power$, $step_size$, $offset$ and step-count tokens used by panels 6 and 7.
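To see what the indexer-input search extracts, the two rex stages can be replayed against a canned event. This is a minimal sketch; the CMMaster line below is a hypothetical stand-in, not a verbatim splunkd.log message:

| makeresults
| eval _raw="INFO CMMaster - status=success peer_name=idx3.example.com (site1,\"idx1.example.com\"),(site2,\"idx2.example.com\")"
| rex field=_raw max_match=64 "(?<site_pair>site\d+,\"?[^\",]+)"
| rex field=_raw "peer_name=(?<single_site>[^\s]+)"
| eval site_pair=if(isnull(site_pair),"site,".single_site,site_pair)
| mvexpand site_pair
| rex field=site_pair "^(?<site_id>site[^,]*),\"?(?<indexer>.*)"
| table site_pair site_id indexer

With this sample it yields two rows (site1/idx1.example.com and site2/idx2.example.com); the peer_name fallback only fires when no site pairs are found in the event.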
6. Rate of increase of time based on power (select the number of iterations; default 20)

Panel "Rate of time increase in seconds - click on a column" (time range -24h@h to now; clicking a column sets $click.value$):

| makeresults
| eval step=mvrange(1,$max_samples_to_offer$,1)
| mvexpand step
| eval "time range in seconds"=$step_size$*pow(step,$power$)
| fields step "time range in seconds"
| fields - _time

7. Click on the duration to execute the search

When you click a duration, a search is generated in SPL and written to the base search for execution. Durations are formatted as "days+hours:minutes:seconds"; for example, 10+6:30:00 means 10 days, 6 hours and 30 minutes. It is best to measure event distribution over shorter spans, such as an hour; if you are not getting good event distribution within that window, the platform needs tuning.

Generator search (time range -24h@h to now; the drilldown writes $row.search$ into the $generated_search$ base search):

| makeresults
| eval step=mvrange(1,$steps$,1)
| mvexpand step
| eval step_size=round($step_size$*pow(step,$power$))
| eval jump=step_size
| fields step_size jump step
| eval tstats_preamble=if(step==1,"| tstats prestats=t","| tstats prestats=t append=t")
| eval tstats_search=" ".tstats_preamble." count max(_time) as latest_time min(_time) as min_time where earliest=-".(jump+$offset$)."sec latest=-$offset$sec $selected_targets$ $selected_index$ by splunk_server index | eval period=if(isnull(period),\"".step_size."\",period) ", step_string=tostring(step_size,"duration")
| fields - jump tstats_preamble
| stats last(step_string) as max_history list(*) as *
| eval tstats_search=mvjoin(tstats_search," ")." | stats count by period splunk_server index"
| eventstats last(step_string) as max_history_str last(step_size) as max_history
| eval step_string=mvjoin(step_string,","), step_size=mvjoin(step_size,","), step=mvjoin(step,",")
| eval post_process="| stats sum(count) as count by period splunk_server index"
| eval padding_search="[| tstats prestats=t count where earliest=-".max_history."sec latest=-60sec $selected_index$ by splunk_server index | stats count by splunk_server index | eval count=0 | eval period=\"".step_size."\" | makemv delim=\",\" period | mvexpand period]"
| eval search=tstats_search
| fields max_history_str search step_string

You are viewing $steps$ steps starting with a period of $step_size$ seconds, increasing at the power $power$, and splitting the data by index.
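To make the generator concrete, this is roughly what the search written to $generated_search$ looks like for the first two steps with the defaults step_size=60, power=2.8 and offset=60 (so step 2 is round(60*2^2.8)=418). A sketch only; host IN (idx1, idx2) and index=main are illustrative stand-ins for $selected_targets$ and $selected_index$:

| tstats prestats=t count max(_time) as latest_time min(_time) as min_time where earliest=-120sec latest=-60sec host IN (idx1, idx2) index=main by splunk_server index
| eval period=if(isnull(period),"60",period)
| tstats prestats=t append=t count max(_time) as latest_time min(_time) as min_time where earliest=-478sec latest=-60sec host IN (idx1, idx2) index=main by splunk_server index
| eval period=if(isnull(period),"418",period)
| stats count by period splunk_server index

Each appended tstats pass scans a progressively longer window that always ends at -60sec; rows not yet labelled are tagged with that step's window length as period, and the closing stats rolls the prestats results up by period, indexer and index.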
Variation of events across the indexers

Index filter input, token $filter_index$, populated from the selected data (fieldForLabel=label, fieldForValue=index, default *, valuePrefix index=", valueSuffix ", delimiter OR, wrapped in parentheses):

| stats count by index
| eval label=index." (".count.")"

Chart of events per indexer per period:

| search $filter_index$
| eval period=tostring(period,"duration")
| stats sum(count) as count by period splunk_server
| xyseries splunk_server period count

What was the normalised standard deviation over time? We want this to bottom out as soon as possible.

| eventstats dc(splunk_server) as known_servers
| stats dc(splunk_server) as servers_in_period avg(count) as avg var(count) as variance by period index known_servers
| eval missing_servers_in_period = known_servers - servers_in_period, fixed_variance=(servers_in_period*variance+pow(missing_servers_in_period*avg,2))/known_servers, fixed_stdev=sqrt(fixed_variance)
| eval normalized_stdev=fixed_stdev/avg
| fields - avg fixed_stdev fixed_variance variance
| chart limit=100 values(normalized_stdev) as normalized_stdev by period index
| eval period=tostring(period,"duration")

How many indexers received data during each period? We need this to ramp up as quickly as possible.

| where count!=0
| chart limit=100 dc(splunk_server) by period index
| eval period=tostring(period,"duration")

Events scanned in each step. The number of events scanned should go up quickly.

| chart limit=100 sum(count) as ratio by period index
| eval period=tostring(period,"duration")
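All of these panels render period with tostring(period, "duration"), which produces the days+hours:minutes:seconds format described in panel 7. A quick sketch to see the formatting on a few sample values, including the 10+06:30:00 example from that description:

| makeresults
| eval seconds=split("60,418,37800,887400",",")
| mvexpand seconds
| eval period=tostring(tonumber(seconds),"duration")
| table seconds period

This returns 00:01:00, 00:06:58, 10:30:00 and 10+06:30:00 respectively, with the day count only appearing once a period exceeds 24 hours.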