-
-
Save dmsnell/bfcec327f1a8235ba57b9253f218f044 to your computer and use it in GitHub Desktop.
Comparing CI test runs across different branches
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env Rscript | |
library("car") | |
library("ggplot2") | |
library("ggpubr") | |
library("parsedate") | |
library("patchwork") | |
library("ggstatsplot") | |
# Format a signed duration (in minutes) as a compact human-readable string.
#
# total_minutes: numeric duration in minutes; may be negative or zero.
# Returns "equal" for zero, otherwise an arrow prefix ("↑" for positive,
# "↓" for negative) followed by a magnitude like "5m 30s", "5m", or "30s".
human_time <- function (total_minutes) {
  minutes <- floor(abs(total_minutes))
  seconds <- round((abs(total_minutes) - minutes) * 60)
  # Fix: round() can produce exactly 60 seconds (e.g. 0.9999 minutes);
  # carry it into the minutes component so we never print "60s" or "1m 60s".
  if (seconds == 60) {
    minutes <- minutes + 1
    seconds <- 0
  }
  sign <- sign(total_minutes)
  mag <- if (minutes == 0) {
    paste(seconds, "s", sep="")
  } else if (seconds == 0) {
    paste(minutes, "m", sep="")
  } else {
    paste(minutes, "m ", seconds, "s", sep="")
  }
  if (sign == 0) {
    "equal"
  } else if (sign > 0) {
    paste("↑", mag)
  } else {
    paste("↓", mag)
  }
}
# ---- Command-line interface ----
# Usage: branch-compare.r <csv1> <csv2> <label1> <label2> <pr1> <pr2>
# csv1/csv2: headerless CSVs of "iso8601-timestamp,duration-in-seconds" rows.
args <- commandArgs(trailingOnly = TRUE)
branch1 <- args[1]
branch2 <- args[2]
name1 <- args[3]
name2 <- args[4]
pr1 <- args[5]
pr2 <- args[6]
# Note: branch1/branch2 are deliberately reused — first the CSV path,
# then the loaded data frame.
branch1 <- read.csv(branch1, header = FALSE, col.names = c("Time", "Duration"))
branch2 <- read.csv(branch2, header = FALSE, col.names = c("Time", "Duration"))
# Parse timestamps, convert durations from seconds to minutes, and tag each
# row with its branch label so both frames can be stacked for plotting.
branch1 <- transform(branch1, Time = parse_iso_8601(Time))
branch1 <- transform(branch1, Duration = Duration / 60)
branch1 <- transform(branch1, Branch = name1)
branch2 <- transform(branch2, Time = parse_iso_8601(Time))
branch2 <- transform(branch2, Duration = Duration / 60)
branch2 <- transform(branch2, Branch = name2)
data <- rbind(branch1, branch2)
delta_mean <- mean(branch1$Duration) - mean(branch2$Duration)
# One-sided Welch t-test: alternative "less" means H1 is
# "branch1 (treatment) mean duration < branch2 (control) mean duration".
ttest <- t.test(branch1$Duration, branch2$Duration, alternative = "less")
p_value <- format(round(ttest$p.value, 3), nsmall=3)
print(ttest)
# Non-parametric cross-check plus per-sample normality tests.
print(wilcox.test(branch1$Duration, branch2$Duration, alternative = "less"))
print(shapiro.test(branch1$Duration))
print(shapiro.test(branch2$Duration))
# Overlaid density histograms of run durations, one fill per branch, with a
# fitted normal curve and a dashed mean line for each
# (red = branch1/treatment, blue = branch2/control).
p_hist <- (
  ggplot(data, aes(x = Duration, fill = Branch))
  + geom_histogram(aes(y = after_stat(density)), bins = 80, position = "identity", alpha = 0.4)
  # NOTE(review): limits = c(10, 35) silently drops runs outside 10–35
  # minutes from this plot — confirm that clipping is intended.
  + scale_x_continuous(breaks = seq(0, 80, 5), limits = c(10, 35))
  + stat_function(fun = dnorm, args = list(mean = mean(branch1$Duration), sd = sd(branch1$Duration)), color = "red")
  + geom_vline(xintercept = mean(branch1$Duration), color="red", linetype="dashed")
  + stat_function(fun = dnorm, args = list(mean = mean(branch2$Duration), sd = sd(branch2$Duration)), color = "blue")
  + geom_vline(xintercept = mean(branch2$Duration), color="blue", linetype="dashed")
  + ggtitle(
    "Comparing branches",
    paste("p(", name1, " is not faster) = ", p_value, ", mean diff = ", human_time(delta_mean), sep="")
  )
  + ylab("Relative Percentage of Runs")
  + xlab("Runtime Duration (minutes)")
  + theme(
    axis.title = element_text(size=8),
    legend.position = "bottom"
  )
)
# Violin/box comparison with ggstatsplot's built-in Welch-test annotation.
p_boxes <- (
  ggbetweenstats(
    data = data,
    x = Branch,
    y = Duration,
    conf.level = 0.999,
    point.args.size = 1,
    plot.type = "boxviolin", # for boxplot
    type = "parametric", # for student's t-test
    var.equal = FALSE, # unequal variances
    centrality.plotting = FALSE # remove mean
  )
  + labs(caption = NULL) # remove caption
  + ylab(NULL)
)
# Faceted Q-Q plot of both branches.
# NOTE(review): p_qq is built but never included in the printed layout below;
# the per-branch qqp1/qqp2 plots are used instead.
p_qq <- (
  ggplot(data, aes(sample = Duration))
  + geom_qq()
  + geom_qq_line()
  + facet_wrap(~Branch)
)
# Compact theme shared by the two per-branch normality plots.
qq_theme = theme(
  axis.title = element_text(size = 8),
  axis.text = element_text(size = 7)
)
# Per-branch Q-Q plots (normality diagnostics).
qqp1 <- (
  ggqqplot(branch1$Duration)
  + qq_theme
  + ylab(name1)
  + xlab("Normality")
)
qqp2 <- (
  ggqqplot(branch2$Duration)
  + qq_theme
  + ylab(name2)
  + xlab("Normality")
)
qqp <- qqp1 + qqp2
# Compose with patchwork (histogram | boxes over Q-Q), then ggsave writes
# the last-printed plot to ~/Downloads.
print(p_hist + (p_boxes / qqp))
ggsave(paste("~/Downloads/branch-compare-", pr1, "-", pr2, ".png", sep = ""), width = 10, height = 6, units = "in", dpi = 300)
dev.off()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
%%% @doc Poking around at the Gutenberg repository in GitHub Actions | |
%%% | |
%%% ## Performance test run timing | |
%%% | |
%%% 1. Fetch "Performance Tests" runs | |
%%% AllRuns <= /actions/workflows/1831909/runs?status=completed&event=pull_request | |
%%% | |
%%% 2. For each one, fetch the runtime values | |
%%% CheckSuiteId = AllRuns.check_suite_id | |
%%% Run <= /check-suites/[CheckSuiteId]/check-runs | |
%%% PR_Title = Run.display_title | |
%%% Branch = Run.head_branch | |
%%% PR_Number = Run.pull_requests[0].number | |
%%% #{ conclusion, started_at, completed_at } = Run | |
%%% Runtime = parse8601( completed_at ) - parse8601( started_at ) | |
%%% | |
-module(gh_actions).
%% Exports include internals that must stay reachable by name:
%% branch_csv/1 is invoked via rpc:call/4 from branch_compare/3, and several
%% getters are called as ?MODULE:get/1 inside this module.
-export([
    branches/0,
    branch_compare/2,
    branch_compare/3,
    branch_csv/1,
    branch_csv/2,
    branch_report/0,
    chart/1,
    curl/1,
    curl/2,
    get/1,
    get/2,
    last_n/2,
    perf_test_runs/1,
    all_perf_runs/0,
    all_perf_runs/1,
    boogey/1,
    duration_data/1,
    job_runtime_summary/1,
    jobs_for_branch/1,
    to/2,
    job_summaries_to_excel/1,
    column/2,
    runtime_summary/1,
    rs_s_to_m/1
]).
%% GitHub personal access token (placeholder — replace before use; note that
%% curl/2 prints this value in clear text).
-define(PAT, <<"YOUR_PERSONAL_ACCESS_TOKEN_HERE">>).
-define(BASE_URL, <<"https://api.github.com/repos/wordpress/gutenberg">>).
%% The active workflow id is switched by editing which -define is live.
%% NOTE(review): the header comment (step 1) refers to 1831909 as the
%% "Performance Tests" workflow, while the live value below is 1701506 (also
%% labeled E2E in the commented define) — confirm which id is intended.
% -define(PERF_WORKFLOW_ID, 1831909).
-define(PERF_WORKFLOW_ID, 1701506).
% -define(E2E_WORKFLOW_ID, 1701506).
%% One tracked experiment branch / pull request.
-record(branch, {
    pr :: non_neg_integer(),       % PR number on WordPress/gutenberg
    branch :: list(),              % git branch name (string)
    name :: list(),                % human-readable label for reports
    max_n = no_limit :: integer(), % cap on sampled runs, or the atom no_limit
    disabled = false :: boolean()  % excluded from reports when true
}).
% | |
% Performance test runs | |
% | |
%% @doc Static registry of experiment branches, keyed by a short atom.
%% max_n caps how many runs branch_csv/1 may sample; `disabled' branches are
%% skipped by branch_report/0 (branches/0 additionally lists only a
%% hand-picked subset of these keys).
branch(trunk) -> #branch{
    pr = 44907,
    branch = "tests/measure-perf-test-runtimes",
    name = "Trunk",
    max_n = 29
};
branch(without) -> #branch{
    pr = 45175,
    branch = "perf-tests/without-changes",
    name = "Without Optimizations",
    max_n = 23
};
branch(no_types) -> #branch{
    pr = 45284,
    branch = "tests/stop-building-types-during-perf-tests",
    name = "No Build Types",
    max_n = 28,
    disabled = true
};
branch(local) -> #branch{
    pr = 45188,
    branch = "tests/clone-from-local-checkout",
    name = "Local Checkout",
    max_n = 25,
    disabled = true
};
branch(reuse_branches) -> #branch{
    pr = 45737,
    branch = "tests/reuse-tests-branch-build-if-possible",
    name = "Reuse Tests Branch",
    max_n = 17,
    disabled = true
};
branch(store_results) -> #branch{
    pr = 45747,
    branch = "tests/store-perf-test-results",
    name = "Store raw results",
    max_n = no_limit
};
branch(run_1x) -> #branch{
    pr = 45761,
    branch = "tests/only-run-1-round-perf-test-in-prs",
    name = "Only 1 test round in PRs",
    max_n = 2,
    disabled = true
};
branch(refspecs) -> #branch{
    pr = 45780,
    branch = "tests/perf-tests-fetch-all-at-once-with-branches",
    name = "Single fetch with refspecs",
    max_n = no_limit,
    disabled = true
};
branch(fix_types) -> #branch{
    pr = 45922,
    branch = "tests/fix-perf-test-skipping-wp-scripts",
    name = "Fix types",
    max_n = no_limit,
    disabled = true
};
branch(parallel) -> #branch{
    pr = 45923,
    branch = "tests/run-in-separate-processes",
    name = "Separate processes",
    max_n = no_limit,
    disabled = true
};
branch(cached_ci) -> #branch{
    pr = 45932,
    branch = "try/setup-node-composite-action",
    name = "Cache npm dependencies",
    max_n = no_limit
}.
%% @doc The experiment branches currently under comparison.
%% Resolves each hand-picked key to its full #branch{} record; filtering on
%% the `disabled' flag is left to callers such as branch_report/0.
branches() ->
    Keys = [
        % trunk, without, no_types, local, reuse_branches,
        % store_results, run_1x, refspecs, fix_types, parallel,
        trunk, cached_ci
    ],
    lists:map(fun branch/1, Keys).
%% @doc Print a status line for every enabled branch's latest workflow run,
%% and re-trigger any run that has already completed.
%%
%% One process is spawned per branch so the API requests run concurrently;
%% results are collected from the mailbox and sorted by PR number.
%% NOTE(review): the bare `receive R -> R end' has no `after' clause, so a
%% crashed fetcher process would hang this function — confirm that is
%% acceptable for interactive use.
branch_report() ->
    Branches = [Branch || #branch{disabled = false} = Branch <- branches()],
    Self = self(),
    Get = fun
        (Branch) ->
            %% Only the newest run for this branch (per_page => 1).
            [#{
                <<"status">> := Status,
                <<"rerun_url">> := RerunURL,
                <<"run_started_at">> := StartedAt
            }] = get_workflow_runs(#{
                branch => Branch#branch.branch,
                per_page => 1
            }),
            Self ! {Branch, Status, RerunURL, StartedAt}
    end,
    [spawn(fun () -> Get(Branch) end) || Branch <- Branches],
    %% One message per branch; sort deterministically by PR number.
    Responses = lists:sort(
        fun ({#branch{pr = A},_,_,_}, {#branch{pr = B},_,_,_}) -> A =< B end,
        [receive R -> R end || _ <- Branches]
    ),
    %% Completed runs: fire-and-forget a POST to the rerun URL.
    [
        begin
            io:format("Re-running ~ts~n", [Branch]),
            spawn(sp_httpc, post, [RerunURL, maps:merge(headers(), #{<<"content-type">> => <<"text/plain">>}), {binary, <<>>}])
        end
    ||
        {#branch{branch = Branch}, Status, RerunURL, _StartedAt} <- Responses,
        <<"completed">> == Status
    ],
    %% Everything else: report status; for in_progress runs include how long
    %% they have been running (h/m/s granularity).
    [
        io:format(
            "(~ts~ts) for ~ts~n",
            [
                Status,
                case Status of
                    <<"in_progress">> ->
                        Now = os:system_time(seconds),
                        Delta = Now - calendar:rfc3339_to_system_time(binary_to_list(StartedAt), [{unit, seconds}]),
                        Since = if
                            Delta >= 3600 -> io_lib:format("~bh ~bm", [round(Delta / 3600), round((Delta rem 3600) / 60)]);
                            Delta >= 60 -> io_lib:format("~bm", [round(Delta / 60)]);
                            true -> io_lib:format("~bs", [Delta])
                        end,
                        [" ", Since];
                    _ ->
                        <<>>
                end,
                Branch
            ]
        )
    ||
        {#branch{branch = Branch}, Status, _, StartedAt} <- Responses,
        <<"completed">> /= Status
    ],
    ok.
%% @doc Compare run durations of two branch() keys with the companion
%% branch-compare.r script, fetching data on the local node.
branch_compare(Control, Treatment) ->
    branch_compare(node(), Control, Treatment).

%% @doc As branch_compare/2, but the CSV data is produced on `Node' via RPC.
%% Both CSVs are written under ~/Downloads, then the R script is invoked as:
%%   branch-compare.r <treatment.csv> <control.csv> "<name>" "<name>" <pr> <pr>
%% NOTE(review): the command line is built by interpolation and executed via
%% os:cmd/1 — fine for the hard-coded branch() table, but unsafe should the
%% names ever contain shell metacharacters; confirm callers are trusted.
branch_compare(Node, Control, Treatment) ->
    {ok, ControlCSV} = rpc:call(Node, gh_actions, branch_csv, [Control]),
    {ok, TreatmentCSV} = rpc:call(Node, gh_actions, branch_csv, [Treatment]),
    ControlPath = io_lib:format("/Users/dmsnell/Downloads/branch-~p.csv", [(branch(Control))#branch.pr]),
    TreatmentPath = io_lib:format("/Users/dmsnell/Downloads/branch-~p.csv", [(branch(Treatment))#branch.pr]),
    ok = file:write_file(ControlPath, ControlCSV),
    ok = file:write_file(TreatmentPath, TreatmentCSV),
    Cmd = io_lib:format(
        "/Users/dmsnell/Downloads/branch-compare.r ~s ~s \"~s\" \"~s\" ~b ~b",
        [
            TreatmentPath,
            ControlPath,
            (branch(Treatment))#branch.name,
            (branch(Control))#branch.name,
            (branch(Treatment))#branch.pr,
            (branch(Control))#branch.pr
        ]
    ),
    io:format("~ts~n", [Cmd]),
    Out = os:cmd(Cmd),
    io:format("~ts~n", [Out]),
    ok.
%% @doc CSV of {start time, duration-in-seconds} rows for a branch's
%% successful perf jobs, limited by the branch's configured max_n.
branch_csv(BranchName) ->
    branch_csv(BranchName, (branch(BranchName))#branch.max_n).

%% @doc Build the CSV body ("rfc3339-start,duration\n" per job).
%% NOTE(review): a numeric Limit is currently ignored — the `_N' clause
%% filters by a hard-coded cutoff date rather than taking the last N runs
%% (the last_n/2 call is commented out). Confirm whether the date filter is
%% meant to be temporary.
branch_csv(BranchName, Limit) ->
    #branch{branch = Branch} = branch(BranchName),
    AllJobs = gh_actions:jobs_for_branch(list_to_binary(Branch)),
    NewerThan = calendar:rfc3339_to_system_time("2022-11-24T15:45:07-07:00"),
    Jobs = case Limit of
        no_limit -> AllJobs;
        % N -> last_n(N, AllJobs)
        _N -> [J || #{start_at := At} = J <- AllJobs, calendar:rfc3339_to_system_time(binary_to_list(At)) > NewerThan]
    end,
    CSV = io_lib:format("~s~n", [[io_lib:format("~s,~p~n", [S, D]) || #{start_at := S, duration := D} <- Jobs]]),
    {ok, CSV}.
%% @doc Aggregate all Performance Test jobs for a given branch.
%%
%% 1. /actions/workflows/1831909/runs?branch=BRANCH
%% This API response may contain the last run for each conclusion
%%
%% Runs = response.workflow_runs[.check_suite_url]
%%
%% "https://api.github.com/repos/WordPress/gutenberg/check-suites/8885306091"
%% "https://api.github.com/repos/WordPress/gutenberg/check-suites/8885273009"
%% "https://api.github.com/repos/WordPress/gutenberg/check-suites/8822785802"
%% "https://api.github.com/repos/WordPress/gutenberg/check-suites/8746662957"
%%
%% 2. For each check suite run, get the details URL
%% and parse the workflow run id
%%
%% "https://github.com/WordPress/gutenberg/actions/runs/3293417414/jobs/5430126561"
%% "https://github.com/WordPress/gutenberg/actions/runs/3293405180/jobs/5429871820"
%% "https://github.com/WordPress/gutenberg/actions/runs/3270003587/jobs/5430003849"
%% "https://github.com/WordPress/gutenberg/actions/runs/3238266544/jobs/5377308452"
%% ----------
%% run id
%%
%% 3. For each run id, get the attempt number .run_attempt
%% For run_id, 3238266544, GET "https://api.github.com/repos/wordpress/gutenberg/actions/runs/3238266544"
%% "run_attempt: 7"
%%
%% 4. For each run id attempt, fetch the jobs
%% "https://api.github.com/repos/wordpress/gutenberg/actions/runs/3238266544/attempts/7/jobs"
%%
%% Accepts either a branch() key (atom) or a branch name directly.
%% NOTE(review): the atom clause forwards the record's branch field, a
%% string, while external callers (branch_csv/2) pass a binary — presumably
%% sp_httpc:make_url accepts both; confirm.
jobs_for_branch(Branch) when is_atom(Branch) ->
    jobs_for_branch((branch(Branch))#branch.branch);
jobs_for_branch(Branch) ->
    %% Pull-request runs only; every attempt of every run is inspected.
    WorkflowRuns = get_workflow_runs(#{branch => Branch, event => <<"pull_request">>}),
    CheckSuiteIds = get_check_suite_ids(WorkflowRuns),
    RunIds = lists:flatten([get_check_suite_run_ids(Id) || Id <- CheckSuiteIds]),
    Attempts = [
        {RunId, lists:seq(1, get_latest_run_attempt(RunId))}
    ||
        RunId <- RunIds
    ],
    Jobs = lists:flatten([
        [get_jobs(RunId, AttemptId) || AttemptId <- AttemptIds]
    ||
        {RunId, AttemptIds} <- Attempts
    ]),
    %% Keep only jobs that concluded successfully, reduced to summaries.
    [job_runtime_summary(Job) || #{<<"conclusion">> := <<"success">>} = Job <- Jobs].
%% @doc Workflow runs for ?PERF_WORKFLOW_ID matching the given query args
%% (e.g. branch, event, per_page).
get_workflow_runs(Args) ->
    {ok, #{body := #{<<"workflow_runs">> := Runs}}} = get(<<"/actions/workflows/", (integer_to_binary(?PERF_WORKFLOW_ID))/binary, "/runs">>, Args),
    Runs.
%% @doc Extract the numeric check-suite id from each workflow run.
%% The id is the final path segment of the run's check_suite_url; runs
%% without that key are skipped by the comprehension filter.
get_check_suite_ids(WorkflowRuns) ->
    [
        binary_to_integer(lists:last(string:lexemes(SuiteURL, "/")))
    ||
        #{<<"check_suite_url">> := SuiteURL} <- WorkflowRuns
    ].
%% @doc Workflow run ids for one check suite. Each check-run's details_url
%% ends in ".../actions/runs/RUN_ID/jobs/JOB_ID", so the run id is the
%% third-from-last path segment.
get_check_suite_run_ids(CheckSuiteId) ->
    {ok, #{body := #{<<"check_runs">> := Runs}}} = ?MODULE:get(<<"/check-suites/", (integer_to_binary(CheckSuiteId))/binary, "/check-runs">>),
    [
        begin
            Parts = string:lexemes(URL, "/"),
            [_JobsId, <<"jobs">>, RunId | _] = lists:reverse(Parts),
            binary_to_integer(RunId)
        end
    ||
        #{<<"details_url">> := URL} <- Runs
    ].
%% @doc The highest attempt number (run_attempt) recorded for a workflow run.
get_latest_run_attempt(RunId) ->
    {ok, #{body := #{<<"run_attempt">> := RunAttempt}}} = ?MODULE:get(<<"/actions/runs/", (integer_to_binary(RunId))/binary>>),
    RunAttempt.
%% @doc The jobs belonging to one specific attempt of a workflow run.
get_jobs(RunId, AttemptId) ->
    Run = integer_to_binary(RunId),
    Attempt = integer_to_binary(AttemptId),
    {ok, #{body := #{<<"jobs">> := Jobs}}} = ?MODULE:get(<<"/actions/runs/", Run/binary, "/attempts/", Attempt/binary, "/jobs">>),
    Jobs.
%% @doc Reduce a raw GitHub job map to its conclusion, wall-clock duration
%% (seconds, completed_at - started_at) and start timestamp.
job_runtime_summary(#{
    <<"started_at">> := Start,
    <<"completed_at">> := End,
    <<"conclusion">> := Status
}) ->
    #{
        conclusion => Status,
        duration => to(seconds, End) - to(seconds, Start),
        start_at => Start
    }.
%% @doc Render job summaries as CSV rows of "excel-serial-date,minutes".
%% Only successful jobs are emitted, sorted newest-first (the comparator is
%% >= on the start time, i.e. descending).
job_summaries_to_excel(Summaries) ->
    Sorted = lists:sort(
        fun
            ( #{start_at := A}, #{start_at := B} ) ->
                to(seconds, A) >= to(seconds, B)
        end,
        Summaries
    ),
    [
        io_lib:format("~p,~p~n", [to(excel_date, S), D/60])
    ||
        #{start_at := S, duration := D, conclusion := <<"success">>} <- Sorted
    ].
%% @doc Push-event workflow runs for a branch, narrowed to the performance
%% workflow, with per-job data and aggregate success/failure summaries.
%% NOTE(review): the filter matches workflow_id 1831909 literally — the
%% commented-out value of ?PERF_WORKFLOW_ID (the live define is 1701506);
%% confirm which workflow this should track.
perf_test_runs(Branch) ->
    {ok, #{body := #{<<"workflow_runs">> := Runs}}} = get(
        <<"/actions/runs">>,
        #{
            branch => Branch,
            event => <<"push">>
        }
    ),
    PerfRuns = [Run || #{<<"workflow_id">> := 1831909} = Run <- Runs],
    RunSummaries = workflow_runs_summary(PerfRuns),
    Data = [with_jobs_data(Summary) || Summary <- RunSummaries],
    #{
        data => Data,
        success_summary => runtime_summary(lists:flatten([[D || #{<<"duration">> := D} <- Jobs] || #{<<"conclusion">> := <<"success">>, <<"jobs">> := Jobs} <- Data])),
        failure_summary => runtime_summary(lists:flatten([[D || #{<<"duration">> := D} <- Jobs] || #{<<"conclusion">> := <<"failure">>, <<"jobs">> := Jobs} <- Data]))
    }.
%% @doc Flatten the jobs of all successful runs in Data into a list of
%% {StartedAt, Duration} pairs.
duration_data(Data) ->
    SuccessJobs = lists:append([
        JobList
    ||
        #{<<"conclusion">> := <<"success">>, <<"jobs">> := JobList} <- Data
    ]),
    [{Start, Duration} || #{<<"started_at">> := Start, <<"duration">> := Duration} <- SuccessJobs].
%% @doc Small unit/format conversions used throughout the module.
%%
%% to(seconds, TS)      — RFC 3339 timestamp (binary or string) -> Unix seconds.
%% to(days, TS)         — same, as fractional days since the Unix epoch.
%% to(excel_date, TS)   — Excel serial date in the 1900 date system, whose
%%                        day-0 epoch is 1899-12-30 (offset by the historic
%%                        Lotus 1-2-3 leap-year bug).
%% to(human_percent, V) — ratio V rendered as a percentage iolist
%%                        (3 significant digits below 100%, integer above).
to(Unit, TSbinary) when is_binary(TSbinary) ->
    to(Unit, binary_to_list(TSbinary));
to(seconds, TS) ->
    calendar:rfc3339_to_system_time(TS);
to(days, TS) ->
    to(seconds, TS) / 86400;
to(excel_date, TS) ->
    %% Fix: the epoch previously read "1989-12-30" — a typo for Excel's
    %% 1899-12-30 — which shifted every serial date by 100 years.
    to(days, TS) - to(days, "1899-12-30T00:00:00Z");
to(human_percent, V) when V < 1.0 ->
    io_lib:format("~.3. g", [V * 100]);
to(human_percent, V) when V >= 1.0 ->
    io_lib:format("~b", [round(V * 100)]).
%% @doc Pluck the value stored under Name from every map in List.
%% Crashes with badkey if any element lacks the key (maps:get/2 semantics).
column(Name, List) ->
    lists:map(fun (Item) -> maps:get(Name, Item) end, List).
%% @doc Descriptive statistics for a list of numeric durations:
%% mean, median, population standard deviation, min and sample count 'N'.
%%
%% Returns nil for an empty list; for a single sample the stddev is nil
%% (undefined), preserving the original single-element behavior.
%%
%% Fixes over the previous version:
%% - even-length median averaged the elements at positions N/2+1 and N/2+2
%%   instead of N/2 and N/2+1 (off by one);
%% - the two-element clause computed stddev as sqrt(S)/2 instead of
%%   sqrt(S/2); the general population formula below now covers that case.
runtime_summary([]) ->
    nil;
runtime_summary([A]) ->
    #{
        mean => A,
        median => A,
        stddev => nil,
        min => A,
        'N' => 1
    };
runtime_summary(Durations) ->
    Sorted = lists:sort(Durations),
    N = length(Sorted),
    Mean = lists:sum(Sorted) / N,
    %% Median: middle element for odd N, mean of the two middle elements
    %% for even N.
    Median = case N rem 2 of
        0 -> (lists:nth(N div 2, Sorted) + lists:nth(N div 2 + 1, Sorted)) / 2;
        1 -> lists:nth(N div 2 + 1, Sorted)
    end,
    #{
        mean => Mean,
        median => Median,
        %% Population stddev: sqrt of the mean squared deviation.
        stddev => math:sqrt(lists:sum([math:pow(X - Mean, 2) || X <- Sorted]) / N),
        min => hd(Sorted),
        'N' => N
    }.
%% @doc Humanize the duration fields (mean/median/stddev/min) of a
%% runtime_summary/1 map in place; 'N' is left untouched. A nil stddev
%% passes through unchanged (s_to_human(nil) -> nil).
rs_s_to_m(Summary) ->
    Summary#{
        mean => s_to_human(maps:get(mean, Summary)),
        median => s_to_human(maps:get(median, Summary)),
        stddev => s_to_human(maps:get(stddev, Summary)),
        min => s_to_human(maps:get(min, Summary))
    }.
%% @doc The final N elements of List, or the whole list when it has fewer
%% than N elements.
last_n(N, List) ->
    Skip = max(0, length(List) - N),
    lists:nthtail(Skip, List).
% | |
% Chart on https://github.com/WordPress/gutenberg/pull/45175 | |
% | |
%% @doc One-off report backing the comparison table posted on PR #45175.
%%
%% Fetches job timings for every non-disabled branch below concurrently,
%% keeps the 15 most recent durations per branch, and prints one Markdown
%% table row per branch with summary stats plus deltas against two
%% baselines: the no-op control branch and trunk.
chart(45175) ->
    AllBranches = #{
        <<"perf-tests/without-changes">> => #{
            order => 1,
            name => <<"Without changes #45175">>
        },
        <<"tests/measure-perf-test-runtimes">> => #{
            order => 2,
            name => <<"Empty change #44907">>
        },
        <<"tests/run-perf-tests-in-band">> => #{
            disabled => true,
            order => 3,
            name => <<"🚫 --runInBand #44905"/utf8>>
        },
        <<"tests/shallow-fetch-merge-branch">> => #{
            disabled => true,
            order => 4,
            name => <<"✅ --depth=2 at merge branch #45057"/utf8>>
        },
        <<"tests/stop-building-types-during-perf-tests">> => #{
            order => 5,
            name => <<"No-generate-types #45284">>
        },
        <<"tests/clone-from-local-checkout">> => #{
            order => 6,
            name => <<"Clone local checkout #45188">>
        },
        <<"tests/refactor-use-of-env">> => #{
            disabled => true,
            order => 7,
            name => <<"Refactor use of ENV #45255">>
        },
        <<"tests/skip-tests-on-merge-conflict">> => #{
            order => 8,
            name => <<"Skip on merge conflict #45728">>
        },
        <<"tests/reuse-tests-branch-build-if-possible">> => #{
            order => 9,
            name => <<"Reuse tests-branch build #45737">>
        }
    },
    %% Mere presence of the `disabled' key excludes a branch.
    Branches = maps:filter(fun (_K, V) -> not is_map_key(disabled, V) end, AllBranches),
    Self = self(),
    %% One fetcher process per branch; each reply is tagged with its branch
    %% key so the selective receive below can pick it out of the mailbox.
    ok = maps:foreach(
        fun (Branch, _Config) ->
            spawn(fun () -> Self ! {Branch, jobs_for_branch(Branch)} end)
        end,
        Branches
    ),
    WithJobs = maps:map(
        fun
            (Branch, Data) ->
                receive
                    {Branch, Jobs} ->
                        %% Compare only the 15 most recent runs.
                        Durations = last_n(15, column(duration, Jobs)),
                        % Durations = column(duration, Jobs),
                        Data#{
                            jobs => Jobs,
                            durations => Durations,
                            summary => runtime_summary(Durations)
                        }
                end
        end,
        Branches
    ),
    %% Baseline means: the no-op branch is the control; the empty-change
    %% branch stands in for trunk.
    #{
        <<"perf-tests/without-changes">> := #{summary := #{mean := Control}},
        <<"tests/measure-perf-test-runtimes">> := #{summary := #{mean := Trunk}}
    } = WithJobs,
    WithComparison = maps:map(
        fun (_, #{summary := #{mean := Mean}} = Data) ->
            Data#{
                vs_control => {Mean / Control, Mean - Control},
                vs_trunk => {Mean / Trunk, Mean - Trunk}
            }
        end,
        WithJobs
    ),
    R1 = maps:to_list(WithComparison),
    R2 = lists:sort(fun ({_, #{order := A}}, {_, #{order := B}}) -> A =< B end, R1),
    %% Emit: | name | N | mean / median | min | stddev | Δcontrol | Δtrunk |
    lists:foreach(
        fun ({_, #{name := Name, summary := S, vs_control := {VC, VCs}, vs_trunk := {VT, VTs}}}) ->
            #{
                'N' := N,
                mean := Mean,
                median := Median,
                stddev := StdDev,
                min := Min
            } = rs_s_to_m(S),
            io:format(
                "| ~ts | ~p | ~s / ~s | ~s | ~s | ~s ~s% | ~s ~s% |~n",
                [Name, N, Mean, Median, Min, StdDev, s_to_human(VCs), to(human_percent, VC), s_to_human(VTs), to(human_percent, VT)]
            )
        end,
        R2
    ),
    {ok, WithComparison}.
%% @doc Render a number of seconds as a "<sign><minutes>m <seconds>s"
%% binary. nil passes through untouched (the undefined stddev of a
%% single-sample runtime summary).
s_to_human(nil) ->
    nil;
s_to_human(Seconds) ->
    Sign = if Seconds < 0 -> "-"; true -> "" end,
    WholeSecs = floor(abs(Seconds)),
    Text = io_lib:format("~s~pm ~ps", [Sign, WholeSecs div 60, WholeSecs rem 60]),
    list_to_binary(Text).
%% @doc Scrape `Pages' pages of trunk push-event perf runs, walking the page
%% number down to 1, sleeping 1s between requests to respect rate limits.
boogey(Pages) ->
    boogey(Pages, []).

%% Accumulates each page's data and flattens once at the end.
%% NOTE(review): the catch-all Error:Data clause retries forever — fine for
%% an interactive scrape of flaky APIs, but a permanent failure (e.g. a bad
%% token) will loop indefinitely at 5s intervals.
boogey(0, Results) ->
    lists:flatten(Results);
boogey(Pages, Results) ->
    try
        timer:sleep(1000),
        #{data := Page} = all_perf_runs(#{page => Pages, event => <<"push">>, branch => <<"trunk">>}),
        boogey(Pages - 1, [Page | Results])
    catch
        Error:Data ->
            io:format(" >> Error, trying after 5s: ~p; ~p~n", [Error, Data]),
            timer:sleep(5000),
            boogey(Pages, Results)
    end.
%% @doc All successful pull-request perf workflow runs (default query).
all_perf_runs() ->
    all_perf_runs(#{}).

%% @doc Fetch perf workflow runs with the given query overrides, attach
%% per-job timing data to each run, and aggregate success/failure duration
%% summaries.
all_perf_runs(QueryArgs) ->
    {ok, #{body := #{<<"workflow_runs">> := Runs}}} = get(
        %% integer_to_binary/1 replaces the previous
        %% list_to_binary(io_lib:format("~p", [...])) round-trip, matching
        %% how get_workflow_runs/1 builds the same path.
        <<"/actions/workflows/", (integer_to_binary(?PERF_WORKFLOW_ID))/binary, "/runs">>,
        maps:merge(
            #{
                event => <<"pull_request">>,
                status => <<"success">>,
                per_page => 100
            },
            QueryArgs
        )
    ),
    RunSummaries = workflow_runs_summary(Runs),
    Data = [with_jobs_data(Summary) || Summary <- RunSummaries],
    #{
        data => Data,
        success_summary => runtime_summary(lists:flatten([[D || #{<<"duration">> := D} <- Jobs] || #{<<"conclusion">> := <<"success">>, <<"jobs">> := Jobs} <- Data])),
        failure_summary => runtime_summary(lists:flatten([[D || #{<<"duration">> := D} <- Jobs] || #{<<"conclusion">> := <<"failure">>, <<"jobs">> := Jobs} <- Data]))
    }.
%% @doc Fetch per-job timing for one workflow run and embed it under
%% <<"jobs">>, removing the consumed <<"jobs_url">> key.
%% Only completed jobs are kept; duration is completed_at - started_at in
%% seconds. The head pattern strips the absolute repo prefix so that get/1
%% resolves the remaining relative path against ?BASE_URL.
with_jobs_data(#{<<"jobs_url">> := <<"https://api.github.com/repos/WordPress/gutenberg", JobsURL/binary>>} = Run) ->
    {ok, #{body := #{<<"jobs">> := JobsData}}} = ?MODULE:get(JobsURL),
    WithJob = Run#{<<"jobs">> => [
        begin
            Tic = calendar:rfc3339_to_system_time(binary_to_list(Start)),
            Toc = calendar:rfc3339_to_system_time(binary_to_list(End)),
            #{
                <<"conclusion">> => Conclusion,
                <<"duration">> => Toc - Tic,
                <<"started_at">> => Start
            }
        end
    ||
        #{
            <<"status">> := <<"completed">>,
            <<"conclusion">> := Conclusion,
            <<"started_at">> := Start,
            <<"completed_at">> := End
        } <- JobsData
    ]},
    maps:remove(<<"jobs_url">>, WithJob).
%% @doc Trim raw workflow runs down to the handful of charted fields and
%% simplify each run's pull_requests entries to #{pr, ref, sha}.
%% NOTE(review): the lists:flatten/1 is a no-op here — maps:with/2 yields a
%% map per run, so the comprehension result is already a flat list.
workflow_runs_summary(Runs) ->
    Data = lists:flatten([
        maps:with(
            [
                <<"display_title">>,
                <<"pull_requests">>,
                <<"conclusion">>,
                <<"jobs_url">>,
                <<"status">>
            ],
            Run
        )
    ||
        Run <- Runs
    ]),
    [
        Datum#{
            <<"pull_requests">> => [
                #{
                    pr => PR,
                    ref => Ref,
                    sha => SHA
                }
            ||
                #{
                    <<"number">> := PR,
                    <<"head">> := #{
                        <<"ref">> := Ref,
                        <<"sha">> := SHA
                    }
                } <- maps:get(<<"pull_requests">>, Datum)
            ]
        }
    ||
        Datum <- Data
    ].
% | |
% Helper functions | |
% | |
%% @doc GET a GitHub API path (or absolute URL) with auth headers.
get(Path) ->
    get(Path, #{}).

%% @doc Rate-limited GET: sleeps 200ms before every request. Binary paths
%% starting with "https" are used verbatim; anything else is resolved
%% against ?BASE_URL with Args as the query string.
get(Path, Args) ->
    timer:sleep(200),
    URL = case Path of
        <<"https", _/binary>> -> Path;
        _ -> sp_httpc:make_url(?BASE_URL, Path, Args)
    end,
    % io:format(" >> Fetching ~s~n", [URL]),
    sp_httpc:get(URL, headers()).
%% @doc Print (not execute) an equivalent curl command line for a path,
%% useful for reproducing a request by hand.
curl(Path) ->
    curl(Path, #{}).

%% @doc As curl/1 with explicit query args.
%% NOTE(review): this prints ?PAT — the bearer token — in clear text; do not
%% paste the output anywhere public.
curl(Path, Args) ->
    io:format("curl '~s' -H 'Authorization: Bearer ~s' -H 'Accept: application/vnd.github+json'~n", [sp_httpc:make_url(?BASE_URL, Path, Args), ?PAT]).
%% @doc Default GitHub REST API request headers: JSON media type plus bearer
%% authentication built from ?PAT.
headers() -> #{
    accept => <<"application/vnd.github+json">>,
    authorization => <<"Bearer ", ?PAT/binary>>
}.
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment