Merging upstream version 26.26.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
768f936511
commit
1ac9fca060
62 changed files with 938 additions and 453 deletions
81
.github/scripts/format_benchmark.py
vendored
Executable file
81
.github/scripts/format_benchmark.py
vendored
Executable file
|
@ -0,0 +1,81 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Format benchmark comparison output with visual indicators for GitHub markdown."""
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
def format_benchmark_output(content):
    """Add visual formatting to benchmark comparison output.

    Args:
        content: Raw text of a pyperf markdown comparison table.

    Returns:
        The same text with emoji indicators prepended to speed factors
        (e.g. "1.23x faster" -> "🟢 **1.23x faster**") and to
        "not significant" markers. All other lines pass through unchanged.
    """
    # Hoisted out of the loop: compile the pattern once instead of on
    # every line. Matches e.g. "1.23x faster" or "1.10x slower".
    speed_re = re.compile(r"(\d+\.\d+)x\s+(faster|slower)")

    formatted_lines = []
    for line in content.split("\n"):
        # Pass through empty lines and markdown table separator rows.
        if not line.strip() or (line.startswith("|") and "---" in line):
            formatted_lines.append(line)
            continue

        # Process benchmark result lines.
        if "|" in line and ("faster" in line or "slower" in line):
            speed_match = speed_re.search(line)
            if speed_match:
                factor = float(speed_match.group(1))
                direction = speed_match.group(2)

                # Pick an indicator by direction and magnitude:
                # double icon for 2x+, single for 1.1x+, neutral below.
                if direction == "faster":
                    strong, weak = "🟢🟢", "🟢"
                else:
                    strong, weak = "🔴🔴", "🔴"
                if factor >= 2.0:
                    indicator = strong
                elif factor >= 1.1:
                    indicator = weak
                else:
                    indicator = "⚪"  # marginal change either way

                # Single formatting site (the original duplicated this
                # f-string in both branches).
                formatted_text = f"{indicator} **{speed_match.group(0)}**"
                line = line.replace(speed_match.group(0), formatted_text)
        elif "not significant" in line:
            # Neutral indicator for non-significant changes.
            line = re.sub(r"not significant", "⚪ not significant", line)

        formatted_lines.append(line)

    return "\n".join(formatted_lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: read a benchmark comparison file, format it, and
    print the result to stdout.

    Exits with status 1 on bad usage, a missing input file, or any read
    error. Diagnostics go to stderr so that stdout stays clean — the
    calling workflow redirects stdout into the generated markdown file,
    and error text must not end up there.
    """
    if len(sys.argv) != 2:
        print("Usage: python format_benchmark.py <input_file>", file=sys.stderr)
        sys.exit(1)

    input_file = sys.argv[1]

    try:
        # Explicit encoding: pyperf output may contain non-ASCII text.
        with open(input_file, "r", encoding="utf-8") as f:
            content = f.read()

        formatted = format_benchmark_output(content)
        print(formatted)

    except FileNotFoundError:
        print(f"Error: File '{input_file}' not found", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        # Broad catch is acceptable at the top-level CLI boundary:
        # report and fail rather than traceback into the workflow log.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
|
21
.github/workflows/benchmark-sqlglot.yml
vendored
21
.github/workflows/benchmark-sqlglot.yml
vendored
|
@ -61,14 +61,27 @@ jobs:
|
|||
- name: Compare benchmarks and save results
|
||||
run: |
|
||||
source ./.venv/bin/activate
|
||||
python -m pyperf compare_to bench_parse_pr.json bench_parse_main.json --table --table-format=md > bench_parse_comparison.txt
|
||||
python -m pyperf compare_to bench_optimize_pr.json bench_optimize_main.json --table --table-format=md > bench_optimize_comparison.txt
|
||||
python -m pyperf compare_to bench_parse_main.json bench_parse_pr.json --table --table-format=md > bench_parse_comparison_raw.txt
|
||||
python -m pyperf compare_to bench_optimize_main.json bench_optimize_pr.json --table --table-format=md > bench_optimize_comparison_raw.txt
|
||||
|
||||
# Format with colors
|
||||
python .github/scripts/format_benchmark.py bench_parse_comparison_raw.txt > bench_parse_comparison.txt
|
||||
python .github/scripts/format_benchmark.py bench_optimize_comparison_raw.txt > bench_optimize_comparison.txt
|
||||
- name: Combine benchmark outputs
|
||||
run: |
|
||||
echo "## Parsing Benchmark" > combined_benchmarks.md
|
||||
echo "## Benchmark Results" > combined_benchmarks.md
|
||||
echo "" >> combined_benchmarks.md
|
||||
echo "**Legend:**" >> combined_benchmarks.md
|
||||
echo "- 🟢🟢 = 2x+ faster" >> combined_benchmarks.md
|
||||
echo "- 🟢 = 1.1x - 2x faster" >> combined_benchmarks.md
|
||||
echo "- ⚪ = No significant change (< 1.1x)" >> combined_benchmarks.md
|
||||
echo "- 🔴 = 1.1x - 2x slower" >> combined_benchmarks.md
|
||||
echo "- 🔴🔴 = 2x+ slower" >> combined_benchmarks.md
|
||||
echo "" >> combined_benchmarks.md
|
||||
echo "### Parsing Benchmark" >> combined_benchmarks.md
|
||||
cat bench_parse_comparison.txt >> combined_benchmarks.md
|
||||
echo -e "\n---\n" >> combined_benchmarks.md
|
||||
echo "## Optimization Benchmark" >> combined_benchmarks.md
|
||||
echo "### Optimization Benchmark" >> combined_benchmarks.md
|
||||
cat bench_optimize_comparison.txt >> combined_benchmarks.md
|
||||
- name: Comment on PR for parse benchmark results
|
||||
uses: peter-evans/create-or-update-comment@v4
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue