diff --git a/report/parse_logs.py b/report/parse_logs.py
index 60104bca1..e1c9c3151 100644
--- a/report/parse_logs.py
+++ b/report/parse_logs.py
@@ -15,7 +15,9 @@
information such as the crash details, crash symptoms,
stack traces, etc. to be rendered in the report."""
+import html
import re
+from datetime import datetime
from report.common import LogPart
@@ -35,10 +37,615 @@ class LogsParser:
def __init__(self, logs: list[LogPart]):
self._logs = logs
+ def _extract_bash_commands(self, content: str) -> list[str]:
+ """Extract and parse bash commands from content."""
+ commands = []
+ lines = content.split('\n')
+
+ for i, line in enumerate(lines):
+ line = line.strip()
+      if line == '<bash>':
+ command = self._process_bash_block(lines, i)
+ if command and command not in commands:
+ commands.append(command)
+
+ return commands
+
+ def _process_bash_block(self, lines: list[str], start_idx: int) -> str:
+ """Process a single bash block and extract command summary."""
+ for j in range(start_idx + 1, len(lines)):
+      if lines[j].strip() == '</bash>':
+ bash_content = '\n'.join(lines[start_idx + 1:j]).strip()
+ if bash_content:
+ return self._extract_command_from_content(bash_content)
+ break
+ return ""
+
+ def _extract_command_from_content(self, bash_content: str) -> str:
+ """Extract command summary from bash content."""
+ first_line = bash_content.split('\n', 1)[0].strip()
+ if not first_line:
+ return ""
+
+ # Skip comments and placeholder text
+ if (first_line.startswith('#') or first_line.startswith('[The command') or
+ first_line.startswith('No bash') or 'No bash' in first_line or
+ len(first_line) < 3):
+ return ""
+
+ parts = first_line.split()
+ if not parts:
+ return ""
+
+ cmd = parts[0]
+ command_summary = self._build_command_summary(cmd, parts, first_line)
+
+ if len(command_summary) > 40:
+ command_summary = command_summary[:37] + '...'
+
+ return command_summary
+
+ def _build_command_summary(self, cmd: str, parts: list[str],
+ first_line: str) -> str:
+ """Build command summary based on command type."""
+ if cmd == 'grep':
+ quoted_match = re.search(r"'([^']+)'", first_line)
+ if quoted_match:
+ search_term = quoted_match.group(1)
+ return f"grep '{search_term}'"
+ return self._extract_key_args(cmd, parts[1:], 1)
+ if cmd == 'cat':
+ return self._extract_key_args(cmd, parts[1:], 1)
+ return self._extract_key_args(cmd, parts[1:], 2)
+
+ def _extract_key_args(self, cmd: str, parts: list[str], max_args: int) -> str:
+ """Extract key arguments from command parts."""
+ key_args = []
+ for part in parts:
+ if not part.startswith('-') and len(part) > 1:
+ if len(part) > 20:
+ part = part[:17] + '...'
+ key_args.append(part)
+ if len(key_args) >= max_args:
+ break
+ return f"{cmd} {' '.join(key_args)}".strip()
+
+ def _extract_tool_names(self, content: str) -> list[str]:
+ """Extract tool names from content."""
+ tool_counts = {}
+ lines = content.split('\n')
+
+ # For step titles
+ relevant_tool_tags = [
+ '', '', '', '', '',
+ '', '', '', ''
+ ]
+
+ for i, line in enumerate(lines):
+ line = line.strip()
+      if line in relevant_tool_tags and not line.startswith('</'):
+        if line == '<stderr>':
+ # handled separately via regex to ensure non-empty only
+ continue
+ tool_name = line[1:-1].replace('_', ' ').title()
+ tool_counts[tool_name] = tool_counts.get(tool_name, 0) + 1
+
+ # Add 'Stderr' only if any block has non-empty inner content
+    for m in re.finditer(r'<stderr>(.*?)</stderr>', content, flags=re.DOTALL):
+ if m.group(1).strip():
+ tool_counts['Stderr'] = tool_counts.get('Stderr', 0) + 1
+ break
+
+ tool_names = []
+ for tool_name in tool_counts:
+ tool_names.append(tool_name)
+
+ return tool_names
+
+ def _parse_steps_from_logs(self, agent_logs: list[LogPart]) -> list[dict]:
+ """Parse steps from agent logs, grouping by chat prompt/response pairs."""
+ step_pattern = re.compile(r"Step #(\d+) - \"(.+?)\":")
+ simple_step_pattern = re.compile(r"Step #(\d+)")
+
+ steps_dict = {}
+ current_step_number = None
+
+ for log_part in agent_logs:
+ content = log_part.content.strip()
+ if not content:
+ continue
+
+ lines = content.split('\n')
+
+ step_header_found = False
+ for line in lines:
+ step_match = step_pattern.search(line)
+ if not step_match:
+ simple_match = simple_step_pattern.search(line)
+ if simple_match:
+ step_match = simple_match
+
+ if step_match:
+ step_header_found = True
+ current_step_number = step_match.group(1)
+
+ if current_step_number not in steps_dict:
+ steps_dict[current_step_number] = {
+ 'number': current_step_number,
+ 'type': 'Step',
+ 'log_parts': []
+ }
+ break
+
+ if not step_header_found and current_step_number:
+ steps_dict[current_step_number]['log_parts'].append(log_part)
+ elif not step_header_found and not current_step_number and not steps_dict:
+ steps_dict['0'] = {
+ 'number': None,
+ 'type': 'Content',
+ 'log_parts': [log_part]
+ }
+
+ return self._parse_steps_by_chat_pairs(agent_logs)
+
+ def _parse_steps_by_chat_pairs(self, agent_logs: list[LogPart]) -> list[dict]:
+ """Parse steps from agent logs by grouping chat prompt/response pairs."""
+ steps = []
+
+ first_prompt_idx = -1
+ for i, log_part in enumerate(agent_logs):
+ if log_part.chat_prompt:
+ first_prompt_idx = i
+ break
+
+ if first_prompt_idx == -1:
+ return []
+
+ steps.append({
+ 'number': '0 - System Instructions',
+ 'type': 'System Instructions',
+ 'log_parts': [agent_logs[first_prompt_idx]]
+ })
+
+ # Process logs after the system prompt to group into steps.
+ logs_to_process = agent_logs[first_prompt_idx + 1:]
+ step_counter = 1
+ current_step_parts = []
+
+ for log_part in logs_to_process:
+ if "agent-step" in log_part.content or "Trial ID:" in log_part.content:
+ continue
+
+ # A chat_response marks the beginning of a new step.
+ if log_part.chat_response:
+ if current_step_parts:
+ step_data = self._create_step_data(step_counter, current_step_parts)
+ steps.append(step_data)
+ step_counter += 1
+ current_step_parts = [log_part]
+ else:
+ current_step_parts.append(log_part)
+
+ # Append the last step.
+ if current_step_parts:
+ step_data = self._create_step_data(step_counter, current_step_parts)
+ steps.append(step_data)
+
+ return steps
+
+ def _convert_newlines_outside_tags(self, content: str) -> str:
+ """Convert \n to
tags when they appear outside XML tags."""
+ tag_pattern = r'</?[^&]*?>'
+
+ tag_matches = list(re.finditer(tag_pattern, content))
+
+ if not tag_matches:
+      return content.replace('\n', '<br>')
+
+ result = []
+ last_end = 0
+
+ for match in tag_matches:
+ # Process text before this tag
+ before_tag = content[last_end:match.start()]
+      result.append(before_tag.replace('\n', '<br>'))
+
+ # Add the tag itself (unchanged)
+ result.append(match.group())
+
+ last_end = match.end()
+
+ remaining = content[last_end:]
+    result.append(remaining.replace('\n', '<br>'))
+
+ return ''.join(result)
+
+ def syntax_highlight_content(self,
+ content: str,
+ default_language: str = "",
+ agent_name: str = "") -> str:
+ """Syntax highlights content while preserving visible tags."""
+
+ # Escape everything first so raw logs are safe to render in HTML
+ escaped = html.escape(content)
+
+ escaped = self._convert_newlines_outside_tags(escaped)
+
+ def _sub(pattern: str, repl: str, text: str) -> str:
+ return re.sub(pattern, repl, text, flags=re.DOTALL)
+
+ def _normalize_lang(lang: str) -> str:
+ if not lang:
+ return 'cpp'
+ lang = lang.strip().lower()
+ if lang in ['c++', 'cpp', 'cxx']:
+ return 'cpp'
+ if lang in ['c']:
+ return 'c'
+ if lang in ['python', 'py']:
+ return 'python'
+ if lang in ['java']:
+ return 'java'
+ if lang in ['rust', 'rs']:
+ return 'rust'
+ if lang in ['go', 'golang']:
+ return 'go'
+ return 'cpp'
+
+ lang_key = _normalize_lang(default_language)
+
+ # Pre-process stdout blocks to choose language based on preceding bash command
+ escaped = self._replace_stdout_with_language_blocks(escaped, lang_key)
+
+ # Pre-process stderr blocks to avoid greedy regex and stop at the first closing tag
+ escaped = self._replace_tag_with_code_blocks(escaped, 'stderr', 'bash')
+
+ escaped = _sub(
+ r'<conclusion>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</conclusion>',
+ r'<conclusion>'
+ r'\1
'
+ r'</conclusion>', escaped)
+ escaped = _sub(
+ r'<reason>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</reason>', r'<reason>'
+ r'\1
'
+ r'</reason>', escaped)
+
+ escaped = _sub(
+ r'<bash>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</bash>', r'<bash>'
+ r''
+ r'\1
'
+ r'</bash>', escaped)
+ escaped = _sub(
+ r'<build_script>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</build_script>',
+ r'<build_script>'
+ r''
+ r'\1
'
+ r'</build_script>', escaped)
+ escaped = _sub(
+ r'<fuzz target>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</fuzz target>',
+ rf'<fuzz target>'
+ rf''
+ rf'\1
'
+ rf'</fuzz target>', escaped)
+
+ escaped = _sub(
+ r'<return_code>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</return_code>',
+ r'<return_code>'
+ r''
+ r'\1
'
+ r'</return_code>', escaped)
+
+ escaped = _sub(
+ r'<build script>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</build script>',
+ r'<build script>'
+ r''
+ r'\1
'
+ r'</build script>', escaped)
+
+ escaped = _sub(
+ r'<gcb>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)</gcb>',
+ r'<gcb>'
+ r''
+ r'\1
'
+ r'</gcb>', escaped)
+
+ escaped = _sub(
+ r'<gdb>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)</gdb>',
+ r'<gdb>'
+ r''
+ r'\1
'
+ r'</gdb>', escaped)
+
+ escaped = _sub(
+ r'<gdb command>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</gdb command>',
+ r'<gdb command>'
+ r''
+ r'\1
'
+ r'</gdb command>', escaped)
+
+ escaped = _sub(
+ r'<gdb output>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</gdb output>',
+ r'<gdb output>'
+ r''
+ r'\1
'
+ r'</gdb output>', escaped)
+
+ escaped = _sub(
+ r'<code>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)</code>',
+ r'<code>'
+ r''
+ rf'\1
'
+ r'</code>', escaped)
+
+ escaped = _sub(
+ r'<solution>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</solution>', r'<solution>'
+ r''
+ rf'\1
'
+ r'</solution>', escaped)
+
+ def process_system_content(match):
+ content = match.group(1)
+ return (r'<system>'
+ r'' + content +
+ r'
</system>')
+
+ escaped = re.sub(
+ r'<system>(\s*[^\s].*?[^\s]\s*|(?:\s*[^\s].*?)?)'
+ r'</system>',
+ process_system_content,
+ escaped,
+ flags=re.DOTALL)
+
+ # Handle steps tag (usually opening only, no closing tag)
+ escaped = _sub(r'<steps>',
+ r'<steps>', escaped)
+
+ # Generic fallback for any remaining XML tags not explicitly handled above
+ # This ensures all XML tags get the log-tag styling
+ escaped = _sub(r'<([^/&][^&]*?)>',
+ r'<\1>', escaped)
+ escaped = _sub(r'<(/[^&]*?)>',
+ r'<\1>', escaped)
+
+ # Handle ExecutionStage-specific highlighting for fuzz target source
+ if "ExecutionStage" in agent_name:
+ escaped = self._highlight_execution_stage_content(escaped, lang_key)
+
+ return escaped
+
+ def _highlight_execution_stage_content(self, content: str,
+ lang_key: str) -> str:
+ """Add syntax highlighting for ExecutionStage-specific content patterns."""
+
+ # Pattern to match "Fuzz target source:" followed by code until
+ # "Build script source:"
+ fuzz_target_pattern = (r'(Fuzz target source:)\s*\n'
+ r'(.*?)'
+ r'(?=Build script source:|$)')
+
+ def replace_fuzz_target(match):
+ header = match.group(1)
+ code_content = match.group(2).strip()
+
+ if code_content:
+ return (
+ f'{header}
'
+ ''
+ f'{code_content}
')
+ return f'{header}
'
+
+ content = re.sub(fuzz_target_pattern,
+ replace_fuzz_target,
+ content,
+ flags=re.DOTALL)
+
+ return content
+
+ def _replace_stdout_with_language_blocks(self, escaped: str,
+ default_lang: str) -> str:
+ """Replace blocks with language-aware code blocks.
+ Chooses language based on the preceding command and file extensions.
+ """
+ pattern = re.compile(r'<(bash|stdout)>(.*?)</\1>', re.DOTALL)
+ matches = list(pattern.finditer(escaped))
+ if not matches:
+ return escaped
+
+ result_parts = []
+ cursor = 0
+ last_bash_content = ""
+
+ for m in matches:
+ tag = m.group(1)
+ content = m.group(2)
+ if tag == 'bash':
+ # Keep as-is; specialized bash replacement will handle it later
+ result_parts.append(escaped[cursor:m.end()])
+ cursor = m.end()
+ last_bash_content = content.strip()
+ else: # stdout
+ stdout_content = content
+ language = self._derive_language_for_stdout(last_bash_content,
+ stdout_content,
+ default_lang)
+ replacement = (
+ '<stdout>'
+ ''
+ f'{stdout_content}
'
+ '</stdout>')
+ result_parts.append(escaped[cursor:m.start()])
+ result_parts.append(replacement)
+ cursor = m.end()
+
+ result_parts.append(escaped[cursor:])
+ return ''.join(result_parts)
+
+ def _derive_language_for_stdout(self, bash_content: str, stdout_content: str,
+ default_lang: str) -> str:
+ """Derive a reasonable syntax highlight language for stdout.
+
+    - For `cat <file>`: use file extension.
+    - For `grep ... <path>`: use file extension if a file, else try to
+      infer from stdout first path; fallback to bash.
+ - For unknown: use bash.
+ """
+
+ def pick_from_path(path: str) -> str:
+ path = path.strip()
+ if not path:
+ return 'bash'
+ # Treat directories as bash
+ if path.endswith('/') or ('/' in path and '.' not in path.split('/')[-1]):
+ return 'bash'
+ lower = path.lower()
+ if lower.endswith(('.cc', '.cpp', '.cxx', '.hpp', '.hh', '.hxx')):
+ return 'cpp'
+ if lower.endswith('.h'):
+ return 'cpp'
+ if lower.endswith('.c'):
+ return 'c'
+ if lower.endswith('.py'):
+ return 'python'
+ if lower.endswith('.java'):
+ return 'java'
+ if lower.endswith('.rs'):
+ return 'rust'
+ if lower.endswith('.go'):
+ return 'go'
+ if lower.endswith('.sh'):
+ return 'bash'
+ return 'bash'
+
+ def first_token(text: str) -> str:
+ for line in text.split('\n'):
+ line = line.strip()
+ if line:
+ return line.split()[0]
+ return ''
+
+ def cat_target(text: str) -> str:
+ # Extract first non-option argument after 'cat'
+ for line in text.split('\n'):
+ line = line.strip()
+ if not line:
+ continue
+ parts = line.split()
+ if not parts:
+ continue
+ if parts[0] != 'cat':
+ continue
+ for tok in parts[1:]:
+ if not tok.startswith('-'):
+ return tok
+ return ''
+
+ def grep_target_and_guess(text: str) -> str:
+ # Try to get last non-option token as path (common grep usage)
+ for line in text.split('\n'):
+ line = line.strip()
+ if not line:
+ continue
+ parts = line.split()
+ if not parts or parts[0] != 'grep':
+ continue
+ # Remove options and pattern; pattern may be quoted (escaped as entities)
+ # Heuristic: take last token
+ candidate = parts[-1] if len(parts) > 1 else ''
+ return candidate
+ return ''
+
+ cmd = first_token(bash_content)
+ if cmd == 'cat':
+ target = cat_target(bash_content)
+ return pick_from_path(target)
+ if cmd == 'grep':
+ target = grep_target_and_guess(bash_content)
+ lang = pick_from_path(target)
+ if lang != 'bash':
+ return lang
+ # Try to infer from grep stdout: path:line:...
+ for line in stdout_content.split('\n'):
+ line = line.strip()
+ if not line:
+ continue
+ # Lines often look like /path/file.ext:NN:content
+ path_part = line.split(':', 1)[0]
+ if path_part:
+ return pick_from_path(path_part)
+ return 'bash'
+ # Default behavior
+ return 'bash'
+
+ def _replace_tag_with_code_blocks(self, escaped: str, tag: str,
+ language: str) -> str:
+ """Replace a given escaped tag with a non-greedy code block wrapper.
+
+ Ensures that each opening tag pairs with the first subsequent closing tag.
+ """
+ open_tag = f'<{tag}>'
+ close_tag = f'</{tag}>'
+ pos = 0
+ parts: list[str] = []
+ while True:
+ start = escaped.find(open_tag, pos)
+ if start == -1:
+ parts.append(escaped[pos:])
+ break
+ parts.append(escaped[pos:start])
+ content_start = start + len(open_tag)
+ end = escaped.find(close_tag, content_start)
+ if end == -1:
+ # No closing tag; leave remainder as-is
+ parts.append(escaped[start:])
+ break
+ inner = escaped[content_start:end]
+ replacement = (
+ f'{open_tag}'
+ ''
+ f'{inner}
'
+ f'{close_tag}')
+ parts.append(replacement)
+ pos = end + len(close_tag)
+
+ return ''.join(parts)
+
+ def _create_step_data(self, step_number: int,
+ log_parts: list[LogPart]) -> dict:
+ """Create step data from log parts."""
+ step_data = {
+ 'number': str(step_number),
+ 'type': 'Step',
+ 'log_parts': log_parts
+ }
+
+ all_content = '\n'.join([part.content for part in log_parts])
+ tool_names = self._extract_tool_names(all_content)
+ bash_commands = self._extract_bash_commands(all_content)
+
+ if tool_names:
+ step_data['name'] = f"{', '.join(tool_names)}"
+ if bash_commands:
+ step_data['bash_commands'] = bash_commands
+
+ return step_data
+
def get_agent_sections(self) -> dict[str, list[LogPart]]:
"""Get the agent sections from the logs."""
- pattern = re.compile(r"\*{24}(.+?)\*{24}")
+ pattern = re.compile(r"\*{20,}([^*]+?)\*{20,}")
agent_sections = {}
current_agent = None
agent_counters = {}
@@ -91,19 +698,76 @@ def get_agent_cycles(self) -> list[dict]:
cycles_dict = {}
for agent_name, agent_logs in agent_sections.items():
+ # Parse steps for this agent
+ steps = self._parse_steps_from_logs(agent_logs)
+
cycle_match = re.search(r'\(Cycle (\d+)\)', agent_name)
if cycle_match:
cycle_number = int(cycle_match.group(1))
if cycle_number not in cycles_dict:
cycles_dict[cycle_number] = {}
- cycles_dict[cycle_number][agent_name] = agent_logs
+ cycles_dict[cycle_number][agent_name] = {
+ 'logs': agent_logs,
+ 'steps': steps
+ }
else:
if 0 not in cycles_dict:
cycles_dict[0] = {}
- cycles_dict[0][agent_name] = agent_logs
+ cycles_dict[0][agent_name] = {'logs': agent_logs, 'steps': steps}
return [cycles_dict[cycle] for cycle in sorted(cycles_dict.keys())]
+ def count_cycles(self) -> int:
+ """Count distinct cycle numbers present in the logs."""
+ agent_sections = self.get_agent_sections()
+ cycles: set[int] = set()
+ for agent_name in agent_sections.keys():
+ m = re.search(r'\(Cycle (\d+)\)', agent_name)
+ if m:
+ try:
+ cycles.add(int(m.group(1)))
+ except Exception:
+ pass
+ return len(cycles)
+
+ def extract_trial_timestamps(self) -> dict[str, list[datetime]]:
+ """Extract all timestamps grouped by Trial ID from agent logs.
+
+ Pattern example: "2025-09-04 08:20:04 [Trial ID: 03] INFO"
+ """
+ trial_to_times: dict[str, list[datetime]] = {}
+ ts_regex = re.compile(
+ r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*?\[Trial ID:\s*([^\]]+)\]')
+ for part in self._logs:
+ for line in part.content.split('\n'):
+ line = line.strip()
+ m = ts_regex.search(line)
+ if not m:
+ continue
+ ts_str = m.group(1)
+ trial_id = m.group(2).strip()
+ try:
+ ts = datetime.strptime(ts_str, '%Y-%m-%d %H:%M:%S')
+ except Exception:
+ continue
+ trial_to_times.setdefault(trial_id, []).append(ts)
+ return trial_to_times
+
+ def compute_trial_durations_seconds(self) -> dict[str, float]:
+ """Return per-trial duration in seconds as max(timestamp)-min(timestamp)."""
+ trial_to_times = self.extract_trial_timestamps()
+ durations: dict[str, float] = {}
+ for trial_id, times in trial_to_times.items():
+ if not times:
+ continue
+ try:
+ tmin = min(times)
+ tmax = max(times)
+ durations[trial_id] = max(0.0, (tmax - tmin).total_seconds())
+ except Exception:
+ continue
+ return durations
+
class RunLogsParser:
"""Parse the run log."""
@@ -172,36 +836,36 @@ def get_formatted_stack_traces(self,
memory_addr = parts[1]
remaining = parts[2]
- in_match = re.search(r'in (.+?) (/[^\s]+)', remaining)
+      in_match = re.search(r'in (.+?) (/[^\s]+)', remaining)
if not in_match:
continue
function_name = in_match.group(1)
path = in_match.group(2)
- if '/src/' in path and 'llvm-project' not in path and self._benchmark_id and self._sample_id:
- path_parts = path.split(':')
- file_path = path_parts[0] # Just the file path without line numbers
- line_number = path_parts[1] if len(path_parts) > 1 else None
-
- relative_path = file_path.lstrip('/')
-
- # If coverage_report_path is set, it's a local run
- # Otherwise it's cloud
- if self._coverage_report_path:
- url = f'{self._coverage_report_path}{relative_path}.html'
- url_with_line_number = f'{url}#L{line_number}' if line_number else url
- else:
- url = (
- f'{base_url}/results/{self._benchmark_id}/code-coverage-reports/'
- f'{self._sample_id}.fuzz_target/report/linux/'
- f'{relative_path}.html')
- url_with_line_number = f'{url}#L{line_number}' if line_number else url
- stack_traces[frame_num] = {
- "url": url_with_line_number,
- "path": path,
- "function": function_name,
- "memory_address": memory_addr
- }
+ if '/src/' in path and 'llvm-project' not in path:
+ if self._benchmark_id and self._sample_id:
+ path_parts = path.split(':')
+ file_path = path_parts[0]
+ line_number = path_parts[1] if len(path_parts) > 1 else None
+
+ relative_path = file_path.lstrip('/')
+
+ # If coverage_report_path is set, it's a local run
+ # Otherwise it's cloud
+ if self._coverage_report_path:
+ url = f'{self._coverage_report_path}{relative_path}.html'
+ url_line_number = f'{url}#L{line_number}' if line_number else url
+ else:
+ url = (f'{base_url}/results/{self._benchmark_id}/'
+ f'code-coverage-reports/{self._sample_id}.fuzz_target/'
+ f'report/linux/{relative_path}.html')
+ url_line_number = f'{url}#L{line_number}' if line_number else url
+ stack_traces[frame_num] = {
+ "url": url_line_number,
+ "path": path,
+ "function": function_name,
+ "memory_address": memory_addr
+ }
return stack_traces
diff --git a/report/templates/base.html b/report/templates/base.html
index 9a4bcd0be..4fd070e36 100644
--- a/report/templates/base.html
+++ b/report/templates/base.html
@@ -87,12 +87,14 @@
background-color: #333333;
}
.chat_prompt {
- background-color: #fff7f2;
+ background-color: #e2e8f0;
+ border: 1px solid #a0aec0;
max-width: 50%;
overflow: scroll;
}
.chat_response {
- background-color: #fcfff2;
+ background-color: #ececec;
+ border: 1px solid #718096;
max-width: 50%;
overflow: scroll;
margin-left: auto;
@@ -280,6 +282,24 @@
.dark-mode .toc-item {
color: #9ca3af;
}
+
+.log-tag {
+ color: #8b7355;
+}
+
+.dark-mode .log-tag {
+ color: #a08968;
+}
+
+.chat_prompt .log-tag {
+ color: #5a6b8a;
+}
+
+.dark-mode .chat_prompt .log-tag {
+ color: #7a8ba8;
+}
+
+
Search Results
+
+
{% if unified_data %}
@@ -746,8 +768,8 @@
}
document.addEventListener('DOMContentLoaded', (event) => {
- document.querySelectorAll('pre code.syntax-highlight').forEach((block) => {
- hljs.highlightElement(block);
+ document.querySelectorAll('.prettify-benchmark-name').forEach(el => {
+ el.textContent = prettifyBenchmarkName(el.textContent);
});
const searchInput = document.getElementById('searchInput');
@@ -800,10 +822,6 @@
}, 300);
});
- document.querySelectorAll('.prettify-benchmark-name').forEach(el => {
- el.textContent = prettifyBenchmarkName(el.textContent);
- });
-
const tocTree = document.getElementById('toc-tree');
const sections = document.querySelectorAll('.toc-section');
sections.forEach((section, index) => {
@@ -913,16 +931,40 @@
});
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
+
+
+
+
+
+
+
+
+
+
+
+
+
+