""" Generate interactive HTML visualization for BPA test results from TRX files How to run: 1. Place this script anywhere in your repo (root, scripts/, tools/, etc.) 2. Open a terminal and run: python visualize_bpa_results.py # Scans current directory for .trx files python visualize_bpa_results.py --input ./results # Scan specific folder Quick tip: Type "python " then drag this file into your terminal to paste the full path. Using VS Code with Copilot? Just ask: "Run the visualize_bpa_results.py script on my TRX files" Requirements: Python 3.7+ (no external dependencies) """ import sys import argparse import re import json from pathlib import Path from collections import defaultdict import webbrowser import xml.etree.ElementTree as ET from datetime import datetime def parse_trx_file(file_path): """Parse TRX file and extract test run metadata and results""" try: tree = ET.parse(file_path) root = tree.getroot() # Define namespace ns = {'ns': 'http://microsoft.com/schemas/VisualStudio/TeamTest/2010'} # Extract test run info test_run = root.find('.//ns:TestRun', ns) run_name = test_run.get('name', '') if test_run else '' # Extract model name from run name or filename model_name = '' if run_name: # Extract from path like "C:\...\D&A - Inventory Insights.SemanticModel\definition" match = re.search(r'([^\\]+)\.SemanticModel', run_name) if match: model_name = match.group(1) if not model_name: # Fallback to filename pattern: 20251114_1705_BPA_Inventory_Insights.trx filename = Path(file_path).stem parts = filename.split('_BPA_') if len(parts) == 2: model_name = parts[1].replace('_', ' ') # Extract timestamps times = root.find('.//ns:Times', ns) start_time = times.get('start', '') if times is not None else '' finish_time = times.get('finish', '') if times is not None else '' # Extract result summary result_summary = root.find('.//ns:ResultSummary', ns) outcome = result_summary.get('outcome', 'Unknown') if result_summary is not None else 'Unknown' counters = root.find('.//ns:Counters', ns) stats = { 'total': int(counters.get('total', 0)) if counters is not None else 0, 'executed': int(counters.get('executed', 0)) if counters is not None else 0, 'passed': int(counters.get('passed', 0)) if counters is not None else 0, 'failed': int(counters.get('failed', 0)) if counters is not None else 0, 'inconclusive': int(counters.get('inconclusive', 0)) if counters is not None else 0, 'notExecuted': int(counters.get('notExecuted', 0)) if counters is not None else 0, } # Calculate pass rate pass_rate = (stats['passed'] / stats['total'] * 100) if stats['total'] > 0 else 0 # Extract test definitions (rules) rules = {} for unit_test in root.findall('.//ns:UnitTest', ns): test_id = unit_test.get('id', '') test_name = unit_test.get('name', '') properties = {} for prop in unit_test.findall('.//ns:Property', ns): key_elem = prop.find('ns:Key', ns) value_elem = prop.find('ns:Value', ns) if key_elem is not None and value_elem is not None: properties[key_elem.text] = value_elem.text rules[test_id] = { 'id': test_id, 'name': test_name, 'description': properties.get('Description', ''), 'severity': int(properties.get('Severity', 1)), 'category': properties.get('Category', 'Unknown'), 'rule_id': properties.get('RuleID', ''), } # Extract test results (violations) violations_by_rule = defaultdict(list) for result in root.findall('.//ns:UnitTestResult', ns): test_id = result.get('testId', '') outcome = result.get('outcome', '') if outcome == 'Failed': # Extract error message and stack trace (violation details) output = 
        # Extract test results (violations)
        violations_by_rule = defaultdict(list)
        for result in root.findall('.//ns:UnitTestResult', ns):
            test_id = result.get('testId', '')
            # Use a distinct name so the run-level outcome above is not overwritten
            result_outcome = result.get('outcome', '')

            if result_outcome == 'Failed':
                # Extract error message and stack trace (violation details)
                output = result.find('.//ns:Output', ns)
                if output is not None:
                    error_info = output.find('.//ns:ErrorInfo', ns)
                    if error_info is not None:
                        message_elem = error_info.find('ns:Message', ns)
                        stack_trace_elem = error_info.find('ns:StackTrace', ns)

                        violation_message = ''
                        if message_elem is not None and message_elem.text:
                            violation_message = message_elem.text.strip()

                        # Parse the StackTrace to get the individual violated objects
                        if stack_trace_elem is not None and stack_trace_elem.text:
                            stack_trace = stack_trace_elem.text.strip()
                            # Format: "Objects in violation:\n  Object1\n  Object2\n..."
                            if 'Objects in violation:' in stack_trace:
                                objects_text = stack_trace.split('Objects in violation:')[1].strip()
                                object_lines = [line.strip() for line in objects_text.split('\n') if line.strip()]
                                for obj_line in object_lines:
                                    violations_by_rule[test_id].append({
                                        'object': obj_line,
                                        'message': violation_message
                                    })

        return {
            'model_name': model_name,
            'file_name': Path(file_path).name,
            'start_time': start_time,
            'finish_time': finish_time,
            'outcome': outcome,
            'stats': stats,
            'pass_rate': pass_rate,
            'rules': rules,
            'violations': violations_by_rule
        }

    except Exception as e:
        print(f"  āœ— Error parsing {Path(file_path).name}: {e}")
        return None


def extract_object_name(violation_msg):
    """Extract an object name from a violation message"""
    # Try to extract the object name from common patterns
    # Pattern 1: "Object: TableName.ColumnName"
    match = re.search(r'Object:\s*(.+?)(?:\s+-|\s*$)', violation_msg)
    if match:
        return match.group(1).strip()

    # Pattern 2: 'Table'[Column] pattern
    match = re.search(r"'([^']+)'\['([^']+)'\]", violation_msg)
    if match:
        return f"{match.group(1)}.{match.group(2)}"

    # Pattern 3: Table.Column pattern
    match = re.search(r'(\w+)\.(\w+)', violation_msg)
    if match:
        return f"{match.group(1)}.{match.group(2)}"

    # Return the first 50 chars if no pattern matches
    return violation_msg[:50] + '...' if len(violation_msg) > 50 else violation_msg


def prepare_visualization_data(trx_data):
    """Prepare the data structure for visualization"""
    rules = trx_data['rules']
    violations = trx_data['violations']

    # Group rules by category
    rules_by_category = defaultdict(list)
    for rule_id, rule in rules.items():
        category = rule['category']

        # Get the violations for this rule
        rule_violations = violations.get(rule_id, [])

        rule_data = {
            'id': rule_id,
            'name': rule['name'],
            'description': rule['description'],
            'severity': rule['severity'],
            'rule_id': rule['rule_id'],
            'status': 'failed' if rule_violations else 'passed',
            'violation_count': len(rule_violations),
            'violations': rule_violations
        }
        rules_by_category[category].append(rule_data)

    # Calculate per-category statistics
    category_stats = {}
    for category, category_rules in rules_by_category.items():
        total = len(category_rules)
        passed = sum(1 for r in category_rules if r['status'] == 'passed')
        failed = total - passed
        pass_rate = (passed / total * 100) if total > 0 else 0

        category_stats[category] = {
            'total': total,
            'passed': passed,
            'failed': failed,
            'pass_rate': pass_rate,
            'rules': category_rules
        }

    return {
        'categories': category_stats,
        'stats': trx_data['stats'],
        'pass_rate': trx_data['pass_rate']
    }


def create_multi_model_html(models_data, output_path):
    """Create an interactive HTML page with a dropdown to select BPA results"""
    # Embed all models' data as a single JSON object for the page's JavaScript
    models_json = json.dumps(models_data)

    html_content = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Power BI Semantic Models - BPA Results Viewer</title>
<style>
  body {{ font-family: 'Segoe UI', sans-serif; margin: 24px; }}
  .summary span {{ margin-right: 16px; }}
  .passed {{ color: #107c10; }}
  .failed {{ color: #d13438; }}
  .category {{ margin: 8px 0; }}
  .category summary {{ cursor: pointer; font-weight: bold; }}
  .rule {{ padding: 4px 8px; margin: 2px 0; }}
  .severity-1 {{ background: #deecf9; }}  /* Info */
  .severity-2 {{ background: #fff4ce; }}  /* Warning */
  .severity-3 {{ background: #fde7e9; }}  /* Error */
</style>
</head>
<body>
<h1>Power BI Semantic Models - BPA Results Viewer</h1>
<select id="modelSelect" onchange="render()"></select>
<div class="summary">
  <span id="passRate">Pass Rate: 0%</span>
  <span class="passed">āœ… <span id="passedCount">0</span> Passed</span>
  <span class="failed">āŒ <span id="failedCount">0</span> Failed</span>
  <span>šŸ“‹ <span id="totalCount">0</span> Total Rules</span>
</div>
<p>šŸ’” <b>How to use:</b> Select a BPA analysis result from the dropdown above.
Click on category headers to expand/collapse rule details.
Failed rules show violation details with affected objects.</p>
<div id="categories"></div>
<h3>Legend</h3>
<ul>
  <li class="passed">Passed</li>
  <li class="failed">Failed</li>
  <li class="severity-1">Info (Severity 1)</li>
  <li class="severity-2">Warning (Severity 2)</li>
  <li class="severity-3">Error (Severity 3)</li>
</ul>
<script>
const modelsData = {models_json};
const select = document.getElementById('modelSelect');

// One dropdown entry per parsed TRX file
for (const [key, model] of Object.entries(modelsData)) {{
  const opt = document.createElement('option');
  opt.value = key;
  opt.textContent = model.model_name + ' (' + model.file_name + ')';
  select.appendChild(opt);
}}

// Render the summary stats and per-category rule lists for the selected result
function render() {{
  const data = modelsData[select.value].data;
  document.getElementById('passRate').textContent = 'Pass Rate: ' + data.pass_rate.toFixed(1) + '%';
  document.getElementById('passedCount').textContent = data.stats.passed;
  document.getElementById('failedCount').textContent = data.stats.failed;
  document.getElementById('totalCount').textContent = data.stats.total;

  const container = document.getElementById('categories');
  container.innerHTML = '';
  for (const [name, cat] of Object.entries(data.categories)) {{
    const details = document.createElement('details');
    details.className = 'category';
    const summary = document.createElement('summary');
    summary.textContent = name + ': ' + cat.passed + '/' + cat.total + ' passed (' + cat.pass_rate.toFixed(1) + '%)';
    details.appendChild(summary);

    for (const rule of cat.rules) {{
      const div = document.createElement('div');
      div.className = 'rule severity-' + rule.severity;
      div.textContent = (rule.status === 'passed' ? 'āœ… ' : 'āŒ ') + rule.name +
        (rule.violation_count ? ' (' + rule.violation_count + ' violation(s))' : '');
      for (const v of rule.violations) {{
        const obj = document.createElement('div');
        obj.textContent = '• ' + v.object;
        div.appendChild(obj);
      }}
      details.appendChild(div);
    }}
    container.appendChild(details);
  }}
}}

if (select.options.length > 0) {{ render(); }}
</script>
</body>
</html>
""" with open(output_path, 'w', encoding='utf-8') as f: f.write(html_content) print(f"āœ“ Multi-model BPA viewer saved to: {output_path}") def main(): parser = argparse.ArgumentParser( description='Generate interactive HTML viewer for Tabular Editor BPA results', formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" Examples: %(prog)s --input ./bpa_results --output bpa_viewer.html %(prog)s --input . --output report.html --no-browser %(prog)s # Scans current directory, opens in browser """ ) parser.add_argument( '--input', '-i', type=Path, default=Path.cwd(), help='Path to directory containing TRX files (default: current directory, searches recursively)' ) parser.add_argument( '--output', '-o', type=Path, default=None, help='Output HTML file path (default: bpa_results_viewer.html in input directory)' ) parser.add_argument( '--no-browser', action='store_true', help='Do not open browser after generating HTML' ) args = parser.parse_args() # Resolve input path input_path = args.input.resolve() if not input_path.exists(): print(f"āŒ Input path does not exist: {input_path}") return 1 # Find TRX files recursively if input is a directory if input_path.is_dir(): print(f"šŸ” Scanning for BPA TRX files in: {input_path}") trx_files = list(input_path.rglob("*.trx")) elif input_path.is_file() and input_path.suffix == '.trx': trx_files = [input_path] input_path = input_path.parent else: print(f"āŒ Invalid input: must be directory or .trx file") return 1 if not trx_files: print(f"āŒ No TRX files found in: {input_path}") print(" Run BPA analysis first to generate TRX files.") return 1 print(f"Found {len(trx_files)} BPA result file(s)") models_data = {} for trx_file in sorted(trx_files, reverse=True): # Most recent first print(f" Processing: {trx_file.name}") try: trx_data = parse_trx_file(trx_file) if trx_data: viz_data = prepare_visualization_data(trx_data) # Use filename as unique key file_key = trx_file.stem models_data[file_key] = { 'model_name': trx_data['model_name'], 'file_name': trx_data['file_name'], 'data': viz_data } pass_rate = viz_data['pass_rate'] print(f" āœ“ {viz_data['stats']['total']} rules, {pass_rate:.1f}% pass rate") else: print(f" ⚠ Could not parse file") except Exception as e: print(f" āœ— Error: {e}") if not models_data: print("No valid BPA results found.") return 1 # Determine output path if args.output: output_html = args.output.resolve() output_html.parent.mkdir(parents=True, exist_ok=True) else: output_html = input_path / "bpa_results_viewer.html" print(f"\nšŸŽØ Creating BPA results viewer...") create_multi_model_html(models_data, output_html) print(f"\nāœ… Done! Viewer saved to: {output_html}") print(f" BPA results available: {len(models_data)}") # Open in browser unless --no-browser specified if not args.no_browser: print(f" Opening in browser...") webbrowser.open(output_html.as_uri()) return 0 if __name__ == "__main__": sys.exit(main())