# Source: bev-project/ANALYZE_RESULTS_SIMPLE.py (snapshot 2025-11-21 10:50:51 +08:00)
#!/usr/bin/env python
"""
分析推理结果 - 不导入mmdet3d模块避免CUDA问题
"""
import os
import sys
# 添加项目路径但不导入mmdet3d
sys.path.insert(0, '/workspace/bevfusion')
# 只导入基础库
import pickle
import numpy as np
def load_results_without_mmdet3d(filepath):
    """Load a pickled results file while blocking ``mmdet3d`` imports.

    Unpickling may trigger imports of ``mmdet3d`` (e.g. through
    ``__reduce__`` on stored objects), which in turn may initialize CUDA
    or fail outright.  To prevent that, any already-loaded ``mmdet3d``
    modules are dropped and their names are mapped to ``None`` in
    ``sys.modules`` — a ``None`` entry makes a subsequent
    ``import mmdet3d`` raise ``ImportError`` instead of executing the
    package.

    Args:
        filepath: Path to the pickle file to load.

    Returns:
        The unpickled object, or ``None`` if loading fails for any reason.
    """
    # Drop any mmdet3d modules that were already imported.
    for mod in [m for m in sys.modules if m.startswith('mmdet3d')]:
        del sys.modules[mod]
    # Block re-import during unpickling (None entry => ImportError).
    sys.modules['mmdet3d'] = None
    sys.modules['mmdet3d.core'] = None
    try:
        with open(filepath, 'rb') as f:
            return pickle.load(f)
    except Exception as e:
        # Best-effort loader: report the failure and signal it with None
        # so the caller can decide how to proceed.
        print(f"标准pickle加载失败: {e}")
        return None
def analyze_results(results):
    """Print a human-readable summary of BEVFusion inference results.

    Args:
        results: Sequence of per-sample dicts.  Recognized keys (all
            optional): ``'masks_bev'``, ``'gt_masks_bev'``, ``'boxes_3d'``,
            ``'scores_3d'``, ``'labels_3d'``.  Array-like values may be
            numpy arrays or torch tensors (torch tensors are detected via
            their ``cpu`` attribute and moved to numpy before use).
    """
    print("=" * 60)
    print("BEVFusion 单Batch推理结果分析")
    print("=" * 60)
    print(f"总样本数量: {len(results)}")
    # NOTE(review): class order assumed to match the model's BEV map
    # classes — confirm against the training config.
    class_names = ['drivable_area', 'ped_crossing', 'walkway',
                   'stop_line', 'carpark_area', 'divider']
    for sample_idx, sample in enumerate(results):
        print(f"\n--- 样本 {sample_idx} ---")
        print(f"数据类型: {type(sample)}")
        print(f"包含字段: {list(sample.keys())}")
        # --- BEV segmentation predictions ---
        if 'masks_bev' in sample:
            masks = sample['masks_bev']
            print(f"BEV分割形状: {masks.shape}")
            print("BEV分割数据类型:", type(masks))
            if hasattr(masks, 'shape'):  # numpy array or torch tensor
                print(f"BEV分割数据类型: {masks.dtype if hasattr(masks, 'dtype') else 'unknown'}")
                print("每个类别的像素统计:")
                for i in range(min(6, masks.shape[0])):
                    if i < len(class_names):
                        # Move to numpy first if this is a torch tensor.
                        if hasattr(masks, 'cpu'):
                            mask_data = masks[i].cpu().numpy()
                        else:
                            mask_data = masks[i]
                        activated_pixels = (mask_data > 0.5).sum()
                        total_pixels = mask_data.size
                        percentage = activated_pixels / total_pixels * 100
                        # BUGFIX: original printed the literal string "15s";
                        # restore the intended per-class statistics line.
                        print(f" {class_names[i]:15s}: {activated_pixels}/{total_pixels} ({percentage:.2f}%)")
        # --- Ground-truth BEV segmentation ---
        if 'gt_masks_bev' in sample:
            gt_masks = sample['gt_masks_bev']
            print(f"Ground Truth形状: {gt_masks.shape}")
        # --- 3D detection outputs ---
        if 'boxes_3d' in sample:
            boxes = sample['boxes_3d']
            print(f"3D检测框数量: {len(boxes)}")
        if 'scores_3d' in sample:
            scores = sample['scores_3d']
            if len(scores) > 0:
                if hasattr(scores, 'cpu'):
                    scores_np = scores.cpu().numpy()
                else:
                    scores_np = np.array(scores)
                # BUGFIX: original printed the literal string ".3f" twice;
                # restore max/mean confidence output.
                print(f"最高置信度: {scores_np.max():.3f}")
                print(f"平均置信度: {scores_np.mean():.3f}")
        if 'labels_3d' in sample:
            labels = sample['labels_3d']
            if len(labels) > 0:
                if hasattr(labels, 'cpu'):
                    labels_np = labels.cpu().numpy()
                else:
                    labels_np = np.array(labels)
                unique_labels, counts = np.unique(labels_np, return_counts=True)
                print("检测类别分布:")
                for label, count in zip(unique_labels, counts):
                    print(f" 类别 {int(label)}: {count}")
        print("-" * 30)
def main():
    """Locate the single-batch inference pickle and print its analysis."""
    # Default result file produced by the one-batch inference run.
    result_file = '/data/infer_test/20251120_124755/one_batch_results.pkl'
    if not os.path.exists(result_file):
        print(f"结果文件不存在: {result_file}")
        # Fall back to any other run directory holding a result file.
        import glob
        possible_files = glob.glob('/data/infer_test/*/one_batch_results.pkl')
        if not possible_files:
            return
        print("找到以下可能的结果文件:")
        for f in possible_files:
            print(f" {f}")
        result_file = possible_files[-1]  # use the last (most recent) match
        print(f"使用: {result_file}")
    print(f"分析结果文件: {result_file}")
    print(f"文件大小: {os.path.getsize(result_file)} bytes")
    results = load_results_without_mmdet3d(result_file)
    if results is None:
        print("无法加载结果文件")
        return
    analyze_results(results)
    print("\n" + "=" * 60)
    print("分析完成")
    print("=" * 60)
# Script entry point: run the analysis when executed directly.
if __name__ == '__main__':
    main()