-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_autorebase_sample.py
More file actions
195 lines (160 loc) Β· 8.17 KB
/
test_autorebase_sample.py
File metadata and controls
195 lines (160 loc) Β· 8.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
#!/usr/bin/env python3
"""
Test script to run complete AutoRebase process on sample data
"""
import asyncio
from pathlib import Path
from api.autorebase.core import AutoRebase
def _print_patch_step(title: str, step: dict) -> None:
    """Print the applied/failed patch summary for one rebase step dict."""
    print(f"\n  {title}:")
    print(f"    Success: {step.get('success', False)}")
    print(f"    Applied patches: {len(step.get('applied_patches', []))}")
    print(f"    Failed patches: {len(step.get('failed_patches', []))}")
    if step.get('applied_patches'):
        print("    Applied to files:")
        for path in step['applied_patches']:
            print(f"      [OK] {path}")
    if step.get('failed_patches'):
        print("    Failed on files:")
        for path in step['failed_patches']:
            print(f"      [FAIL] {path}")


def _print_changelog(changelog: dict) -> None:
    """Print summary counts and per-item detail from the AutoRebase changelog."""
    print("\n[*] Changelog Summary:")
    print(f"  Files processed: {len(changelog.get('files_processed', []))}")
    print(f"  Patches generated: {len(changelog.get('patches_generated', []))}")
    print(f"  Patches applied: {len(changelog.get('patches_applied', []))}")
    print(f"  Backup files (.orig): {len(changelog.get('backup_files', []))}")
    print(f"  Reject files (.rej): {len(changelog.get('reject_files', []))}")
    print(f"  3-way merge calls: {len(changelog.get('three_way_merges', []))}")

    if changelog.get('files_processed'):
        print("\n  Files processed:")
        for path in changelog['files_processed']:
            print(f"    - {path}")

    if changelog.get('three_way_merges'):
        print("\n  3-way merge attempts:")
        for merge in changelog['three_way_merges']:
            status = merge.get('status', 'unknown')
            label = f"{merge['target_file']} (patch: {merge['patch_name']})"
            if status == 'success':
                print(f"    [OK] {label} - AI resolved")
                print(f"      Conflict type: {merge.get('conflict_type', 'unknown')}")
                print(f"      Changes applied: {merge.get('changes_applied', [])}")
            elif status == 'failed':
                print(f"    [FAIL] {label} - AI failed")
                print(f"      Error: {merge.get('error', 'unknown')}")
            elif status == 'failed_no_requirements':
                print(f"    [WARN] {label} - No requirements found")
            else:
                print(f"    [?] {label} - Status: {status}")


def _report_generated_files(f1_dir: Path) -> None:
    """List .orig backups, .rej rejects and newly created files under *f1_dir*."""
    print("\n[*] Checking for generated files...")
    exists = f1_dir.exists()

    orig_files = list(f1_dir.rglob("*.orig")) if exists else []
    if orig_files:
        print("  Backup files (.orig) created:")
        for path in orig_files:
            print(f"    - {path}")
    else:
        print("  No backup files (.orig) found")

    rej_files = list(f1_dir.rglob("*.rej")) if exists else []
    if rej_files:
        print("  Reject files (.rej) created:")
        for path in rej_files:
            print(f"    - {path}")
    else:
        print("  No reject files (.rej) found")

    # Everything else that is a regular file counts as rebase output.
    f1_files = []
    if exists:
        f1_files = [f for f in f1_dir.rglob("*")
                    if f.is_file() and not f.name.endswith(('.orig', '.rej', '.patch'))]
    if f1_files:
        print("  F1 files created:")
        for path in f1_files:
            print(f"    - {path}")
    else:
        print("  No f1 files created")


async def test_autorebase_sample():
    """Run the complete AutoRebase process against the bundled sample data.

    Expects the sample repositories under ``data/sample`` (``base-1.0``,
    ``base-1.1``, ``feature-5.0``) to exist, simulating cloned repositories.
    Prints a human-readable progress/result report and returns None; when an
    input directory is missing it reports which and returns early. Exceptions
    raised by the rebase itself are caught and reported with a traceback
    rather than propagated, since this is a standalone test harness.
    """
    print("[*] Testing Complete AutoRebase Process")
    print("=" * 50)

    # Set up directories (simulating cloned repositories).
    base_0_dir = Path("data/sample/base-1.0")
    base_1_dir = Path("data/sample/base-1.1")
    feature_0_dir = Path("data/sample/feature-5.0")
    work_dir = Path("data/sample")

    print("[*] Working with sample data:")
    print(f"  Base 0: {base_0_dir}")
    print(f"  Base 1: {base_1_dir}")
    print(f"  Feature 0: {feature_0_dir}")
    print(f"  Work Dir: {work_dir}")

    # Bail out early, with a clear report, if any input tree is missing.
    missing_dirs = [
        f"{name}: {path}"
        for path, name in [(base_0_dir, "Base 0"),
                           (base_1_dir, "Base 1"),
                           (feature_0_dir, "Feature 0")]
        if not path.exists()
    ]
    if missing_dirs:
        print("[FAIL] Missing directories:")
        for missing in missing_dirs:
            print(f"  - {missing}")
        return
    print("[OK] All directories found")

    print("\n[*] Initializing AutoRebase...")
    autorebase = AutoRebase(work_dir=str(work_dir))
    # Override the directory paths so AutoRebase operates on the sample data
    # instead of whatever defaults it derived from work_dir.
    autorebase.base_0_dir = base_0_dir
    autorebase.base_1_dir = base_1_dir
    autorebase.feature_0_dir = feature_0_dir
    print(f"  Base 0 Dir: {autorebase.base_0_dir}")
    print(f"  Base 1 Dir: {autorebase.base_1_dir}")
    print(f"  Feature 0 Dir: {autorebase.feature_0_dir}")

    print("\n[*] Running AutoRebase process...")
    print("-" * 30)
    try:
        result = await autorebase.run_autorebase()

        print("\n[*] AutoRebase Results:")
        print(f"  Success: {result['success']}")
        print(f"  Message: {result['message']}")

        if 'details' in result:
            details = result['details']
            print("\n[*] Detailed Results:")
            print(f"  Files processed: {details.get('files_processed', 0)}")
            print(f"  Patches generated: {details.get('patches_generated', 0)}")

            if 'step1_results' in details:
                _print_patch_step("Step 1 (base_0 -> feature_0 patches to base_1)",
                                  details['step1_results'])
            if 'step2_results' in details:
                _print_patch_step("Step 2 (base_0 -> base_1 patches to feature_0)",
                                  details['step2_results'])
            if 'changelog' in details:
                _print_changelog(details['changelog'])
            if 'changelog_path' in details:
                print(f"\n[*] Changelog saved to: {details['changelog_path']}")

        # Inspect the output tree for artifacts left by the patch tooling.
        _report_generated_files(Path("data/sample/feature-5.1"))

        print("\n[OK] AutoRebase test completed successfully!")
    except Exception as e:
        # Report instead of crashing: this script is a diagnostic harness.
        print("\n[FAIL] AutoRebase test failed with error:")
        print(f"  {e}")
        import traceback
        traceback.print_exc()
# Allow running this test harness directly as a script.
if __name__ == "__main__":
    asyncio.run(test_autorebase_sample())