-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy path loom.toml.example
More file actions
199 lines (185 loc) · 5.81 KB
/
loom.toml.example
File metadata and controls
199 lines (185 loc) · 5.81 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
# Bind address for the loom server; loopback-only by default.
[server]
host = "127.0.0.1"
port = 9000
# Model endpoints. Each [models.<name>] table binds one provider endpoint to a
# set of roles; roles used in this file: planner, verifier, extractor,
# executor, compactor.

# Local OpenAI-compatible endpoint handling planning and verification.
[models.primary]
provider = "openai_compatible"
base_url = "http://localhost:1234/v1"
model = "minimax-m2.1"
max_tokens = 8192
temperature = 0.1
roles = ["planner", "verifier"]

# Local Ollama endpoint for extraction and execution; temperature 0.0 for
# deterministic output.
[models.utility]
provider = "ollama"
base_url = "http://localhost:11434"
model = "qwen3:8b"
max_tokens = 4096
temperature = 0.0
roles = ["extractor", "executor"]

# Remote Gemini endpoint (via its OpenAI-compatible surface) for compaction.
[models.compactor]
provider = "openai_compatible"
base_url = "https://generativelanguage.googleapis.com/v1beta/openai"
model = "gemini-2.5-flash"
# Placeholder for the example file — replace with a real key; do not commit
# actual credentials.
api_key = "YOUR_GOOGLE_API_KEY"
max_tokens = 4096
temperature = 1.0
reasoning_effort = "none"
roles = ["compactor"]
# Filesystem locations. Note TOML performs no '~' or env-var expansion; the
# application is expected to expand these paths itself.
[workspace]
default_path = "~/projects"
scratch_dir = "~/.loom/scratch"
# Runtime policy for task execution: retries, budgets, user-question flow,
# iteration loops, and optional tool surfaces.
[execution]
max_subtask_retries = 3
max_loop_iterations = 50
max_parallel_subtasks = 5
enable_streaming = true
# 14400 s = 4 h ceiling for a delegated task.
delegate_task_timeout_seconds = 14400

# Ask-user question flow.
ask_user_v2_enabled = true
ask_user_runtime_blocking_enabled = true
ask_user_durable_state_enabled = true
ask_user_api_enabled = false # question endpoints are intended for TUI-surface runs
ask_user_policy = "block" # block | timeout_default | fail_closed
# NOTE(review): 0 presumably disables the timeout (policy above is "block") —
# confirm against the runtime.
ask_user_timeout_seconds = 0
ask_user_timeout_default_response = ""
ask_user_max_pending_per_task = 3
ask_user_max_questions_per_subtask = 25
ask_user_min_seconds_between_questions = 10

auto_approve_confidence_threshold = 0.8

# Global run budget. NOTE(review): the max_task_* caps below appear gated by
# this flag, and 0 presumably means "no cap" — confirm.
enable_global_run_budget = false
max_task_wall_clock_seconds = 0
max_task_total_tokens = 0
max_task_model_invocations = 0
max_task_tool_calls = 0
max_task_mutating_tool_calls = 0
max_task_replans = 0
max_task_remediation_attempts = 0

# Process iteration loops and the command-based exit gate.
enable_process_iteration_loops = true
enable_iteration_command_exit_gate = false
max_iteration_replans_after_exhaustion = 2
# Commands accepted by the iteration exit gate. NOTE(review): matching is
# presumably prefix-based, per the key name — confirm.
iteration_command_exit_allowlisted_prefixes = [
"pytest",
"uv run pytest",
"python -m pytest",
"python3 -m pytest",
"ruff check",
"npm test",
"pnpm test",
"bun test",
"go test",
"cargo test",
"make test",
]
executor_completion_contract_mode = "off" # off | warn | enforce
planner_degraded_mode = "allow" # allow | require_approval | deny

# Durability and safety feature flags.
enable_sqlite_remediation_queue = false
enable_durable_task_runner = false
enable_mutation_idempotency = false
sealed_artifact_post_call_guard = "warn" # off | warn | enforce
enable_slo_metrics = false

# Cowork memory index and tool exposure.
cowork_tool_exposure_mode = "hybrid" # full | adaptive | hybrid
cowork_memory_index_enabled = true
cowork_memory_index_v2_actions_enabled = true
cowork_memory_index_force_fts = false
cowork_indexer_model_role_strict = false
cowork_memory_index_llm_extraction_enabled = true
cowork_memory_index_queue_max_batches = 32
cowork_memory_index_section_limit = 4
cowork_recall_index_max_chars = 1200

# Optional tool surfaces.
enable_software_dev_tools = false
enable_agent_tools = true
enable_wp_tools = true
wp_high_risk_requires_confirmation = true
agent_tools_allowed_providers = ["codex", "claude_code", "opencode"]
# 1800 s = 30 min cap per agent tool invocation.
agent_tools_max_timeout_seconds = 1800
agent_tools_default_network_mode = "on" # on | off
# Terminal UI behavior: landing/chat entry, resume sizing, refresh cadence,
# and run-lifecycle timeouts. Keys suffixed _ms are milliseconds, _seconds
# are seconds.
[tui]
startup_landing_enabled = true
always_open_chat_directly = false
chat_resume_page_size = 250
chat_resume_max_rendered_rows = 1200
chat_resume_use_event_journal = true
chat_resume_enable_legacy_fallback = true
realtime_refresh_enabled = true
workspace_watch_backend = "poll" # poll | native
workspace_poll_interval_ms = 1000
workspace_refresh_debounce_ms = 250
workspace_refresh_max_wait_ms = 1500
workspace_scan_max_entries = 20000
chat_stream_flush_interval_ms = 120
files_panel_max_rows = 2000
delegate_progress_max_lines = 150
run_launch_heartbeat_interval_ms = 6000
run_launch_timeout_seconds = 300
run_close_modal_timeout_seconds = 45
run_cancel_wait_timeout_seconds = 10
run_progress_refresh_interval_ms = 200
run_preflight_async_enabled = true
# Tiered verification toggles. NOTE(review): tier3_vote_count presumably sets
# the number of votes when tier3 is enabled — confirm voting semantics.
[verification]
tier1_enabled = true
tier2_enabled = true
tier3_enabled = false
tier3_vote_count = 3
# Size limits for prompts, summaries, and tool output. Keys suffixed _chars
# are character counts and _tokens are model-token counts.
[limits]
adhoc_repair_source_max_chars = 0 # 0 = no truncation in ad hoc JSON repair
evidence_context_text_max_chars = 8192
planning_response_max_tokens = 16384

# Per-subtask runner limits and ingest-artifact retention.
[limits.runner]
max_tool_iterations = 20
# 1200 s = 20 min wall-clock ceiling per subtask.
max_subtask_wall_clock_seconds = 1200
max_model_context_tokens = 24000
max_state_summary_chars = 640
max_verification_summary_chars = 8000
default_tool_result_output_chars = 2800
heavy_tool_result_output_chars = 3600
compact_tool_result_output_chars = 900
compact_text_output_chars = 1400
minimal_text_output_chars = 260
tool_call_argument_context_chars = 700
compact_tool_call_argument_chars = 1600
runner_compaction_policy_mode = "off"
enable_filetype_ingest_router = true
enable_artifact_telemetry_events = true
artifact_telemetry_max_metadata_chars = 1200
enable_model_overflow_fallback = true
ingest_artifact_retention_max_age_days = 14
ingest_artifact_retention_max_files_per_scope = 96
ingest_artifact_retention_max_bytes_per_scope = 268_435_456 # 256 MiB

# Verifier prompt-assembly limits (full vs. compact variants).
[limits.verifier]
max_tool_args_chars = 360
max_tool_status_chars = 320
max_tool_calls_tokens = 4000
max_verifier_prompt_tokens = 12000
max_result_summary_chars = 7000
compact_result_summary_chars = 2600
max_evidence_section_chars = 4200
max_evidence_section_compact_chars = 2200
max_artifact_section_chars = 4200
max_artifact_section_compact_chars = 2200
max_tool_output_excerpt_chars = 1100
max_artifact_file_excerpt_chars = 800

# Compaction sizing: chunking, response-token budgeting, and JSON headroom.
[limits.compactor]
max_chunk_chars = 8000
max_chunks_per_round = 10
max_reduction_rounds = 2
min_compact_target_chars = 220
response_tokens_floor = 256
response_tokens_ratio = 0.55
response_tokens_buffer = 256
json_headroom_chars_floor = 128
json_headroom_chars_ratio = 0.30
json_headroom_chars_cap = 1024
# NOTE(review): chars-per-token estimate used for sizing — confirm it matches
# the tokenizer of the configured compactor model.
chars_per_token_estimate = 2.8
token_headroom = 128
target_chars_ratio = 0.82
# Telemetry emission mode and runtime-override controls.
[telemetry]
mode = "active" # off | active | all_typed | debug
runtime_override_enabled = true
runtime_override_api_enabled = false
# Empty by default; the override API itself is also disabled above.
runtime_override_api_token = ""
persist_runtime_override = false
# Rate limiting for debug diagnostics: sustained per-minute rate plus burst.
debug_diagnostics_rate_per_minute = 120
debug_diagnostics_burst = 30
[memory]
# NOTE(review): the .db extension suggests a SQLite file — confirm. '~' must
# be expanded by the application; TOML does no expansion.
database_path = "~/.loom/loom.db"

[logging]
level = "INFO"
# Destination for event logs; no file extension, so presumably a directory —
# confirm.
event_log_path = "~/.loom/logs"