Skip to content

Commit 0ab770e

Browse files
authored
remove SCRIPT run mode (#303)
1 parent 297f2aa commit 0ab770e

9 files changed

Lines changed: 41 additions & 194 deletions

File tree

examples/eval.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -366,7 +366,7 @@ def main():
366366
elif mode == "profile":
367367
run_profiling(logger, tests)
368368
else:
369-
# TODO: Implement script mode
369+
# invalid mode
370370
return 2
371371

372372

scripts/ci_test_cuda.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -204,7 +204,8 @@ def test_include_dirs(tmp_path: Path):
204204
{"eval.cu": eval_cu, "submission.cu": sub},
205205
header_files,
206206
flags=["-I.", f"-I{tmp_path}"],
207-
mode="script",
207+
mode=SubmissionMode.TEST.value,
208+
tests="size: 256; seed: 42\n",
208209
)
209210

210211
assert result.compilation.success is True

src/discord-cluster-manager/api/main.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -335,7 +335,6 @@ async def run_submission( # noqa: C901
335335
allowed_modes = [
336336
SubmissionMode.TEST,
337337
SubmissionMode.BENCHMARK,
338-
SubmissionMode.SCRIPT,
339338
SubmissionMode.LEADERBOARD,
340339
]
341340
if submission_mode_enum not in allowed_modes:

src/discord-cluster-manager/bot.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111
from cogs.admin_cog import AdminCog
1212
from cogs.leaderboard_cog import LeaderboardCog
1313
from cogs.misc_cog import BotManagerCog
14-
from cogs.submit_cog import SubmitCog
1514
from cogs.verify_run_cog import VerifyRunCog
1615
from discord import app_commands
1716
from discord.ext import commands
@@ -36,13 +35,6 @@ def __init__(self, debug_mode=False):
3635
super().__init__(intents=intents, command_prefix="!")
3736
self.debug_mode = debug_mode
3837

39-
# Create the run group for leaderboardless runs. Debugging only.
40-
if self.debug_mode:
41-
self.run_group = app_commands.Group(
42-
name="run", description="Run jobs on different platforms"
43-
)
44-
self.tree.add_command(self.run_group)
45-
4638
self.leaderboard_group = app_commands.Group(
4739
name="leaderboard", description="Leaderboard commands"
4840
)
@@ -65,7 +57,6 @@ async def setup_hook(self):
6557
logger.info(f"Syncing commands for staging guild {DISCORD_CLUSTER_STAGING_ID}")
6658
try:
6759
# Load cogs
68-
await self.add_cog(SubmitCog(self))
6960
await self.add_cog(BotManagerCog(self))
7061
await self.add_cog(LeaderboardCog(self))
7162
await self.add_cog(VerifyRunCog(self))

src/discord-cluster-manager/cogs/submit_cog.py

Lines changed: 0 additions & 104 deletions
This file was deleted.

src/discord-cluster-manager/consts.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ class Timeout(IntEnum):
88
BENCHMARK = 180
99
RANKED = 180
1010
COMPILE = 120
11-
SCRIPT = 120
1211

1312

1413
class SchedulerType(Enum):
@@ -88,15 +87,13 @@ class SubmissionMode(Enum):
8887
limited (no stdout/stderr).
8988
Private: Special run that does test followed by leaderboard (on a secret seed), but gives only
9089
very limited feedback.
91-
Script: Submit an arbitrary script.
9290
"""
9391

9492
TEST = "test"
9593
BENCHMARK = "benchmark"
9694
PROFILE = "profile"
9795
LEADERBOARD = "leaderboard"
9896
PRIVATE = "private"
99-
SCRIPT = "script"
10097

10198

10299
class Language(Enum):

src/discord-cluster-manager/report.py

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -342,20 +342,6 @@ def generate_report(result: FullResult) -> RunResultReport: # noqa: C901
342342
make_benchmark_log(bench_run),
343343
)
344344

345-
if "script" in runs:
346-
run = runs["script"]
347-
if run.compilation is not None and not run.compilation.success:
348-
_generate_compile_report(report, run.compilation)
349-
return report
350-
351-
run = run.run
352-
# OK, we were successful
353-
message = "# Success!\n"
354-
message += "Command "
355-
message += f"```bash\n{limit_length(run.command, 1000)}```\n"
356-
message += f"ran successfully in {run.duration:.2f} seconds.\n"
357-
report.add_text(message)
358-
359345
if len(runs) == 1:
360346
run = next(iter(runs.values()))
361347
if len(run.run.stderr.strip()) > 0:

src/discord-cluster-manager/run_eval.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -306,8 +306,7 @@ def run_single_evaluation(
306306
bench_file.flush()
307307
return run_program(call + [mode, bench_file.name], seed=seed, timeout=timeout)
308308
else:
309-
assert mode == "script"
310-
return run_program(call, seed=seed, timeout=Timeout.SCRIPT)
309+
raise ValueError(f"Invalid mode {mode}")
311310

312311

313312
def make_system_info() -> SystemInfo:
@@ -506,12 +505,12 @@ def run_evaluation(
506505
"""
507506
Given a "runner" function `call`, interprets the mode
508507
and calls the runner with the right arguments.
509-
Simple modes (test, benchmark, profile, script) just
508+
Simple modes (test, benchmark, profile) just
510509
invoke the runner once, but private/leaderboard
511510
require multiple runner calls.
512511
"""
513512
results: dict[str, EvalResult] = {}
514-
if mode in ["test", "benchmark", "profile", "script"]:
513+
if mode in ["test", "benchmark", "profile"]:
515514
results[mode] = call(mode=mode)
516515
elif mode in ["private", "leaderboard"]:
517516
# first, run the tests

src/discord-cluster-manager/task.py

Lines changed: 35 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -142,65 +142,43 @@ def build_task_config(
142142
arch: str = None,
143143
mode: SubmissionMode = None,
144144
) -> dict:
145-
if task is None:
146-
assert mode == SubmissionMode.SCRIPT
147-
# TODO detect language
148-
lang = "py"
149-
150-
config = {
151-
"lang": lang,
152-
"arch": arch,
153-
}
154-
155-
eval_name = {"py": "eval.py", "cu": "eval.cu"}[lang]
156-
157-
if lang == "py":
158-
config["main"] = "eval.py"
159-
145+
all_files = {}
146+
for n, c in task.files.items():
147+
if c == "@SUBMISSION@":
148+
all_files[n] = submission_content
149+
else:
150+
all_files[n] = c
151+
152+
common = {
153+
"lang": task.lang.value,
154+
"arch": arch,
155+
"benchmarks": task.benchmarks,
156+
"tests": task.tests,
157+
"mode": mode.value,
158+
"test_timeout": task.test_timeout,
159+
"benchmark_timeout": task.benchmark_timeout,
160+
"ranked_timeout": task.ranked_timeout,
161+
"ranking_by": task.ranking_by.value,
162+
"seed": task.seed,
163+
}
164+
165+
if task.lang == Language.Python:
160166
return {
161-
**config,
162-
"sources": {
163-
eval_name: submission_content,
164-
},
167+
"main": task.config.main,
168+
"sources": all_files,
169+
**common,
165170
}
166171
else:
167-
all_files = {}
168-
for n, c in task.files.items():
169-
if c == "@SUBMISSION@":
170-
all_files[n] = submission_content
172+
sources = {}
173+
headers = {}
174+
for f in all_files:
175+
if f in task.config.sources:
176+
sources[f] = all_files[f]
171177
else:
172-
all_files[n] = c
173-
174-
common = {
175-
"lang": task.lang.value,
176-
"arch": arch,
177-
"benchmarks": task.benchmarks,
178-
"tests": task.tests,
179-
"mode": mode.value,
180-
"test_timeout": task.test_timeout,
181-
"benchmark_timeout": task.benchmark_timeout,
182-
"ranked_timeout": task.ranked_timeout,
183-
"ranking_by": task.ranking_by.value,
184-
"seed": task.seed,
185-
}
178+
headers[f] = all_files[f]
186179

187-
if task.lang == Language.Python:
188-
return {
189-
"main": task.config.main,
190-
"sources": all_files,
191-
**common,
192-
}
193-
else:
194-
sources = {}
195-
headers = {}
196-
for f in all_files:
197-
if f in task.config.sources:
198-
sources[f] = all_files[f]
199-
else:
200-
headers[f] = all_files[f]
201-
202-
return {
203-
"sources": sources,
204-
"headers": headers,
205-
"include_dirs": task.config.include_dirs,
206-
}
180+
return {
181+
"sources": sources,
182+
"headers": headers,
183+
"include_dirs": task.config.include_dirs,
184+
}

0 commit comments

Comments (0)