Skip to content

Commit a57d587

Browse files
author
DavertMik
committed
Merge branch '4.x' of github.com:codeceptjs/CodeceptJS into 4.x
2 parents 867f7e3 + 7aef4e5 commit a57d587

19 files changed

Lines changed: 1088 additions & 521 deletions

bin/mcp-server.js

Lines changed: 287 additions & 131 deletions
Large diffs are not rendered by default.

docs/debugging.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,13 @@ After(({ I }) => {
107107
})
108108
```
109109

110+
### Pause Modes
111+
112+
`pause()` adapts to who's driving the test:
113+
114+
- **TTY (humans)** — when `process.stdin` is a terminal (running `npx codeceptjs run --debug` yourself), the readline REPL described above opens.
115+
- **MCP server (agent-driven debug)** — the MCP server registers an in-process pause handler before running tests, so when `pause()` fires inside a `run_test` invocation, control yields back to the agent. The agent drives the REPL through the [`pause` MCP tool](/mcp#pause). The same `I` container the test uses runs the agent's code, so artifacts (URL, ARIA, HTML, screenshot, console, storage) are captured against the live page.
116+
110117
## Pause Plugin
111118

112119
For automated debugging without modifying test code, use the `pause` plugin. It pauses tests based on different triggers, controlled entirely from the command line. The default is `on=fail`.

docs/locators.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ I.click({ role: 'button', name: 'Submit' }, '#login-form')
2222

2323
The context narrows the search to one region of the page, and the semantic string says what the user actually clicks. This is **more precise than ARIA or CSS alone** because it combines structural scope with human-readable intent.
2424

25-
Supported strategies: `css`, `xpath`, `id`, `name`, `role`, `frame`, `shadow`, `pw`. Shadow DOM and React selectors have their own pages — see [Shadow DOM](/shadow) and [React](/react). Playwright-specific locators (`_react`, `_vue`, `data-testid`) use the `pw` strategy: `{ pw: '_react=Button[name="Save"]' }`.
25+
Supported strategies: `css`, `xpath`, `id`, `name`, `role`, `frame`, `shadow`, `pw`. Shadow DOM and React selectors have their own pages — see [Shadow DOM](/shadow) and [React](/react). Playwright-specific locators use the `pw` strategy: `{ pw: '[data-testid="save"]' }`.
2626

2727
## Locator types at a glance
2828

docs/mcp.md

Lines changed: 94 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -235,44 +235,85 @@ Capture the current state of the browser without performing any action. Useful f
235235
}
236236
```
237237

238+
### continue
239+
240+
Release a paused test (one that called `pause()` during `run_test`) and let it run to completion. Returns the final reporter result.
241+
242+
To inspect or manipulate state while the test is paused, use [`run_code`](#run_code) — it operates on the same container the test is using.
243+
244+
**Parameters:**
245+
- `timeout` (optional): ms to wait for the test to finish after continuing (default 60000).
246+
247+
**Returns:**
248+
```json
249+
{
250+
"status": "completed",
251+
"reporterJson": { "stats": { "tests": 1, "passes": 1, "failures": 0 }, "tests": [...] },
252+
"error": null
253+
}
254+
```
255+
256+
**Example flow:**
257+
258+
```json
259+
{ "name": "run_test", "arguments": { "test": "checkout_test" } }
260+
// → { "status": "paused", "file": "...", "note": "..." }
261+
262+
{ "name": "run_code", "arguments": { "code": "return await I.grabCurrentUrl()" } }
263+
// → { "status": "success", "returnValue": "http://...", "artifacts": { ... } }
264+
265+
{ "name": "run_code", "arguments": { "code": "await I.click('Save')" } }
266+
// → { "status": "success", "artifacts": { ... } }
267+
268+
{ "name": "continue", "arguments": {} }
269+
// → { "status": "completed", "reporterJson": { ... } }
270+
```
271+
272+
**Notes:**
273+
- Pause runs in-process: `run_code` and the test share the same `I` / browser. There's no subprocess, no IPC.
274+
- `run_test` and `continue` wrap test execution in the same `withSilencedIO` helper that `run_step_by_step` uses, so step output doesn't interleave with the MCP JSON-RPC stream. Stdout/stderr are restored before each tool call returns.
275+
- TTY behaviour (`npx codeceptjs run --debug` at a terminal) is unchanged — `pause()` opens the readline REPL whenever `process.stdin.isTTY` is true.
276+
238277
### run_test
239278

240-
Run a specific test by name or file path. Uses subprocess to run tests with isolation.
279+
Run a specific test by name or file path. Runs in-process so it shares the same `I` / browser as `run_code` and `snapshot`. If the test calls `pause()` — or if `pauseAt` is set and the Nth step completes — this tool returns early and the agent drives the session through `run_code` and `continue`.
241280

242281
**Parameters:**
243282
- `test` (required): Test name or file path
244283
- `timeout` (optional): Timeout in milliseconds (default: 60000)
245284
- `config` (optional): Path to codecept.conf.js
285+
- `pauseAt` (optional): 1-based step index. The test pauses after the Nth step completes. Use this as a programmatic breakpoint without editing the test. Discover step indices via the `list` CLI (`--steps`) or via `run_step_by_step`.
246286

247-
**Returns:**
287+
**Returns (test completed normally):**
248288
```json
249289
{
250-
"meta": {
251-
"exitCode": 0,
252-
"cli": "/path/to/codecept.js",
253-
"root": "/project/root",
254-
"configPath": "/path/to/codecept.conf.js",
255-
"args": ["run", "--config", "...", "--reporter", "json", "test_file.js"],
256-
"resolvedFile": "/full/path/to/test_file.js"
257-
},
258-
"reporterJson": {
259-
"stats": {
260-
"tests": 3,
261-
"passes": 2,
262-
"failures": 1
263-
}
264-
},
265-
"stderr": "",
266-
"rawStdout": ""
290+
"status": "completed",
291+
"file": "/path/to/test.js",
292+
"reporterJson": { "stats": { "tests": 1, "passes": 1, "failures": 0 }, "tests": [...] },
293+
"error": null
294+
}
295+
```
296+
297+
**Returns (test reached `pause()` or `pauseAt`):**
298+
```json
299+
{
300+
"status": "paused",
301+
"file": "/path/to/test.js",
302+
"pausedAfter": { "index": 3, "name": "I.click(\"Save\")", "status": "passed" },
303+
"page": { "url": "https://example.com/checkout", "title": "Checkout", "contentSize": 18432 },
304+
"suggestions": [
305+
"Call snapshot to capture URL/HTML/ARIA/screenshot/console/storage at this point",
306+
"Call run_code to inspect or manipulate state (e.g. return await I.grabText(\"h1\"))",
307+
"Call continue to release the pause and let the test finish"
308+
]
267309
}
268310
```
269311

270312
**Features:**
271313
- Automatically resolves test names to file paths
272314
- Supports partial test name matching
273-
- Uses json reporter for structured output
274-
- Executes in subprocess for isolation
275-
- Includes stderr for debugging
315+
- Runs in-process; results assembled from CodeceptJS test events
316+
- Yields on `pause()` (or `pauseAt`) so the agent can inspect via `run_code` and release with `continue`
276317

277318
**Example:**
278319
```json
@@ -287,57 +328,52 @@ Run a specific test by name or file path. Uses subprocess to run tests with isol
287328

288329
### run_step_by_step
289330

290-
Run a test step by step with detailed step information including timing and status. Generates AI-friendly trace files.
331+
Run a test interactively, pausing after every step. Returns a paused payload after the first step completes — the agent then calls `continue` to advance one step at a time, or `run_code` / `snapshot` to inspect state at any pause.
291332

292333
**Parameters:**
293334
- `test` (required): Test name or file path
294-
- `timeout` (optional): Timeout in milliseconds (default: 60000)
335+
- `timeout` (optional): per-call timeout in milliseconds (default: 60000)
295336
- `config` (optional): Path to codecept.conf.js
296337

297-
**Returns:**
338+
**Returns (after each step):**
298339
```json
299340
{
300-
"stepByStep": true,
301-
"results": [
302-
{
303-
"test": "Navigate to homepage",
304-
"file": "/path/to/test.js",
305-
"traceFile": "file:///output/trace_Test_Name_abc123/trace.md",
306-
"status": "completed",
307-
"steps": [
308-
{
309-
"step": "I.amOnPage(\"/\")",
310-
"status": "passed",
311-
"time": 150
312-
},
313-
{
314-
"step": "I.seeInTitle(\"Test App\")",
315-
"status": "passed",
316-
"time": 50
317-
}
318-
]
319-
}
341+
"status": "paused",
342+
"file": "/path/to/test.js",
343+
"pausedAfter": { "index": 1, "name": "I.amOnPage(\"/\")", "status": "passed" },
344+
"page": { "url": "http://localhost:8000/", "title": "Test App", "contentSize": 1832 },
345+
"suggestions": [
346+
"Call snapshot to capture URL/HTML/ARIA/screenshot/console/storage at this point",
347+
"Call run_code to inspect or manipulate state ...",
348+
"Call continue to release the pause and let the test run the next step (or finish)"
320349
]
321350
}
322351
```
323352

324-
**Trace Files:**
325-
- Generated in `{output_dir}/trace_{TestName}_{hash}/`
326-
- Includes screenshots (PNG), page HTML, ARIA snapshots, console logs
327-
- `trace.md` file provides structured summary for AI analysis
328-
- Named with test title and hash for uniqueness
353+
**Returns (after the last step):**
354+
```json
355+
{ "status": "completed", "file": "...", "reporterJson": { "stats": {...}, "tests": [...] } }
356+
```
329357

330-
**Example:**
358+
**Flow:**
331359
```json
332-
{
333-
"name": "run_step_by_step",
334-
"arguments": {
335-
"test": "authentication_test",
336-
"timeout": 90000
337-
}
338-
}
360+
{ "name": "run_step_by_step", "arguments": { "test": "checkout_test" } }
361+
// → { "status": "paused", "pausedAfter": { "index": 1, ... } }
362+
363+
{ "name": "snapshot", "arguments": {} }
364+
// → full artifact bundle for step 1
365+
366+
{ "name": "continue", "arguments": {} }
367+
// → { "status": "paused", "pausedAfter": { "index": 2, ... } }
368+
369+
{ "name": "continue", "arguments": {} }
370+
// → ... and so on, until { "status": "completed", "reporterJson": {...} }
339371
```
340372

373+
For a one-shot breakpoint (pause once at a specific step rather than every step), use `run_test` with `pauseAt: N` instead.
374+
375+
For per-step trace artifacts written to disk (HTML / ARIA / screenshot / console / storage per step) without the interactive flow, enable the `aiTrace` plugin.
376+
341377
### start_browser
342378

343379
Start the browser session (initializes CodeceptJS container).

docs/playwright.md

Lines changed: 38 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -478,11 +478,47 @@ When a test fails and video was enabled a video file is shown under the `artifac
478478

479479
Open the video and use it to debug a failed test case. Video helps when running tests on CI. Configure your CI system to enable artifacts storage for `output/video` and review videos of failed test cases to understand failures.
480480

481-
It is recommended to enable [subtitles](https://codecept.io/plugins/#subtitles) plugin which will generate subtitles from steps in `.srt` format. Subtitles file will be saved into after a video file so video player (like VLC) would load them automatically:
481+
## Screencast
482+
483+
For richer evidence than helper-level `video`, enable the [`screencast`](https://codecept.io/plugins/#screencast) plugin. It uses Playwright's `page.screencast` API (Playwright >= 1.59) to record WebM video with optional burned-in action captions and a standalone `.srt` subtitle track.
484+
485+
```js
486+
plugins: {
487+
screencast: {
488+
enabled: true,
489+
on: 'fail',
490+
}
491+
}
492+
```
493+
494+
`on: 'fail'` (default) deletes the recording when the test passes; `on: 'test'` keeps every test's video.
495+
496+
`captions: true` (default) burns `I.click()` / `I.fillField()` annotations into the video via `page.screencast.showActions()`. `subtitles: true` writes a standalone `.srt` file alongside the video — VLC and most players auto-load it.
497+
498+
```js
499+
plugins: {
500+
screencast: {
501+
enabled: true,
502+
on: 'test',
503+
captions: true,
504+
subtitles: true,
505+
}
506+
}
507+
```
482508

483509
![](https://user-images.githubusercontent.com/220264/131644090-38d1ca55-1ba1-41fa-8fd1-7dea2b7ae995.png)
484510

485-
## Trace <Badge text="Since 3.1" type="warning"/>
511+
CLI usage:
512+
513+
npx codeceptjs run -p screencast
514+
npx codeceptjs run -p screencast:on=test
515+
npx codeceptjs run -p screencast:on=test;captions=false;subtitles=true
516+
517+
The recording is attached to the test as `test.artifacts.screencast`; the `.srt` (when enabled) is attached as `test.artifacts.subtitle`.
518+
519+
> Enabling helper-level `video: true` **and** the `screencast` plugin produces two independent recordings (one in `output/videos/`, one in `output/screencast/`). Pick one.
520+
521+
## Trace
486522

487523
If video is not enough to discover why a test failed a [trace](https://playwright.dev/docs/trace-viewer/) can be recorded.
488524

docs/plugins.md

Lines changed: 41 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -700,6 +700,47 @@ Scenario('scenario title', { disableRetryFailedStep: true }, () => {
700700
701701
* `config` &#x20;
702702
703+
## screencast
704+
705+
Records WebM video of tests using Playwright's screencast API (Playwright >= 1.59).
706+
When `captions` is enabled, action annotations are burned into the video; when
707+
`subtitles` is enabled, a standalone `.srt` file is also produced.
708+
709+
```js
710+
plugins: {
711+
screencast: {
712+
enabled: true,
713+
on: 'fail',
714+
}
715+
}
716+
```
717+
718+
#### `on=` modes
719+
720+
* **fail** — record while running; delete on pass, keep on fail (default)
721+
* **test** — record and keep every test's video
722+
723+
CLI examples:
724+
725+
npx codeceptjs run -p screencast
726+
npx codeceptjs run -p screencast:on=test
727+
npx codeceptjs run -p screencast:on=test;captions=false;subtitles=true
728+
729+
Possible config options:
730+
731+
* `captions`: burn-in action overlays via `page.screencast.showActions()`. Default: true.
732+
* `subtitles`: also write a standalone `.srt` file alongside the video. Default: false.
733+
* `video`: record a video. With `video=false, subtitles=true`, only the `.srt` is produced (next to `test.artifacts.video` if a helper recorded one). Default: true.
734+
* `size`: pass-through `{ width, height }` for `screencast.start`.
735+
* `quality`: pass-through 0–100 for `screencast.start`.
736+
737+
> Enabling Playwright's helper-level `video: true` and this plugin together
738+
> produces two independent recordings. Pick one.
739+
740+
### Parameters
741+
742+
* `config` &#x20;
743+
703744
## screenshot
704745
705746
Saves screenshots from the browser at points triggered by `on=`. Replaces the
@@ -812,20 +853,6 @@ plugins: {
812853
813854
* `config` &#x20;
814855
815-
## subtitles
816-
817-
Automatically captures steps as subtitle, and saves it as an artifact when a video is found for a failed test
818-
819-
#### Configuration
820-
821-
```js
822-
plugins: {
823-
subtitles: {
824-
enabled: true
825-
}
826-
}
827-
```
828-
829856
[1]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object
830857
831858
[2]: https://github.com/cenfun/monocart-coverage-reports?tab=readme-ov-file#default-options

examples/codecept.config.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,8 +57,10 @@ export const config = {
5757
retryFailedStep: {
5858
enabled: false,
5959
},
60-
subtitles: {
60+
screencast: {
6161
enabled: true,
62+
on: 'test',
63+
subtitles: true,
6264
},
6365
aiTrace: {
6466
enabled: true,

lib/helper/Playwright.js

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ import MultipleElementsFound from './errors/MultipleElementsFound.js'
3636
import RemoteBrowserConnectionRefused from './errors/RemoteBrowserConnectionRefused.js'
3737
import Popup from './extras/Popup.js'
3838
import Console from './extras/Console.js'
39-
import { findReact, findVue, findByPlaywrightLocator } from './extras/PlaywrightReactVueLocator.js'
39+
import { findReact, findByPlaywrightLocator } from './extras/PlaywrightReactVueLocator.js'
4040
import { dropFile } from './scripts/dropFile.js'
4141
import WebElement from '../element/WebElement.js'
4242
import { selectElement } from './extras/elementSelection.js'
@@ -4223,13 +4223,10 @@ async function findByRole(context, locator) {
42234223
}
42244224

42254225
async function findElements(matcher, locator) {
4226-
// Check if locator is a Locator object with react/vue type, or a raw object with react/vue property
42274226
const isReactLocator = locator.type === 'react' || (locator.locator && locator.locator.react) || locator.react
4228-
const isVueLocator = locator.type === 'vue' || (locator.locator && locator.locator.vue) || locator.vue
42294227
const isPwLocator = locator.type === 'pw' || (locator.locator && locator.locator.pw) || locator.pw
42304228

42314229
if (isReactLocator) return findReact(matcher, locator)
4232-
if (isVueLocator) return findVue(matcher, locator)
42334230
if (isPwLocator) return findByPlaywrightLocator.call(this, matcher, locator)
42344231

42354232
// Handle role locators with text/exact options (e.g., {role: 'button', text: 'Submit', exact: true})
@@ -4245,7 +4242,6 @@ async function findElements(matcher, locator) {
42454242

42464243
async function findElement(matcher, locator) {
42474244
if (locator.react) return findReact(matcher, locator)
4248-
if (locator.vue) return findVue(matcher, locator)
42494245
if (locator.pw) return findByPlaywrightLocator.call(this, matcher, locator)
42504246

42514247
locator = new Locator(locator, 'css')

0 commit comments

Comments
 (0)