Tests: Added an option to accept the actual token stream (#2515)
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224
diff --git a/test-suite.html b/test-suite.html
index 2e2e171..e107990 100644
--- a/test-suite.html
+++ b/test-suite.html
@@ -93,8 +93,11 @@
<p>Your file is built up of two or three sections, separated by ten or more dashes <code>-</code>, starting at the begin of the line:</p>
<ol>
- <li>Your language snippet. The code you want to compile using Prism. (<strong>required</strong>)</li>
- <li>The simplified token stream you expect. Needs to be valid JSON. (<strong>required</strong>)</li>
+ <li>Your language snippet. The code you want to tokenize using Prism. (<strong>required</strong>)</li>
+ <li>
+ The simplified token stream you expect. Needs to be valid JSON. (<em>optional</em>) <br>
+ If there is no token stream defined, the test case will fail unless the <code>--accept</code> flag is present when running the test command (e.g. <code>npm run test:languages -- --accept</code>). If the flag is present and there is no expected token stream, the runner will insert the actual token stream into the test case file, changing it.
+ </li>
<li>A comment explaining the test case. (<em>optional</em>)</li>
</ol>
<p>The easiest way would be to look at an existing test file:</p>
@@ -114,10 +117,25 @@
This is a comment explaining this test case.</code></pre>
+ <h2 id="writing-tests-the-easy-way">The easy way</h2>
+ <p>The easy way to create one or multiple new test case(s) is this:</p>
+
+ <ol>
+ <li>Create a new file for a new test case in <code>tests/languages/${language}</code>.</li>
+ <li>Insert the code you want to test (and nothing more).</li>
+ <li>Repeat the first two steps for as many test cases as you want.</li>
+ <li>Run <code>npm run test:languages -- --accept</code>.</li>
+ <li>Done.</li>
+ </ol>
+
+ <p>This works by making the test runner insert the actual token stream of your test code as the expected token stream. <strong>Carefully check that the inserted token stream is actually what you expect or else the test is meaningless!</strong></p>
+
+ <p>Optionally, you can then also add comments to test cases.</p>
+
<h2 id="writing-tests-explaining-the-simplified-token-stream">Explaining the simplified token stream</h2>
- <p>While compiling, Prism transforms your source code into a token stream. This is basically a tree of nested tokens (or arrays, or strings).</p>
+ <p>While highlighting, Prism transforms your source code into a token stream. This is basically a tree of nested tokens (or arrays, or strings).</p>
<p>As these trees are hard to write by hand, the test runner uses a simplified version of it.</p>
<p>It uses the following rules:</p>
<ul>
diff --git a/tests/helper/test-case.js b/tests/helper/test-case.js
index 0ebd277..a00ceda 100644
--- a/tests/helper/test-case.js
+++ b/tests/helper/test-case.js
@@ -49,43 +49,66 @@ module.exports = {
*
* @param {string} languageIdentifier
* @param {string} filePath
+ * @param {boolean} acceptEmpty
*/
- runTestCase(languageIdentifier, filePath) {
+ runTestCase(languageIdentifier, filePath, acceptEmpty) {
const testCase = this.parseTestCaseFile(filePath);
const usedLanguages = this.parseLanguageNames(languageIdentifier);
- if (null === testCase) {
- throw new Error("Test case file has invalid format (or the provided token stream is invalid JSON), please read the docs.");
- }
-
const Prism = PrismLoader.createInstance(usedLanguages.languages);
// the first language is the main language to highlight
- const simplifiedTokenStream = this.simpleTokenize(Prism, testCase.testSource, usedLanguages.mainLanguage);
+ const simplifiedTokenStream = this.simpleTokenize(Prism, testCase.code, usedLanguages.mainLanguage);
+
+ if (testCase.expectedTokenStream === null) {
+ // the test case doesn't have an expected value
+ if (!acceptEmpty) {
+ throw new Error('This test case doesn\'t have an expected token stream.'
+ + ' Either add the JSON of a token stream or run \`npm run test:languages -- --accept\`'
+ + ' to automatically add the current token stream.');
+ }
+
+ // change the file
+ const lineEnd = (/\r\n/.test(testCase.code) || !/\n/.test(testCase.code)) ? '\r\n' : '\n';
+ const separator = "\n\n----------------------------------------------------\n\n";
+ const pretty = TokenStreamTransformer.prettyprint(simplifiedTokenStream)
+ .replace(/^( +)/gm, m => {
+ return "\t".repeat(m.length / 4);
+ });
+
+ let content = testCase.code + separator + pretty;
+ if (testCase.comment) {
+ content += separator + testCase.comment;
+ }
+ content = content.replace(/\r?\n/g, lineEnd);
- const actual = JSON.stringify(simplifiedTokenStream);
- const expected = JSON.stringify(testCase.expectedTokenStream);
+ fs.writeFileSync(filePath, content, "utf-8");
+ } else {
+ // there is an expected value
+ const actual = JSON.stringify(simplifiedTokenStream);
+ const expected = JSON.stringify(testCase.expectedTokenStream);
- if (actual === expected) {
- // no difference
- return;
- }
+ if (actual === expected) {
+ // no difference
+ return;
+ }
- // The index of the first difference between the expected token stream and the actual token stream.
- // The index is in the raw expected token stream JSON of the test case.
- const diffIndex = translateIndexIgnoreSpaces(testCase.expectedJson, expected, firstDiff(expected, actual));
- const expectedJsonLines = testCase.expectedJson.substr(0, diffIndex).split(/\r\n?|\n/g);
- const columnNumber = expectedJsonLines.pop().length + 1;
- const lineNumber = testCase.expectedLineOffset + expectedJsonLines.length;
-
- const tokenStreamStr = TokenStreamTransformer.prettyprint(simplifiedTokenStream);
- const message = "\n\nActual Token Stream:" +
- "\n-----------------------------------------\n" +
- tokenStreamStr +
- "\n-----------------------------------------\n" +
- "File: " + filePath + ":" + lineNumber + ":" + columnNumber + "\n\n";
-
- assert.deepEqual(simplifiedTokenStream, testCase.expectedTokenStream, testCase.comment + message);
+ // The index of the first difference between the expected token stream and the actual token stream.
+ // The index is in the raw expected token stream JSON of the test case.
+ const diffIndex = translateIndexIgnoreSpaces(testCase.expectedJson, expected, firstDiff(expected, actual));
+ const expectedJsonLines = testCase.expectedJson.substr(0, diffIndex).split(/\r\n?|\n/g);
+ const columnNumber = expectedJsonLines.pop().length + 1;
+ const lineNumber = testCase.expectedLineOffset + expectedJsonLines.length;
+
+ const tokenStreamStr = TokenStreamTransformer.prettyprint(simplifiedTokenStream);
+ const message = "\n\nActual Token Stream:" +
+ "\n-----------------------------------------\n" +
+ tokenStreamStr +
+ "\n-----------------------------------------\n" +
+ "File: " + filePath + ":" + lineNumber + ":" + columnNumber + "\n\n";
+
+ assert.deepEqual(simplifiedTokenStream, testCase.expectedTokenStream, testCase.comment + message);
+ }
},
/**
@@ -160,33 +183,36 @@ module.exports = {
*
* @private
* @param {string} filePath
- * @returns {{testSource: string, expectedTokenStream: Array<string[]>, comment:string?}|null}
+ * @returns {ParsedTestCase}
+ *
+ * @typedef ParsedTestCase
+ * @property {string} code
+ * @property {string} expectedJson
+ * @property {number} expectedLineOffset
+ * @property {Array | null} expectedTokenStream
+ * @property {string} comment
*/
parseTestCaseFile(filePath) {
const testCaseSource = fs.readFileSync(filePath, "utf8");
- const testCaseParts = testCaseSource.split(/^-{10,}\w*$/m);
-
- try {
- const testCase = {
- testSource: testCaseParts[0].trim(),
- expectedJson: testCaseParts[1],
- expectedLineOffset: testCaseParts[0].split(/\r\n?|\n/g).length,
- expectedTokenStream: JSON.parse(testCaseParts[1]),
- comment: null
- };
-
- // if there are three parts, the third one is the comment
- // explaining the test case
- if (testCaseParts[2]) {
- testCase.comment = testCaseParts[2].trim();
- }
+ const testCaseParts = testCaseSource.split(/^-{10,}[ \t]*$/m);
- return testCase;
- }
- catch (e) {
- // the JSON can't be parsed (e.g. it could be empty)
- return null;
+ if (testCaseParts.length > 3) {
+ throw new Error("Invalid test case format: Too many sections.");
}
+
+ const code = testCaseParts[0].trim();
+ const expected = (testCaseParts[1] || '').trim();
+ const comment = (testCaseParts[2] || '').trim();
+
+ const testCase = {
+ code,
+ expectedJson: expected,
+ expectedLineOffset: code.split(/\r\n?|\n/g).length,
+ expectedTokenStream: expected ? JSON.parse(expected) : null,
+ comment
+ };
+
+ return testCase;
},
/**
diff --git a/tests/run.js b/tests/run.js
index b9c8893..4803dde 100644
--- a/tests/run.js
+++ b/tests/run.js
@@ -12,6 +12,8 @@ const testSuite =
// load complete test suite
: TestDiscovery.loadAllTests(__dirname + "/languages");
+const accept = !!argv.accept;
+
// define tests for all tests in all languages in the test suite
for (const language in testSuite) {
if (!testSuite.hasOwnProperty(language)) {
@@ -27,7 +29,7 @@ for (const language in testSuite) {
it("– should pass test case '" + fileName + "'", function () {
if (path.extname(filePath) === '.test') {
- TestCase.runTestCase(language, filePath);
+ TestCase.runTestCase(language, filePath, accept);
} else {
TestCase.runTestsWithHooks(language, require(filePath));
}