| 'use strict'; |
| |
| exports.__esModule = true; |
| exports.default = tokenize; |
| |
| var _tokenTypes = require('./tokenTypes'); |
| |
| var t = _interopRequireWildcard(_tokenTypes); |
| |
// Normalize a CommonJS/ES-module import: a real ES-module namespace
// (marked with `__esModule`) passes through untouched; anything else is
// shallow-copied onto a fresh object whose `default` points back at the
// original value.
function _interopRequireWildcard(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  var wrapped = {};
  if (obj != null) {
    Object.keys(obj).forEach(function (key) {
      wrapped[key] = obj[key];
    });
  }
  wrapped.default = obj;
  return wrapped;
}
| |
// Characters that terminate a "word" token: whitespace, parens, combinator
// and delimiter punctuation, quotes, and backslash. A `/` ends a word only
// when it begins a comment (lookahead `(?=\*)`). The `g` flag is required:
// `tokenize` drives this regex statefully by assigning `lastIndex` before
// each `.test()` call.
var wordEnd = /[ \n\t\r\(\)\*:;!&'"\+\|~>,=$^\[\]\\]|\/(?=\*)/g;
| |
/**
 * Tokenize a CSS selector string into a flat token stream.
 *
 * Each token pushed is a 7-element array:
 *   [type, startLine, startColumn, endLine, endColumn, sourceIndex, endIndex]
 * where `endIndex` is exclusive (it becomes the next token's `start`).
 * Columns are computed as `position - offset`, with `offset` holding the
 * index of the most recent newline (-1 before any newline), so columns are
 * effectively 1-based.
 *
 * @param {object} input - source wrapper; this code reads `input.css`
 *   (stringified via `valueOf`), `input.safe`, and `input.error`.
 * @returns {Array<Array>} the token list.
 * @throws whatever `input.error(...)` constructs, on an unclosed quote or
 *   comment when `input.safe` is falsy.
 */
function tokenize(input) {
  var tokens = [];
  var css = input.css.valueOf();
  // NOTE: `length` is captured once; `unclosed` may append to `css` in safe
  // mode, but the main loop still terminates against this original length.
  var _css = css,
      length = _css.length;

  var offset = -1; // index of the last newline seen (-1 = none yet)
  var line = 1;    // current 1-based line number
  var start = 0;   // index of the current token's first character
  var end = 0;     // exclusive index just past the current token

  // Scratch state shared across the switch cases below.
  var code = void 0,
      content = void 0,
      endColumn = void 0,
      endLine = void 0,
      escaped = void 0,
      escapePos = void 0,
      last = void 0,
      lines = void 0,
      next = void 0,
      nextLine = void 0,
      nextOffset = void 0,
      quote = void 0,
      tokenType = void 0;

  // Handle an unterminated quote or comment: in "safe" mode, append the
  // missing closer and let scanning continue to the end of input; otherwise
  // raise a positioned error via the host's error factory.
  function unclosed(what, fix) {
    if (input.safe) {
      // fyi: this is never set to true.
      css += fix;
      next = css.length - 1;
    } else {
      throw input.error('Unclosed ' + what, line, start - offset, start);
    }
  }

  while (start < length) {
    code = css.charCodeAt(start);

    // If the token begins on a newline, advance the line/offset bookkeeping
    // before the switch so the token's own start column is correct.
    if (code === t.newline) {
      offset = start;
      line += 1;
    }

    switch (code) {
      // A run of any whitespace collapses into a single `space` token,
      // updating line/offset for every newline consumed along the way.
      case t.newline:
      case t.space:
      case t.tab:
      case t.cr:
      case t.feed:
        next = start;
        do {
          next += 1;
          code = css.charCodeAt(next);
          if (code === t.newline) {
            offset = next;
            line += 1;
          }
        } while (code === t.space || code === t.newline || code === t.tab || code === t.cr || code === t.feed);

        tokenType = t.space;
        endLine = line;
        endColumn = start - offset;
        end = next;
        break;

      // A run of combinator characters (+ > ~ |) forms one combinator token.
      case t.plus:
      case t.greaterThan:
      case t.tilde:
      case t.pipe:
        next = start;
        do {
          next += 1;
          code = css.charCodeAt(next);
        } while (code === t.plus || code === t.greaterThan || code === t.tilde || code === t.pipe);

        tokenType = t.combinator;
        endLine = line;
        endColumn = start - offset;
        end = next;
        break;

      // Consume these characters as single tokens.
      case t.asterisk:
      case t.ampersand:
      case t.comma:
      case t.equals:
      case t.dollar:
      case t.caret:
      case t.openSquare:
      case t.closeSquare:
      case t.colon:
      case t.semicolon:
      case t.openParenthesis:
      case t.closeParenthesis:
        next = start;
        tokenType = code; // the char code itself doubles as the token type
        endLine = line;
        endColumn = start - offset;
        end = next + 1;
        break;

      // Quoted string: scan for the matching close quote, skipping quotes
      // preceded by an odd number of backslashes (i.e. escaped quotes).
      case t.singleQuote:
      case t.doubleQuote:
        quote = code === t.singleQuote ? "'" : '"';
        next = start;
        do {
          escaped = false;
          next = css.indexOf(quote, next + 1);
          if (next === -1) {
            unclosed('quote', quote);
          }
          // Walk backwards over consecutive backslashes; odd count means
          // this quote character is escaped and not a terminator.
          escapePos = next;
          while (css.charCodeAt(escapePos - 1) === t.backslash) {
            escapePos -= 1;
            escaped = !escaped;
          }
        } while (escaped);

        tokenType = t.str;
        endLine = line;
        endColumn = start - offset;
        end = next + 1;
        break;

      // Backslash escape: consume the run of backslashes, then — if the
      // escape is still "active" (odd run) and the next char is not a
      // slash or whitespace — absorb that escaped char into the word token.
      case t.backslash:
        next = start;
        escaped = true;
        while (css.charCodeAt(next + 1) === t.backslash) {
          next += 1;
          escaped = !escaped;
        }
        code = css.charCodeAt(next + 1);
        if (escaped && code !== t.slash && code !== t.space && code !== t.newline && code !== t.tab && code !== t.cr && code !== t.feed) {
          next += 1;
        }

        tokenType = t.word;
        endLine = line;
        endColumn = next - offset;
        end = next + 1;
        break;

      default:
        // Comment: everything from "/*" through the next "*/".
        if (code === t.slash && css.charCodeAt(start + 1) === t.asterisk) {
          next = css.indexOf('*/', start + 2) + 1; // index of the closing '/'
          if (next === 0) {
            // indexOf returned -1: no terminator found.
            unclosed('comment', '*/');
          }

          content = css.slice(start, next + 1);
          lines = content.split('\n');
          last = lines.length - 1;

          if (last > 0) {
            // Multi-line comment: advance the line counter and remember the
            // newline offset to install after this token is pushed.
            nextLine = line + last;
            nextOffset = next - lines[last].length;
          } else {
            nextLine = line;
            nextOffset = offset;
          }

          tokenType = t.comment;
          line = nextLine;
          endLine = nextLine;
          endColumn = next - nextOffset;
        } else {
          // Word: scan forward with the shared `wordEnd` regex. Seeding
          // `lastIndex` makes the `g`-flagged regex search from start+1;
          // after `.test()`, `lastIndex` sits just past the 1-char
          // terminator match, or is reset to 0 when nothing matched.
          wordEnd.lastIndex = start + 1;
          wordEnd.test(css);
          if (wordEnd.lastIndex === 0) {
            // No terminator: the word runs to the end of the input.
            next = css.length - 1;
          } else {
            // Last character of the word is just before the terminator.
            next = wordEnd.lastIndex - 2;
          }

          tokenType = t.word;
          endLine = line;
          endColumn = next - offset;
        }

        end = next + 1;
        break;
    }

    // Ensure that the token structure remains consistent
    tokens.push([tokenType, // [0] Token type
    line, // [1] Starting line
    start - offset, // [2] Starting column
    endLine, // [3] Ending line
    endColumn, // [4] Ending column
    start, // [5] Start position / Source index
    end]);

    // Reset offset for the next token
    // (deferred from the comment case so the comment's own start column is
    // computed against the pre-comment offset).
    if (nextOffset) {
      offset = nextOffset;
      nextOffset = null;
    }

    start = end;
  }

  return tokens;
}
| module.exports = exports['default']; |