702 lines
19 KiB
JavaScript
702 lines
19 KiB
JavaScript
|
|
/**
|
||
|
|
* Copyright 2013-present, Facebook, Inc.
|
||
|
|
* All rights reserved.
|
||
|
|
*
|
||
|
|
* This source code is licensed under the BSD-style license found in the
|
||
|
|
* LICENSE file in the root directory of this source tree. An additional grant
|
||
|
|
* of patent rights can be found in the PATENTS file in the same directory.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
/*jslint node: true*/

// Esprima's node-type name table; used below to build the `scopeTypes` set.
var Syntax = require('esprima-fb').Syntax;

// Matches one indentation unit — two spaces or a tab — at the start of the
// string or immediately after a newline. Consumed by updateIndent(), one
// pass per indentation level added/removed.
var leadingIndentRegexp = /(^|\n)( {2}|\t)/g;

// Matches any single non-whitespace character (captured). Consumed by the
// white-out / strip helpers further down.
var nonWhiteRegexp = /(\S)/g;
|
||
|
|
|
||
|
|
/**
 * A `state` object represents the state of the parser. It has "local" and
 * "global" parts. Global contains parser position, source, etc. Local contains
 * scope based properties like current class name. State should contain all the
 * info required for transformation. It's the only mandatory object that is
 * being passed to every function in transform chain.
 *
 * @param {string} source - the source text being transformed
 * @param {object} rootNode - AST root; becomes the `parentNode` of the
 *   outermost local scope
 * @param {object} transformOptions - options consumed by individual
 *   transform passes (exposed as `state.g.opts`)
 * @return {object}
 */
function createState(source, rootNode, transformOptions) {
  return {
    /**
     * A tree representing the current local scope (and its lexical scope
     * chain). Useful for tracking identifiers from parent scopes, etc.
     * @type {Object}
     */
    localScope: {
      parentNode: rootNode,
      parentScope: null,
      identifiers: {},
      tempVarIndex: 0,
      tempVars: []
    },
    /**
     * The name (and, if applicable, expression) of the super class
     * @type {Object}
     */
    superClass: null,
    /**
     * The namespace to use when munging identifiers
     * @type {String}
     */
    mungeNamespace: '',
    /**
     * Ref to the node for the current MethodDefinition
     * @type {Object}
     */
    methodNode: null,
    /**
     * Ref to the node for the FunctionExpression of the enclosing
     * MethodDefinition
     * @type {Object}
     */
    methodFuncNode: null,
    /**
     * Name of the enclosing class
     * @type {String}
     */
    className: null,
    /**
     * Whether we're currently within a `strict` scope
     * @type {Bool}
     */
    scopeIsStrict: null,
    /**
     * Indentation offset, in units of two spaces (see updateIndent)
     * @type {Number}
     */
    indentBy: 0,
    /**
     * Global state (not affected by updateState)
     * @type {Object}
     */
    g: {
      /**
       * A set of general options that transformations can consider while doing
       * a transformation:
       *
       * - minify
       *   Specifies that transformation steps should do their best to minify
       *   the output source when possible. This is useful for places where
       *   minification optimizations are possible with higher-level context
       *   info than what jsxmin can provide.
       *
       *   For example, the ES6 class transform will minify munged private
       *   variables if this flag is set.
       */
      opts: transformOptions,
      /**
       * Current position in the source code
       * @type {Number}
       */
      position: 0,
      /**
       * Auxiliary data to be returned by transforms
       * @type {Object}
       */
      extra: {},
      /**
       * Buffer containing the result
       * @type {String}
       */
      buffer: '',
      /**
       * Source that is being transformed
       * @type {String}
       */
      source: source,

      /**
       * Cached parsed docblock (see getDocblock)
       * @type {object}
       */
      docblock: null,

      /**
       * Whether an XML tag namespace was used. NOTE(review): set/read by
       * transform passes outside this file — confirm exact semantics there.
       * @type {Boolean}
       */
      tagNamespaceUsed: false,

      /**
       * If using bolt xjs transformation
       * @type {Boolean}
       */
      isBolt: undefined,

      /**
       * Source map generator to record mappings into (expensive), or null
       * to disable source-map tracking (see catchup/append/move).
       * @type {SourceMapGenerator|null}
       */
      sourceMap: null,

      /**
       * Filename of the file being processed. Will be returned as a source
       * attribute in the source map
       */
      sourceMapFilename: 'source.js',

      /**
       * Only when source map is used: last line in the source for which
       * source map was generated
       * @type {Number}
       */
      sourceLine: 1,

      /**
       * Only when source map is used: last line in the buffer for which
       * source map was generated
       * @type {Number}
       */
      bufferLine: 1,

      /**
       * The top-level Program AST for the original file.
       */
      originalProgramAST: null,

      // Column counterparts of sourceLine/bufferLine, also source-map only.
      sourceColumn: 0,
      bufferColumn: 0
    }
  };
}
|
||
|
|
|
||
|
|
/**
 * Updates a copy of a given state with "update" and returns an updated state.
 * The copy is prototype-chained off the old state, so unmodified properties
 * fall through to it while updated keys shadow the originals. The input
 * state is never mutated.
 *
 * @param {object} state
 * @param {object} update - own enumerable keys overwrite those of `state`
 * @return {object}
 */
function updateState(state, update) {
  var nextState = Object.create(state);
  for (var key in update) {
    if (update.hasOwnProperty(key)) {
      nextState[key] = update[key];
    }
  }
  return nextState;
}
|
||
|
|
|
||
|
|
/**
 * Given a state fill the resulting buffer from the original source up to
 * the end.
 *
 * Copies source text from the current position up to `end` into the output
 * buffer (re-indented per `state.indentBy`, optionally transformed), and —
 * when a source map generator is attached — records line/column mappings
 * for the copied span. Mutates `state.g` (buffer, position, line/column
 * cursors); never moves backwards.
 *
 * @param {number} end - absolute source offset to advance to
 * @param {object} state
 * @param {?function} contentTransformer Optional callback to transform newly
 *                    added content.
 */
function catchup(end, state, contentTransformer) {
  if (end < state.g.position) {
    // cannot move backwards
    return;
  }
  var source = state.g.source.substring(state.g.position, end);
  // Re-indent the copied span per state.indentBy before emitting it.
  var transformed = updateIndent(source, state);
  // Mapping is only recorded for non-empty spans.
  if (state.g.sourceMap && transformed) {
    // record where we are
    state.g.sourceMap.addMapping({
      generated: { line: state.g.bufferLine, column: state.g.bufferColumn },
      original: { line: state.g.sourceLine, column: state.g.sourceColumn },
      source: state.g.sourceMapFilename
    });

    // record line breaks in transformed source
    var sourceLines = source.split('\n');
    var transformedLines = transformed.split('\n');
    // Add line break mappings between last known mapping and the end of the
    // added piece. So for the code piece
    // (foo, bar);
    // > var x = 2;
    // > var b = 3;
    // var c =
    // only add lines marked with ">": 2, 3.
    for (var i = 1; i < sourceLines.length - 1; i++) {
      state.g.sourceMap.addMapping({
        generated: { line: state.g.bufferLine, column: 0 },
        original: { line: state.g.sourceLine, column: 0 },
        source: state.g.sourceMapFilename
      });
      state.g.sourceLine++;
      state.g.bufferLine++;
    }
    // offset for the last piece: the final (possibly partial) line resets
    // the columns, which are then advanced below by the trailing text.
    if (sourceLines.length > 1) {
      state.g.sourceLine++;
      state.g.bufferLine++;
      state.g.sourceColumn = 0;
      state.g.bufferColumn = 0;
    }
    // Advance columns by the last line's length; source and buffer can
    // differ because updateIndent may have changed leading whitespace.
    state.g.sourceColumn += sourceLines[sourceLines.length - 1].length;
    state.g.bufferColumn +=
      transformedLines[transformedLines.length - 1].length;
  }
  state.g.buffer +=
    contentTransformer ? contentTransformer(transformed) : transformed;
  state.g.position = end;
}
|
||
|
|
|
||
|
|
/**
 * Returns original source for an AST node, i.e. the text between the
 * node's range endpooints in the untransformed source.
 * @param {object} node - must carry a `range` [startOffset, endOffset] pair
 * @param {object} state
 * @return {string}
 */
function getNodeSourceText(node, state) {
  var range = node.range;
  return state.g.source.substring(range[0], range[1]);
}
|
||
|
|
|
||
|
|
/**
 * Replaces every non-whitespace character in `value` with a single space,
 * preserving the original layout (newlines, tabs, spacing) exactly.
 * @param {string} value
 * @return {string}
 */
function _replaceNonWhite(value) {
  var whitedOut = value.replace(nonWhiteRegexp, ' ');
  return whitedOut;
}
|
||
|
|
|
||
|
|
/**
 * Removes all non-whitespace characters, leaving only the original
 * whitespace (spaces, tabs, newlines) behind.
 * @param {string} value
 * @return {string}
 */
function _stripNonWhite(value) {
  var whitespaceOnly = value.replace(nonWhiteRegexp, '');
  return whitespaceOnly;
}
|
||
|
|
|
||
|
|
/**
 * Finds the position of the next instance of the specified syntactic char in
 * the pending source.
 *
 * NOTE: This will skip instances of the specified char if they sit inside a
 *       comment body or a string literal.
 *
 * NOTE: This function also assumes that the buffer's current position is not
 *       already within a comment or a string. This is rarely the case since
 *       all of the buffer-advancement utility methods tend to be used on
 *       syntactic nodes' range values -- but it's a small gotcha that's worth
 *       mentioning.
 *
 * NOTE(review): string tracking is a simple toggle — it does not distinguish
 * single from double quotes and does not handle backslash escapes; confirm
 * callers never scan across such constructs.
 *
 * @param {string} char - a single character to search for
 * @param {object} state
 * @return {number} absolute offset of the character in `state.g.source`
 * @throws {Error} if the character is not found
 */
function getNextSyntacticCharOffset(char, state) {
  var pendingSource = state.g.source.substring(state.g.position);
  var pendingSourceLines = pendingSource.split('\n');

  // charOffset is an offset into `pendingSource` (i.e. global across lines).
  var charOffset = 0;
  var line;
  var withinBlockComment = false;
  var withinString = false;
  lineLoop: while ((line = pendingSourceLines.shift()) !== undefined) {
    var lineEndPos = charOffset + line.length;
    charLoop: for (; charOffset < lineEndPos; charOffset++) {
      var currChar = pendingSource[charOffset];
      if (currChar === '"' || currChar === '\'') {
        withinString = !withinString;
        continue charLoop;
      } else if (withinString) {
        continue charLoop;
      } else if (charOffset + 1 < lineEndPos) {
        // Bug fix: the lookahead must index `pendingSource` with the global
        // offset. The previous code indexed `line` (a single line's text)
        // with the global offset, which read `undefined` on every line after
        // the first and silently disabled comment detection there.
        var nextTwoChars = currChar + pendingSource[charOffset + 1];
        if (nextTwoChars === '//') {
          // Line comment: skip the rest of this line (+1 for the newline).
          charOffset = lineEndPos + 1;
          continue lineLoop;
        } else if (nextTwoChars === '/*') {
          withinBlockComment = true;
          charOffset += 1;
          continue charLoop;
        } else if (nextTwoChars === '*/') {
          withinBlockComment = false;
          charOffset += 1;
          continue charLoop;
        }
      }

      if (!withinBlockComment && currChar === char) {
        return charOffset + state.g.position;
      }
    }

    // Account for '\n'
    charOffset++;
    // A (non-escaped) string literal cannot span a raw newline.
    withinString = false;
  }

  throw new Error('`' + char + '` not found!');
}
|
||
|
|
|
||
|
|
/**
 * Catches up as `catchup` but replaces non-whitespace chars with spaces.
 * The buffer and position advance to `end`, but only the whitespace layout
 * of the skipped source is emitted.
 *
 * @param {number} end
 * @param {object} state
 */
function catchupWhiteOut(end, state) {
  catchup(end, state, _replaceNonWhite);
}
|
||
|
|
|
||
|
|
/**
 * Catches up as `catchup` but removes all non-whitespace characters.
 * Only the whitespace (including newlines) of the skipped source reaches
 * the buffer; the position still advances to `end`.
 *
 * @param {number} end
 * @param {object} state
 */
function catchupWhiteSpace(end, state) {
  catchup(end, state, _stripNonWhite);
}
|
||
|
|
|
||
|
|
/**
 * Removes all non-newline characters.
 */
var reNonNewline = /[^\n]/g;

/**
 * Reduces `value` to just its newlines, discarding every other character.
 * @param {string} value
 * @return {string}
 */
function stripNonNewline(value) {
  // An empty replacement string is equivalent to the original
  // return-''-from-callback form, but without a function call per match.
  return value.replace(reNonNewline, '');
}
|
||
|
|
|
||
|
|
/**
 * Catches up as `catchup` but removes all non-newline characters.
 *
 * Equivalent to appending as many newlines as there are in the original source
 * between the current position and `end`.
 *
 * @param {number} end
 * @param {object} state
 */
function catchupNewlines(end, state) {
  catchup(end, state, stripNonNewline);
}
|
||
|
|
|
||
|
|
|
||
|
|
/**
 * Same as catchup but does not touch the buffer: advances the source
 * position to `end` and, when a source map is being recorded, keeps the
 * source line/column cursors consistent with the skipped text. Moving
 * backwards rewinds the cursors to the start of the file first.
 *
 * @param {number} end
 * @param {object} state
 */
function move(end, state) {
  var g = state.g;
  // move the internal cursors
  if (g.sourceMap) {
    if (end < g.position) {
      // Rewind: restart cursor tracking from the beginning of the file.
      g.position = 0;
      g.sourceLine = 1;
      g.sourceColumn = 0;
    }

    var skippedLines = g.source.substring(g.position, end).split('\n');
    var newlineCount = skippedLines.length - 1;
    if (newlineCount > 0) {
      g.sourceLine += newlineCount;
      g.sourceColumn = 0;
    }
    g.sourceColumn += skippedLines[newlineCount].length;
  }
  g.position = end;
}
|
||
|
|
|
||
|
|
/**
 * Appends a string of text to the buffer. When a source map is attached and
 * the string is non-empty, records a mapping from the current source cursor
 * to the current buffer cursor, then advances the buffer line/column
 * cursors past the appended text.
 *
 * @param {string} str
 * @param {object} state
 */
function append(str, state) {
  var g = state.g;
  if (g.sourceMap && str) {
    g.sourceMap.addMapping({
      generated: { line: g.bufferLine, column: g.bufferColumn },
      original: { line: g.sourceLine, column: g.sourceColumn },
      source: g.sourceMapFilename
    });

    var appendedLines = str.split('\n');
    var lastLineIndex = appendedLines.length - 1;
    if (lastLineIndex > 0) {
      // The appended text crossed line boundaries; restart the column count
      // on the final line.
      g.bufferLine += lastLineIndex;
      g.bufferColumn = 0;
    }
    g.bufferColumn += appendedLines[lastLineIndex].length;
  }
  g.buffer += str;
}
|
||
|
|
|
||
|
|
/**
 * Update indent using state.indentBy property. Indent is measured in
 * double spaces (or tabs). A negative indentBy strips that many indent
 * units from the start of every line; a positive one adds them.
 *
 * @param {string} str
 * @param {object} state
 * @return {string}
 */
function updateIndent(str, state) {
  var indentBy = state.indentBy;
  var dedenting = indentBy < 0;
  var passes = dedenting ? -indentBy : indentBy;
  // One regexp pass per indent level: each pass either drops one leading
  // indent unit ('$1') or doubles it ('$1$2$2') on every line.
  var replacement = dedenting ? '$1' : '$1$2$2';
  for (var pass = 0; pass < passes; pass++) {
    str = str.replace(leadingIndentRegexp, replacement);
  }
  return str;
}
|
||
|
|
|
||
|
|
/**
 * Calculates indent from the beginning of the line until "start" or the first
 * character before start.
 * @example
 *   "  foo.bar()"
 *        ^
 *        start
 *   indent will be "  "
 *
 * @param {number} start
 * @param {object} state
 * @return {string}
 */
function indentBefore(start, state) {
  var source = state.g.source;
  var end = start;
  start = start - 1;
  // Walk backwards to the previous newline (or the start of the file),
  // shrinking `end` to the earliest non-whitespace character seen so only
  // pure leading whitespace is returned.
  //
  // Bug fix: the condition is `start >= 0` (was `start > 0`), so the
  // character at index 0 is inspected too; previously the indentation of
  // the file's very first line lost its first character because the
  // returned substring started at index 1.
  while (start >= 0 && source[start] != '\n') {
    if (!source[start].match(/[ \t]/)) {
      end = start;
    }
    start--;
  }
  return source.substring(start + 1, end);
}
|
||
|
|
|
||
|
|
/**
 * Returns the parsed docblock of the source being transformed, parsing and
 * caching it on `state.g.docblock` the first time it is requested.
 *
 * @param {object} state
 * @return {object} map of docblock directives
 */
function getDocblock(state) {
  var g = state.g;
  if (!g.docblock) {
    // The docblock module is loaded lazily, only when first needed.
    var docblock = require('./docblock');
    g.docblock = docblock.parseAsObject(docblock.extract(g.source));
  }
  return g.docblock;
}
|
||
|
|
|
||
|
|
/**
 * Reports whether `identName` is bound anywhere on the lexical scope chain,
 * starting at the innermost local scope. If `stopBeforeNode` is given, the
 * walk does not proceed past the scope whose `parentNode` is that node.
 *
 * @param {string} identName
 * @param {object} state
 * @param {?object} stopBeforeNode - boundary AST node, or falsy for no limit
 * @return {boolean}
 */
function identWithinLexicalScope(identName, state, stopBeforeNode) {
  for (var scope = state.localScope; scope; scope = scope.parentScope) {
    if (scope.identifiers[identName] !== undefined) {
      return true;
    }
    if (stopBeforeNode && scope.parentNode === stopBeforeNode) {
      return false;
    }
  }
  return false;
}
|
||
|
|
|
||
|
|
/**
 * Reports whether `identName` is bound directly in the innermost local
 * scope (the lexical scope chain is NOT consulted).
 *
 * @param {string} identName
 * @param {object} state
 * @return {boolean}
 */
function identInLocalScope(identName, state) {
  var binding = state.localScope.identifiers[identName];
  return binding !== undefined;
}
|
||
|
|
|
||
|
|
/**
 * Builds the metadata record describing where an identifier is bound; this
 * is the shape consumed by declareIdentInLocalScope().
 *
 * @param {object} boundaryNode - enclosing scope-boundary AST node
 * @param {?array} path - ancestor path at the binding site
 * @param {?object} node - the AST node introducing the binding
 * @return {object}
 */
function initScopeMetadata(boundaryNode, path, node) {
  var metadata = {
    boundaryNode: boundaryNode,
    bindingPath: path,
    bindingNode: node
  };
  return metadata;
}
|
||
|
|
|
||
|
|
/**
 * Records a binding for `identName` in the innermost local scope, copying
 * the fields out of a metadata record built by initScopeMetadata() and
 * attaching a prototype-chained snapshot of the current state.
 *
 * @param {string} identName
 * @param {object} metaData - see initScopeMetadata()
 * @param {object} state
 */
function declareIdentInLocalScope(identName, metaData, state) {
  var binding = {
    boundaryNode: metaData.boundaryNode,
    path: metaData.bindingPath,
    node: metaData.bindingNode,
    // Prototype-chained off `state` so later mutations of the live state
    // remain visible through the snapshot.
    state: Object.create(state)
  };
  state.localScope.identifiers[identName] = binding;
}
|
||
|
|
|
||
|
|
/**
 * Looks up the binding metadata for `identName` anywhere on the lexical
 * scope chain, innermost scope first.
 *
 * @param {string} identName
 * @param {object} state
 * @return {object|undefined} the binding record, or undefined if unbound
 */
function getLexicalBindingMetadata(identName, state) {
  for (var scope = state.localScope; scope; scope = scope.parentScope) {
    var binding = scope.identifiers[identName];
    if (binding !== undefined) {
      return binding;
    }
  }
}
|
||
|
|
|
||
|
|
/**
 * Looks up the binding metadata for `identName` in the innermost local
 * scope only (no lexical chain walk).
 *
 * @param {string} identName
 * @param {object} state
 * @return {object|undefined} the binding record, or undefined if unbound
 */
function getLocalBindingMetadata(identName, state) {
  var identifiers = state.localScope.identifiers;
  return identifiers[identName];
}
|
||
|
|
|
||
|
|
/**
 * Apply the given analyzer function to the current node. If the analyzer
 * doesn't return false, traverse each child of the current node using the
 * given traverser function. Syntax nodes (those with a `type`) are pushed
 * onto the front of `path` for the duration of their children's traversal.
 *
 * @param {function} analyzer - returning exactly `false` prunes the subtree
 * @param {function} traverser
 * @param {object} node
 * @param {array} path
 * @param {object} state
 */
function analyzeAndTraverse(analyzer, traverser, node, path, state) {
  var isSyntaxNode = !!node.type;
  if (isSyntaxNode) {
    if (analyzer(node, path, state) === false) {
      return;
    }
    path.unshift(node);
  }

  var children = getOrderedChildren(node);
  for (var i = 0; i < children.length; i++) {
    traverser(children[i], path, state);
  }

  if (isSyntaxNode) {
    path.shift();
  }
}
|
||
|
|
|
||
|
|
/**
 * It is crucial that we traverse in order, or else catchup() on a later
 * node that is processed out of order can move the buffer past a node
 * that we haven't handled yet, preventing us from modifying that node.
 *
 * This can happen when a node has multiple properties containing children.
 * For example, XJSElement nodes have `openingElement`, `closingElement` and
 * `children`. If we traverse `openingElement`, then `closingElement`, then
 * when we get to `children`, the buffer has already caught up to the end of
 * the closing element, after the children.
 *
 * This is basically a Schwartzian transform: collect [child, startIndex]
 * pairs, sort by start index, then return just the children in that order.
 *
 * @param {object} node
 * @return {array} child nodes sorted by their source start offset
 */
function getOrderedChildren(node) {
  var pairs = [];
  Object.keys(node).forEach(function(key) {
    enqueueNodeWithStartIndex(pairs, node[key]);
  });
  return pairs
    .sort(function(a, b) { return a[1] - b[1]; })
    .map(function(pair) { return pair[0]; });
}
|
||
|
|
|
||
|
|
/**
 * Helper function for getOrderedChildren which queues up all of the children
 * of the given node as [node, startIndex] pairs.
 *
 * Children can also be found in arrays (e.g. a Program node's `body` and
 * `comments`), so arrays are flattened recursively; anything that is not an
 * object, or that lacks a `range`, is ignored.
 *
 * @param {array} queue - accumulator of [node, startOffset] pairs (mutated)
 * @param {*} node - candidate child value
 */
function enqueueNodeWithStartIndex(queue, node) {
  if (typeof node !== 'object' || node === null) {
    return;
  }
  if (node.range) {
    queue.push([node, node.range[0]]);
  } else if (Array.isArray(node)) {
    node.forEach(function(item) {
      enqueueNodeWithStartIndex(queue, item);
    });
  }
}
|
||
|
|
|
||
|
|
/**
 * Checks whether a node or any of its sub-nodes contains
 * a syntactic construct of the passed type.
 * @param {object} node - AST node to test.
 * @param {string} type - node type to lookup.
 * @return {boolean}
 */
function containsChildOfType(node, type) {
  var hasType = function(child) {
    return child.type === type;
  };
  return containsChildMatching(node, hasType);
}
|
||
|
|
|
||
|
|
/**
 * Checks whether `node` or any descendant satisfies `matcher` (which must
 * return exactly `true` for a match). Descent stops early once a match is
 * found.
 *
 * @param {object} node - AST node to test
 * @param {function} matcher
 * @return {boolean}
 */
function containsChildMatching(node, matcher) {
  var found = false;

  // Analyzer: flag a match and prune descent into the matching subtree.
  function analyzer(candidate) {
    if (matcher(candidate) === true) {
      found = true;
      return false;
    }
  }

  // Traverser: recurse only while no match has been found yet.
  function traverser(child) {
    if (!found) {
      found = containsChildMatching(child, matcher);
    }
  }

  analyzeAndTraverse(
    analyzer,
    traverser,
    node,
    []
  );
  return found;
}
|
||
|
|
|
||
|
|
// Node types that open a new lexical scope; keyed by Syntax name with a
// truthy value for O(1) membership tests (see getBoundaryNode).
var scopeTypes = {};
[
  Syntax.ArrowFunctionExpression,
  Syntax.FunctionExpression,
  Syntax.FunctionDeclaration,
  Syntax.Program
].forEach(function(type) {
  scopeTypes[type] = true;
});
|
||
|
|
|
||
|
|
/**
 * Returns the nearest enclosing scope-boundary node (function or Program)
 * on an ancestor path as produced by analyzeAndTraverse (innermost first).
 *
 * @param {array} path - ancestor nodes, innermost first
 * @return {object} the first node whose type is in `scopeTypes`
 * @throws {Error} if no scope-boundary node is present in the path
 */
function getBoundaryNode(path) {
  for (var i = 0; i < path.length; i++) {
    var candidate = path[i];
    if (scopeTypes[candidate.type]) {
      return candidate;
    }
  }
  throw new Error(
    'Expected to find a node with one of the following types in path:\n' +
    JSON.stringify(Object.keys(scopeTypes))
  );
}
|
||
|
|
|
||
|
|
/**
 * Returns the name of the temp variable with the given index. Temp vars are
 * namespaced with a `$__` prefix to avoid clashing with user code.
 *
 * @param {number} tempVarIndex
 * @return {string}
 */
function getTempVar(tempVarIndex) {
  return '$__' + String(tempVarIndex);
}
|
||
|
|
|
||
|
|
/**
 * Allocates a fresh temp variable name in the current local scope,
 * registering it for later declaration by injectTempVarDeclarations().
 *
 * @param {object} state
 * @return {string} the new temp variable name (e.g. '$__0')
 */
function injectTempVar(state) {
  var scope = state.localScope;
  var name = '$__' + scope.tempVarIndex;
  scope.tempVarIndex += 1;
  scope.tempVars.push(name);
  return name;
}
|
||
|
|
|
||
|
|
/**
 * Splices a `var $__0, $__1, ...;` declaration for all pending temp vars
 * into the output buffer at the given offset, then clears the pending
 * list. No-op when nothing is pending.
 *
 * @param {object} state
 * @param {number} index - buffer offset at which to insert the declaration
 */
function injectTempVarDeclarations(state, index) {
  var tempVars = state.localScope.tempVars;
  if (tempVars.length === 0) {
    return;
  }
  var declaration = 'var ' + tempVars.join(', ') + ';';
  var buffer = state.g.buffer;
  state.g.buffer = buffer.slice(0, index) + declaration + buffer.slice(index);
  state.localScope.tempVars = [];
}
|
||
|
|
|
||
|
|
// Public API: collect every helper on a single manifest object, then copy
// onto `exports` so consumers see exactly the same named properties.
var api = {
  analyzeAndTraverse: analyzeAndTraverse,
  append: append,
  catchup: catchup,
  catchupNewlines: catchupNewlines,
  catchupWhiteOut: catchupWhiteOut,
  catchupWhiteSpace: catchupWhiteSpace,
  containsChildMatching: containsChildMatching,
  containsChildOfType: containsChildOfType,
  createState: createState,
  declareIdentInLocalScope: declareIdentInLocalScope,
  getBoundaryNode: getBoundaryNode,
  getDocblock: getDocblock,
  getLexicalBindingMetadata: getLexicalBindingMetadata,
  getLocalBindingMetadata: getLocalBindingMetadata,
  getNextSyntacticCharOffset: getNextSyntacticCharOffset,
  getNodeSourceText: getNodeSourceText,
  getOrderedChildren: getOrderedChildren,
  getTempVar: getTempVar,
  identInLocalScope: identInLocalScope,
  identWithinLexicalScope: identWithinLexicalScope,
  indentBefore: indentBefore,
  initScopeMetadata: initScopeMetadata,
  injectTempVar: injectTempVar,
  injectTempVarDeclarations: injectTempVarDeclarations,
  move: move,
  scopeTypes: scopeTypes,
  updateIndent: updateIndent,
  updateState: updateState
};
Object.keys(api).forEach(function(name) {
  exports[name] = api[name];
});
|