@@ -1,4 +1,4 @@
-var lexerFunctions = require("./lexerFunctions");
+var lexerHelpers = require("./lexerHelpers");
 
 module.exports = function(code) {
 
@@ -60,23 +60,23 @@ module.exports = function(code) {
     STATE.lastFunction = STATE.insideFunction[STATE.insideFunction.length - 1];
 
     // handles new lines
-    if (lexerFunctions.handleNewLine(STATE)) {
+    if (lexerHelpers.handleNewLine(STATE)) {
       continue
     }
 
     // handles comments
-    if (lexerFunctions.checkForCommentStart(STATE)) {
+    if (lexerHelpers.checkForCommentStart(STATE)) {
       continue;
     }
-    if (lexerFunctions.handleComment(STATE)) {
+    if (lexerHelpers.handleComment(STATE)) {
       continue;
     }
-    if (lexerFunctions.checkIfInsideComment(STATE)) {
+    if (lexerHelpers.checkIfInsideComment(STATE)) {
       continue;
     }
 
     // ignores chunks that are solely whitespace
-    if (lexerFunctions.checkForWhitespace(STATE.chunk)) {
+    if (lexerHelpers.checkForWhitespace(STATE.chunk)) {
       STATE.advanceAndClear(1);
       continue;
     }
@@ -89,36 +89,36 @@ module.exports = function(code) {
     }
 
     // handles numbers
-    if (lexerFunctions.handleNumber(STATE) === true) {
+    if (lexerHelpers.handleNumber(STATE) === true) {
       STATE.advanceAndClear(1);
       continue;
-    } else if (lexerFunctions.handleNumber(STATE) === "skip") {
-      lexerFunctions.handleEndOfFile(STATE.nextCol, STATE.tokens);
+    } else if (lexerHelpers.handleNumber(STATE) === "skip") {
+      lexerHelpers.handleEndOfFile(STATE.nextCol, STATE.tokens);
       STATE.advance(2);
       continue;
     }
 
     // handles ranges
-    if (lexerFunctions.handleRange(STATE)) {
+    if (lexerHelpers.handleRange(STATE)) {
       continue;
     }
 
     // handles string interpolation
-    if (lexerFunctions.checkForStringInterpolationStart(STATE)) {
+    if (lexerHelpers.checkForStringInterpolationStart(STATE)) {
       continue;
     }
-    if (lexerFunctions.checkForStringInterpolationEnd(STATE)) {
+    if (lexerHelpers.checkForStringInterpolationEnd(STATE)) {
       continue;
     }
 
     // tokenizes return arrows
     if (STATE.currCol === "-" && STATE.nextCol === ">") {
-      lexerFunctions.checkFor(STATE, 'FUNCTION_DECLARATION', "->", STATE.tokens);
+      lexerHelpers.checkFor(STATE, 'FUNCTION_DECLARATION', "->", STATE.tokens);
       if (STATE.insideFunction.length) {
         STATE.lastFunction.returnArrows.push(STATE.tokens.length - 1);
       } else {
         STATE.variableArrows.push(STATE.tokens.length - 1);
-        lexerFunctions.rewriteVariableParensHistory(STATE);
+        lexerHelpers.rewriteVariableParensHistory(STATE);
       }
       STATE.advanceAndClear(2);
       continue;
@@ -134,45 +134,45 @@ module.exports = function(code) {
     }
 
     // handles start and end of function invocations
-    if (lexerFunctions.handleFunctionInvocationStart(STATE)) {
+    if (lexerHelpers.handleFunctionInvocationStart(STATE)) {
       continue;
     }
-    if (lexerFunctions.handleFunctionInvocationEnd(STATE)) {
+    if (lexerHelpers.handleFunctionInvocationEnd(STATE)) {
       continue;
     }
 
     // tuple handling
-    if (lexerFunctions.checkForTupleStart(STATE)) {
+    if (lexerHelpers.checkForTupleStart(STATE)) {
       continue;
     }
-    if (STATE.insideTuple.status && lexerFunctions.handleTuple(STATE)) {
+    if (STATE.insideTuple.status && lexerHelpers.handleTuple(STATE)) {
       continue;
     }
-    if (lexerFunctions.checkForTupleEnd(STATE)) {
-      lexerFunctions.handleEndOfFile(STATE.nextCol, STATE.tokens);
+    if (lexerHelpers.checkForTupleEnd(STATE)) {
+      lexerHelpers.handleEndOfFile(STATE.nextCol, STATE.tokens);
       continue;
     }
 
     // handles parentheses inside of the function invocation
-    if (lexerFunctions.handleFunctionInvocationInside(STATE)) {
+    if (lexerHelpers.handleFunctionInvocationInside(STATE)) {
       continue;
     }
 
     //handling functions declarations
-    if (lexerFunctions.handleFunctionDeclarationStart(STATE)) {
+    if (lexerHelpers.handleFunctionDeclarationStart(STATE)) {
       continue;
     }
-    if (lexerFunctions.handleInsideOfFunctionDeclaration(STATE)) {
+    if (lexerHelpers.handleInsideOfFunctionDeclaration(STATE)) {
       continue;
     }
-    if (lexerFunctions.handleFunctionDeclarationEnd(STATE)) {
+    if (lexerHelpers.handleFunctionDeclarationEnd(STATE)) {
       continue;
     }
 
     // collection initializer handling
     if (STATE.tokens.length && STATE.currCol === '(' &&
       (STATE.lastToken.type === 'ARRAY_END' || STATE.lastToken.type === 'DICTIONARY_END')) {
-      lexerFunctions.checkFor(STATE, 'FUNCTION_INVOCATION', STATE.currCol, STATE.tokens);
+      lexerHelpers.checkFor(STATE, 'FUNCTION_INVOCATION', STATE.currCol, STATE.tokens);
       var tmp = {};
       tmp.name = STATE.lastToken.value;
       tmp.status = true;
@@ -189,7 +189,7 @@ module.exports = function(code) {
     }
 
     // handles classes and structs
-    if (lexerFunctions.handleClassOrStruct(STATE)) {
+    if (lexerHelpers.handleClassOrStruct(STATE)) {
       continue;
     }
 
@@ -203,74 +203,74 @@ module.exports = function(code) {
     }
 
     // handles property access and method calls via dot notation
-    if (STATE.currCol === '.' && !lexerFunctions.checkForWhitespace(STATE.prevCol) &&
-      !lexerFunctions.checkForWhitespace(STATE.nextCol) && (
+    if (STATE.currCol === '.' && !lexerHelpers.checkForWhitespace(STATE.prevCol) &&
+      !lexerHelpers.checkForWhitespace(STATE.nextCol) && (
       STATE.lastToken.type === 'IDENTIFIER' || STATE.lastToken.value === 'self' ||
       STATE.lastToken.type === 'TYPE_PROPERTY')) {
-      lexerFunctions.makeToken(undefined, STATE.chunk, STATE.tokens, 'DOT_SYNTAX', '.');
+      lexerHelpers.makeToken(undefined, STATE.chunk, STATE.tokens, 'DOT_SYNTAX', '.');
       STATE.advanceAndClear(1);
       continue;
     }
 
     // evaluation block that executes if the lexer is not inside a string,
     // not inside a number, and an appropriate evaluation point has been reached
     if (!STATE.insideString && !STATE.insideNumber &&
-      lexerFunctions.checkForEvaluationPoint(STATE)) {
+      lexerHelpers.checkForEvaluationPoint(STATE)) {
 
       // identifies tuple elements names following dot syntax lookups
       if (STATE.lastToken && STATE.lastToken.type === 'DOT_SYNTAX' && STATE.TUPLE_ELEMENT_NAMES[STATE.chunk]) {
-        lexerFunctions.makeToken(undefined, undefined, STATE.tokens, 'TUPLE_ELEMENT_NAME', STATE.chunk);
+        lexerHelpers.makeToken(undefined, undefined, STATE.tokens, 'TUPLE_ELEMENT_NAME', STATE.chunk);
 
       // invokes helper function to determine whether a collection is an array or dictionary
       // upon identification of certain punctuation
       } else if (STATE.insideCollection.length && STATE.lastCollection.type === undefined &&
-        lexerFunctions.checkFor(STATE, 'PUNCTUATION', STATE.chunk, STATE.tokens)) {
-        lexerFunctions.determineCollectionType(STATE);
+        lexerHelpers.checkFor(STATE, 'PUNCTUATION', STATE.chunk, STATE.tokens)) {
+        lexerHelpers.determineCollectionType(STATE);
 
       // handles the last square bracket arrays and dictionaries appropriately
       } else if (STATE.insideCollection.length && STATE.currCol === ']' && !STATE.subscriptLookup) {
-        lexerFunctions.checkFor(STATE, 'COLLECTION', STATE.chunk, STATE.tokens, function() {
+        lexerHelpers.checkFor(STATE, 'COLLECTION', STATE.chunk, STATE.tokens, function() {
           STATE.tokens[STATE.tokens.length - 1].type = STATE.lastCollection.type || 'ARRAY_END';
           STATE.insideCollection.pop();
         });
 
       // handles the opens square bracket of arrays and dictionaries
       } else if (STATE.tokens.length && STATE.lastToken.type !== 'IDENTIFIER' &&
         STATE.lastToken.type !== 'SUBSCRIPT_LOOKUP_END' && STATE.currCol === '[') {
-        lexerFunctions.checkFor(STATE, 'COLLECTION', STATE.chunk, STATE.tokens, function() {
+        lexerHelpers.checkFor(STATE, 'COLLECTION', STATE.chunk, STATE.tokens, function() {
           STATE.insideCollection.push({ type: undefined, location: STATE.tokens.length - 1 }); })
 
       // default, fallthrough evaluation of chunk based on lexical precedence
       } else {
-        lexerFunctions.checkFor(STATE, 'KEYWORD', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkFor(STATE, 'NATIVE_METHOD', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkFor(STATE, 'METHOD_ARGUMENT_NAME', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkFor(STATE, 'TYPE_PROPERTY', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkFor(STATE, 'TYPE', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkFor(STATE, 'PUNCTUATION', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkFor(STATE, 'SUBSCRIPT_LOOKUP', STATE.chunk, STATE.tokens, function() {
+        lexerHelpers.checkFor(STATE, 'KEYWORD', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkFor(STATE, 'NATIVE_METHOD', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkFor(STATE, 'METHOD_ARGUMENT_NAME', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkFor(STATE, 'TYPE_PROPERTY', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkFor(STATE, 'TYPE', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkFor(STATE, 'PUNCTUATION', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkFor(STATE, 'SUBSCRIPT_LOOKUP', STATE.chunk, STATE.tokens, function() {
           STATE.subscriptLookup = !STATE.subscriptLookup;
         }) ||
-        lexerFunctions.checkFor(STATE, 'OPERATOR', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkFor(STATE, 'TERMINATOR', STATE.chunk, STATE.tokens) ||
-        lexerFunctions.checkForIdentifier(STATE) ||
-        lexerFunctions.checkForLiteral(STATE.chunk, STATE.tokens);
+        lexerHelpers.checkFor(STATE, 'OPERATOR', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkFor(STATE, 'TERMINATOR', STATE.chunk, STATE.tokens) ||
+        lexerHelpers.checkForIdentifier(STATE) ||
+        lexerHelpers.checkForLiteral(STATE.chunk, STATE.tokens);
       }
 
       STATE.clearChunk();
 
       // special evaluation point handling
-      if (lexerFunctions.checkForWhitespace(STATE.nextCol)) {
+      if (lexerHelpers.checkForWhitespace(STATE.nextCol)) {
         STATE.advance(1);
       }
-      lexerFunctions.handleEndOfFile(STATE.nextCol, STATE.tokens);
+      lexerHelpers.handleEndOfFile(STATE.nextCol, STATE.tokens);
 
     }
     STATE.advance(1);
   }
 
   if (STATE.tokens[STATE.tokens.length - 1].value === '\\n') {
-    lexerFunctions.makeToken(undefined, undefined, STATE.tokens, 'TERMINATOR', 'EOF');
+    lexerHelpers.makeToken(undefined, undefined, STATE.tokens, 'TERMINATOR', 'EOF');
   }
 
   return STATE.tokens;
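
A minimal usage sketch of the module this diff touches, not part of the change set. It assumes the file above is saved as lexer.js next to lexerHelpers.js and that the exported function is called directly with a source string; per the final return above, it yields the STATE.tokens array.

// usage-sketch.js (hypothetical, for illustration only)
var lex = require('./lexer');

// Illustrative Swift-like input string; any source the lexer accepts works here.
var tokens = lex('var greeting = "hello"');

// Each token carries a type and a value; the lexer may append a trailing
// { type: 'TERMINATOR', value: 'EOF' } token, as shown at the end of the diff.
tokens.forEach(function(token) {
  console.log(token.type, token.value);
});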