lexer.h 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517
  1. #pragma once
  2. #include "common.h"
  3. #include "error.h"
  4. #include "str.h"
  5. namespace pkpy{
  6. typedef uint8_t TokenIndex;
// Master token table: a token's TokenIndex is its position in this array.
// TK() resolves a literal to its index with a compile-time linear search,
// and other code depends on the exact ordering (e.g. for an in-place
// operator token, (index - 1) is the plain operator), so do NOT reorder
// or insert entries in the middle.
constexpr const char* kTokens[] = {
// two-word operators recognized by explicit lookahead in Lexer::eat_name()
"is not", "not in", "yield from",
// synthetic tokens emitted by the lexer itself (never matched as text)
"@eof", "@eol", "@sof",
// value-carrying tokens: identifier, number, string, f-string
"@id", "@num", "@str", "@fstr",
"@indent", "@dedent",
/*****************************************/
"+", "+=", "-", "-=", // (INPLACE_OP - 1) can get '=' removed
"*", "*=", "/", "/=", "//", "//=", "%", "%=",
"&", "&=", "|", "|=", "^", "^=",
"<<", "<<=", ">>", ">>=",
/*****************************************/
".", ",", ":", ";", "#", "(", ")", "[", "]", "{", "}",
"**", "=", ">", "<", "...", "->", "?", "@", "==", "!=", ">=", "<=",
/** KW_BEGIN **/
// keywords: everything from "class" onwards is loaded into kTokenKwMap
"class", "import", "as", "def", "lambda", "pass", "del", "from", "with", "yield",
"None", "in", "is", "and", "or", "not", "True", "False", "global", "try", "except", "finally",
"goto", "label", // extended keywords, not available in cpython
"while", "for", "if", "elif", "else", "break", "continue", "return", "assert", "raise"
};
  26. using TokenValue = std::variant<std::monostate, i64, f64, Str>;
  27. const TokenIndex kTokenCount = sizeof(kTokens) / sizeof(kTokens[0]);
  28. constexpr TokenIndex TK(const char token[]) {
  29. for(int k=0; k<kTokenCount; k++){
  30. const char* i = kTokens[k];
  31. const char* j = token;
  32. while(*i && *j && *i == *j) { i++; j++;}
  33. if(*i == *j) return k;
  34. }
  35. #ifdef __GNUC__
  36. // for old version of gcc, it is not smart enough to ignore FATAL_ERROR()
  37. // so we must do a normal return
  38. return 255;
  39. #else
  40. FATAL_ERROR();
  41. #endif
  42. }
  43. #define TK_STR(t) kTokens[t]
  44. const std::map<std::string_view, TokenIndex> kTokenKwMap = [](){
  45. std::map<std::string_view, TokenIndex> map;
  46. for(int k=TK("class"); k<kTokenCount; k++) map[kTokens[k]] = k;
  47. return map;
  48. }();
  49. struct Token{
  50. TokenIndex type;
  51. const char* start;
  52. int length;
  53. int line;
  54. TokenValue value;
  55. Str str() const { return Str(start, length);}
  56. std::string_view sv() const { return std::string_view(start, length);}
  57. std::string info() const {
  58. std::stringstream ss;
  59. ss << line << ": " << TK_STR(type) << " '" << (
  60. sv()=="\n" ? "\\n" : sv()
  61. ) << "'";
  62. return ss.str();
  63. }
  64. };
// Operator binding strength, weakest first; the parser compares these
// values directly, so the declaration order is load-bearing.
// https://docs.python.org/3/reference/expressions.html#operator-precedence
enum Precedence {
PREC_NONE,
PREC_TUPLE, // ,
PREC_LAMBDA, // lambda
PREC_TERNARY, // ?:
PREC_LOGICAL_OR, // or
PREC_LOGICAL_AND, // and
PREC_LOGICAL_NOT, // not
PREC_EQUALITY, // == !=
PREC_TEST, // in / is / is not / not in
PREC_COMPARISION, // < > <= >=  (sic: historical misspelling; renaming would break callers)
PREC_BITWISE_OR, // |
PREC_BITWISE_XOR, // ^
PREC_BITWISE_AND, // &
PREC_BITWISE_SHIFT, // << >>
PREC_TERM, // + -
PREC_FACTOR, // * / % //
PREC_UNARY, // - not
PREC_EXPONENT, // **
PREC_CALL, // ()
PREC_SUBSCRIPT, // []
PREC_ATTRIB, // .index
PREC_PRIMARY,
};
  90. enum StringType { NORMAL_STRING, RAW_STRING, F_STRING };
  91. struct Lexer {
  92. shared_ptr<SourceData> src;
  93. const char* token_start;
  94. const char* curr_char;
  95. int current_line = 1;
  96. std::vector<Token> nexts;
  97. stack<int> indents;
  98. int brackets_level = 0;
  99. bool used = false;
  100. char peekchar() const{ return *curr_char; }
  101. bool match_n_chars(int n, char c0){
  102. const char* c = curr_char;
  103. for(int i=0; i<n; i++){
  104. if(*c == '\0') return false;
  105. if(*c != c0) return false;
  106. c++;
  107. }
  108. for(int i=0; i<n; i++) eatchar_include_newline();
  109. return true;
  110. }
  111. int eat_spaces(){
  112. int count = 0;
  113. while (true) {
  114. switch (peekchar()) {
  115. case ' ' : count+=1; break;
  116. case '\t': count+=4; break;
  117. default: return count;
  118. }
  119. eatchar();
  120. }
  121. }
  122. bool eat_indentation(){
  123. if(brackets_level > 0) return true;
  124. int spaces = eat_spaces();
  125. if(peekchar() == '#') skip_line_comment();
  126. if(peekchar() == '\0' || peekchar() == '\n') return true;
  127. // https://docs.python.org/3/reference/lexical_analysis.html#indentation
  128. if(spaces > indents.top()){
  129. indents.push(spaces);
  130. nexts.push_back(Token{TK("@indent"), token_start, 0, current_line});
  131. } else if(spaces < indents.top()){
  132. while(spaces < indents.top()){
  133. indents.pop();
  134. nexts.push_back(Token{TK("@dedent"), token_start, 0, current_line});
  135. }
  136. if(spaces != indents.top()){
  137. return false;
  138. }
  139. }
  140. return true;
  141. }
  142. char eatchar() {
  143. char c = peekchar();
  144. if(c == '\n') throw std::runtime_error("eatchar() cannot consume a newline");
  145. curr_char++;
  146. return c;
  147. }
  148. char eatchar_include_newline() {
  149. char c = peekchar();
  150. curr_char++;
  151. if (c == '\n'){
  152. current_line++;
  153. src->line_starts.push_back(curr_char);
  154. }
  155. return c;
  156. }
  157. int eat_name() {
  158. curr_char--;
  159. while(true){
  160. unsigned char c = peekchar();
  161. int u8bytes = utf8len(c, true);
  162. if(u8bytes == 0) return 1;
  163. if(u8bytes == 1){
  164. if(isalpha(c) || c=='_' || isdigit(c)) {
  165. curr_char++;
  166. continue;
  167. }else{
  168. break;
  169. }
  170. }
  171. // handle multibyte char
  172. std::string u8str(curr_char, u8bytes);
  173. if(u8str.size() != u8bytes) return 2;
  174. uint32_t value = 0;
  175. for(int k=0; k < u8bytes; k++){
  176. uint8_t b = u8str[k];
  177. if(k==0){
  178. if(u8bytes == 2) value = (b & 0b00011111) << 6;
  179. else if(u8bytes == 3) value = (b & 0b00001111) << 12;
  180. else if(u8bytes == 4) value = (b & 0b00000111) << 18;
  181. }else{
  182. value |= (b & 0b00111111) << (6*(u8bytes-k-1));
  183. }
  184. }
  185. if(is_unicode_Lo_char(value)) curr_char += u8bytes;
  186. else break;
  187. }
  188. int length = (int)(curr_char - token_start);
  189. if(length == 0) return 3;
  190. std::string_view name(token_start, length);
  191. if(src->mode == JSON_MODE){
  192. if(name == "true"){
  193. add_token(TK("True"));
  194. } else if(name == "false"){
  195. add_token(TK("False"));
  196. } else if(name == "null"){
  197. add_token(TK("None"));
  198. } else {
  199. return 4;
  200. }
  201. return 0;
  202. }
  203. if(kTokenKwMap.count(name)){
  204. if(name == "not"){
  205. if(strncmp(curr_char, " in", 3) == 0){
  206. curr_char += 3;
  207. add_token(TK("not in"));
  208. return 0;
  209. }
  210. }else if(name == "is"){
  211. if(strncmp(curr_char, " not", 4) == 0){
  212. curr_char += 4;
  213. add_token(TK("is not"));
  214. return 0;
  215. }
  216. }else if(name == "yield"){
  217. if(strncmp(curr_char, " from", 5) == 0){
  218. curr_char += 5;
  219. add_token(TK("yield from"));
  220. return 0;
  221. }
  222. }
  223. add_token(kTokenKwMap.at(name));
  224. } else {
  225. add_token(TK("@id"));
  226. }
  227. return 0;
  228. }
  229. void skip_line_comment() {
  230. char c;
  231. while ((c = peekchar()) != '\0') {
  232. if (c == '\n') return;
  233. eatchar();
  234. }
  235. }
  236. bool matchchar(char c) {
  237. if (peekchar() != c) return false;
  238. eatchar_include_newline();
  239. return true;
  240. }
  241. void add_token(TokenIndex type, TokenValue value={}) {
  242. switch(type){
  243. case TK("{"): case TK("["): case TK("("): brackets_level++; break;
  244. case TK(")"): case TK("]"): case TK("}"): brackets_level--; break;
  245. }
  246. nexts.push_back( Token{
  247. type,
  248. token_start,
  249. (int)(curr_char - token_start),
  250. current_line - ((type == TK("@eol")) ? 1 : 0),
  251. value
  252. });
  253. }
  254. void add_token_2(char c, TokenIndex one, TokenIndex two) {
  255. if (matchchar(c)) add_token(two);
  256. else add_token(one);
  257. }
  258. Str eat_string_until(char quote, bool raw) {
  259. bool quote3 = match_n_chars(2, quote);
  260. std::vector<char> buff;
  261. while (true) {
  262. char c = eatchar_include_newline();
  263. if (c == quote){
  264. if(quote3 && !match_n_chars(2, quote)){
  265. buff.push_back(c);
  266. continue;
  267. }
  268. break;
  269. }
  270. if (c == '\0'){
  271. if(quote3 && src->mode == REPL_MODE){
  272. throw NeedMoreLines(false);
  273. }
  274. SyntaxError("EOL while scanning string literal");
  275. }
  276. if (c == '\n'){
  277. if(!quote3) SyntaxError("EOL while scanning string literal");
  278. else{
  279. buff.push_back(c);
  280. continue;
  281. }
  282. }
  283. if (!raw && c == '\\') {
  284. switch (eatchar_include_newline()) {
  285. case '"': buff.push_back('"'); break;
  286. case '\'': buff.push_back('\''); break;
  287. case '\\': buff.push_back('\\'); break;
  288. case 'n': buff.push_back('\n'); break;
  289. case 'r': buff.push_back('\r'); break;
  290. case 't': buff.push_back('\t'); break;
  291. default: SyntaxError("invalid escape char");
  292. }
  293. } else {
  294. buff.push_back(c);
  295. }
  296. }
  297. return Str(buff.data(), buff.size());
  298. }
  299. void eat_string(char quote, StringType type) {
  300. Str s = eat_string_until(quote, type == RAW_STRING);
  301. if(type == F_STRING){
  302. add_token(TK("@fstr"), s);
  303. }else{
  304. add_token(TK("@str"), s);
  305. }
  306. }
  307. void eat_number() {
  308. static const std::regex pattern("^(0x)?[0-9a-fA-F]+(\\.[0-9]+)?");
  309. std::smatch m;
  310. const char* i = token_start;
  311. while(*i != '\n' && *i != '\0') i++;
  312. std::string s = std::string(token_start, i);
  313. try{
  314. if (std::regex_search(s, m, pattern)) {
  315. // here is m.length()-1, since the first char was eaten by lex_token()
  316. for(int j=0; j<m.length()-1; j++) eatchar();
  317. int base = 10;
  318. size_t size;
  319. if (m[1].matched) base = 16;
  320. if (m[2].matched) {
  321. if(base == 16) SyntaxError("hex literal should not contain a dot");
  322. add_token(TK("@num"), Number::stof(m[0], &size));
  323. } else {
  324. add_token(TK("@num"), Number::stoi(m[0], &size, base));
  325. }
  326. if (size != m.length()) FATAL_ERROR();
  327. }
  328. }catch(std::exception& _){
  329. SyntaxError("invalid number literal");
  330. }
  331. }
  332. bool lex_one_token() {
  333. while (peekchar() != '\0') {
  334. token_start = curr_char;
  335. char c = eatchar_include_newline();
  336. switch (c) {
  337. case '\'': case '"': eat_string(c, NORMAL_STRING); return true;
  338. case '#': skip_line_comment(); break;
  339. case '{': add_token(TK("{")); return true;
  340. case '}': add_token(TK("}")); return true;
  341. case ',': add_token(TK(",")); return true;
  342. case ':': add_token(TK(":")); return true;
  343. case ';': add_token(TK(";")); return true;
  344. case '(': add_token(TK("(")); return true;
  345. case ')': add_token(TK(")")); return true;
  346. case '[': add_token(TK("[")); return true;
  347. case ']': add_token(TK("]")); return true;
  348. case '@': add_token(TK("@")); return true;
  349. case '%': add_token_2('=', TK("%"), TK("%=")); return true;
  350. case '&': add_token_2('=', TK("&"), TK("&=")); return true;
  351. case '|': add_token_2('=', TK("|"), TK("|=")); return true;
  352. case '^': add_token_2('=', TK("^"), TK("^=")); return true;
  353. case '?': add_token(TK("?")); return true;
  354. case '.': {
  355. if(matchchar('.')) {
  356. if(matchchar('.')) {
  357. add_token(TK("..."));
  358. } else {
  359. SyntaxError("invalid token '..'");
  360. }
  361. } else {
  362. add_token(TK("."));
  363. }
  364. return true;
  365. }
  366. case '=': add_token_2('=', TK("="), TK("==")); return true;
  367. case '+': add_token_2('=', TK("+"), TK("+=")); return true;
  368. case '>': {
  369. if(matchchar('=')) add_token(TK(">="));
  370. else if(matchchar('>')) add_token_2('=', TK(">>"), TK(">>="));
  371. else add_token(TK(">"));
  372. return true;
  373. }
  374. case '<': {
  375. if(matchchar('=')) add_token(TK("<="));
  376. else if(matchchar('<')) add_token_2('=', TK("<<"), TK("<<="));
  377. else add_token(TK("<"));
  378. return true;
  379. }
  380. case '-': {
  381. if(matchchar('=')) add_token(TK("-="));
  382. else if(matchchar('>')) add_token(TK("->"));
  383. else add_token(TK("-"));
  384. return true;
  385. }
  386. case '!':
  387. if(matchchar('=')) add_token(TK("!="));
  388. else SyntaxError("expected '=' after '!'");
  389. break;
  390. case '*':
  391. if (matchchar('*')) {
  392. add_token(TK("**")); // '**'
  393. } else {
  394. add_token_2('=', TK("*"), TK("*="));
  395. }
  396. return true;
  397. case '/':
  398. if(matchchar('/')) {
  399. add_token_2('=', TK("//"), TK("//="));
  400. } else {
  401. add_token_2('=', TK("/"), TK("/="));
  402. }
  403. return true;
  404. case ' ': case '\t': eat_spaces(); break;
  405. case '\n': {
  406. add_token(TK("@eol"));
  407. if(!eat_indentation()) IndentationError("unindent does not match any outer indentation level");
  408. return true;
  409. }
  410. default: {
  411. if(c == 'f'){
  412. if(matchchar('\'')) {eat_string('\'', F_STRING); return true;}
  413. if(matchchar('"')) {eat_string('"', F_STRING); return true;}
  414. }else if(c == 'r'){
  415. if(matchchar('\'')) {eat_string('\'', RAW_STRING); return true;}
  416. if(matchchar('"')) {eat_string('"', RAW_STRING); return true;}
  417. }
  418. if (c >= '0' && c <= '9') {
  419. eat_number();
  420. return true;
  421. }
  422. switch (eat_name())
  423. {
  424. case 0: break;
  425. case 1: SyntaxError("invalid char: " + std::string(1, c));
  426. case 2: SyntaxError("invalid utf8 sequence: " + std::string(1, c));
  427. case 3: SyntaxError("@id contains invalid char"); break;
  428. case 4: SyntaxError("invalid JSON token"); break;
  429. default: FATAL_ERROR();
  430. }
  431. return true;
  432. }
  433. }
  434. }
  435. token_start = curr_char;
  436. while(indents.size() > 1){
  437. indents.pop();
  438. add_token(TK("@dedent"));
  439. return true;
  440. }
  441. add_token(TK("@eof"));
  442. return false;
  443. }
  444. /***** Error Reporter *****/
  445. void throw_err(Str type, Str msg){
  446. int lineno = current_line;
  447. const char* cursor = curr_char;
  448. if(peekchar() == '\n'){
  449. lineno--;
  450. cursor--;
  451. }
  452. throw_err(type, msg, lineno, cursor);
  453. }
  454. void throw_err(Str type, Str msg, int lineno, const char* cursor){
  455. auto e = Exception("SyntaxError", msg);
  456. e.st_push(src->snapshot(lineno, cursor));
  457. throw e;
  458. }
  459. void SyntaxError(Str msg){ throw_err("SyntaxError", msg); }
  460. void SyntaxError(){ throw_err("SyntaxError", "invalid syntax"); }
  461. void IndentationError(Str msg){ throw_err("IndentationError", msg); }
  462. Lexer(shared_ptr<SourceData> src) {
  463. this->src = src;
  464. this->token_start = src->source.c_str();
  465. this->curr_char = src->source.c_str();
  466. this->nexts.push_back(Token{TK("@sof"), token_start, 0, current_line});
  467. this->indents.push(0);
  468. }
  469. std::vector<Token> run() {
  470. if(used) FATAL_ERROR();
  471. used = true;
  472. while (lex_one_token());
  473. return std::move(nexts);
  474. }
  475. };
  476. } // namespace pkpy