lexer.cpp 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444
  1. #include "pocketpy/lexer.h"
  2. namespace pkpy{
  3. bool Lexer::match_n_chars(int n, char c0){
  4. const char* c = curr_char;
  5. for(int i=0; i<n; i++){
  6. if(*c == '\0') return false;
  7. if(*c != c0) return false;
  8. c++;
  9. }
  10. for(int i=0; i<n; i++) eatchar_include_newline();
  11. return true;
  12. }
  13. bool Lexer::match_string(const char* s){
  14. int s_len = strlen(s);
  15. bool ok = strncmp(curr_char, s, s_len) == 0;
  16. if(ok) for(int i=0; i<s_len; i++) eatchar_include_newline();
  17. return ok;
  18. }
  19. int Lexer::eat_spaces(){
  20. int count = 0;
  21. while (true) {
  22. switch (peekchar()) {
  23. case ' ' : count+=1; break;
  24. case '\t': count+=4; break;
  25. default: return count;
  26. }
  27. eatchar();
  28. }
  29. }
  30. bool Lexer::eat_indentation(){
  31. if(brackets_level > 0) return true;
  32. int spaces = eat_spaces();
  33. if(peekchar() == '#') skip_line_comment();
  34. if(peekchar() == '\0' || peekchar() == '\n') return true;
  35. // https://docs.python.org/3/reference/lexical_analysis.html#indentation
  36. if(spaces > indents.top()){
  37. indents.push(spaces);
  38. nexts.push_back(Token{TK("@indent"), token_start, 0, current_line, brackets_level});
  39. } else if(spaces < indents.top()){
  40. while(spaces < indents.top()){
  41. indents.pop();
  42. nexts.push_back(Token{TK("@dedent"), token_start, 0, current_line, brackets_level});
  43. }
  44. if(spaces != indents.top()){
  45. return false;
  46. }
  47. }
  48. return true;
  49. }
  50. char Lexer::eatchar() {
  51. char c = peekchar();
  52. if(c == '\n') throw std::runtime_error("eatchar() cannot consume a newline");
  53. curr_char++;
  54. return c;
  55. }
  56. char Lexer::eatchar_include_newline() {
  57. char c = peekchar();
  58. curr_char++;
  59. if (c == '\n'){
  60. current_line++;
  61. src->line_starts.push_back(curr_char);
  62. }
  63. return c;
  64. }
// Scan an identifier or keyword starting at token_start (whose first
// char was already eaten by lex_one_token(), hence the curr_char--).
// Returns 0 on success, or a nonzero code consumed by lex_one_token():
//   1 = invalid char, 2 = invalid utf8 sequence,
//   3 = empty identifier, 4 = invalid JSON token.
int Lexer::eat_name() {
    curr_char--;
    while(true){
        unsigned char c = peekchar();
        int u8bytes = utf8len(c, true);
        if(u8bytes == 0) return 1;
        if(u8bytes == 1){
            // ASCII path: identifier chars are [A-Za-z0-9_]
            if(isalpha(c) || c=='_' || isdigit(c)) {
                curr_char++;
                continue;
            }else{
                break;
            }
        }
        // handle multibyte char
        std::string u8str(curr_char, u8bytes);
        if(u8str.size() != u8bytes) return 2;
        // decode the UTF-8 sequence into a unicode code point
        uint32_t value = 0;
        for(int k=0; k < u8bytes; k++){
            uint8_t b = u8str[k];
            if(k==0){
                // leading byte: mask off the length-prefix bits
                if(u8bytes == 2) value = (b & 0b00011111) << 6;
                else if(u8bytes == 3) value = (b & 0b00001111) << 12;
                else if(u8bytes == 4) value = (b & 0b00000111) << 18;
            }else{
                // continuation byte contributes its low 6 bits
                value |= (b & 0b00111111) << (6*(u8bytes-k-1));
            }
        }
        // only "Letter, other" code points may continue an identifier
        if(is_unicode_Lo_char(value)) curr_char += u8bytes;
        else break;
    }
    int length = (int)(curr_char - token_start);
    if(length == 0) return 3;
    std::string_view name(token_start, length);
    if(src->mode == JSON_MODE){
        // JSON only permits the three keyword literals as bare names
        if(name == "true"){
            add_token(TK("True"));
        } else if(name == "false"){
            add_token(TK("False"));
        } else if(name == "null"){
            add_token(TK("None"));
        } else {
            return 4;
        }
        return 0;
    }
    if(kTokenKwMap.count(name)){
        add_token(kTokenKwMap.at(name));    // keyword
    } else {
        add_token(TK("@id"));               // plain identifier
    }
    return 0;
}
  118. void Lexer::skip_line_comment() {
  119. char c;
  120. while ((c = peekchar()) != '\0') {
  121. if (c == '\n') return;
  122. eatchar();
  123. }
  124. }
  125. bool Lexer::matchchar(char c) {
  126. if (peekchar() != c) return false;
  127. eatchar_include_newline();
  128. return true;
  129. }
  130. void Lexer::add_token(TokenIndex type, TokenValue value) {
  131. switch(type){
  132. case TK("{"): case TK("["): case TK("("): brackets_level++; break;
  133. case TK(")"): case TK("]"): case TK("}"): brackets_level--; break;
  134. }
  135. auto token = Token{
  136. type,
  137. token_start,
  138. (int)(curr_char - token_start),
  139. current_line - ((type == TK("@eol")) ? 1 : 0),
  140. brackets_level,
  141. value
  142. };
  143. // handle "not in", "is not", "yield from"
  144. if(!nexts.empty()){
  145. auto& back = nexts.back();
  146. if(back.type == TK("not") && type == TK("in")){
  147. back.type = TK("not in");
  148. return;
  149. }
  150. if(back.type == TK("is") && type == TK("not")){
  151. back.type = TK("is not");
  152. return;
  153. }
  154. if(back.type == TK("yield") && type == TK("from")){
  155. back.type = TK("yield from");
  156. return;
  157. }
  158. nexts.push_back(token);
  159. }
  160. }
  161. void Lexer::add_token_2(char c, TokenIndex one, TokenIndex two) {
  162. if (matchchar(c)) add_token(two);
  163. else add_token(one);
  164. }
// Consume characters up to the matching closing quote and return the
// string contents (escape sequences processed unless `raw`). The opening
// quote was already eaten; a triple-quoted string is detected here by
// matching two more leading quote characters.
Str Lexer::eat_string_until(char quote, bool raw) {
    bool quote3 = match_n_chars(2, quote);
    std::vector<char> buff;
    while (true) {
        char c = eatchar_include_newline();
        if (c == quote){
            // Inside ''' / """ a lone quote char is literal text unless
            // it is followed by two more quote chars (the terminator).
            if(quote3 && !match_n_chars(2, quote)){
                buff.push_back(c);
                continue;
            }
            break;
        }
        if (c == '\0'){
            // In the REPL an unterminated triple-quoted string just means
            // the user has not finished typing the statement yet.
            if(quote3 && src->mode == REPL_MODE){
                throw NeedMoreLines(false);
            }
            SyntaxError("EOL while scanning string literal");
        }
        if (c == '\n'){
            // Bare newlines are only legal inside triple-quoted strings.
            if(!quote3) SyntaxError("EOL while scanning string literal");
            else{
                buff.push_back(c);
                continue;
            }
        }
        if (!raw && c == '\\') {
            switch (eatchar_include_newline()) {
                case '"': buff.push_back('"'); break;
                case '\'': buff.push_back('\''); break;
                case '\\': buff.push_back('\\'); break;
                case 'n': buff.push_back('\n'); break;
                case 'r': buff.push_back('\r'); break;
                case 't': buff.push_back('\t'); break;
                case 'x': {
                    // \xHH: exactly two hex digits must follow
                    char hex[3] = {eatchar(), eatchar(), '\0'};
                    size_t parsed;
                    char code;
                    try{
                        code = (char)Number::stoi(hex, &parsed, 16);
                    }catch(std::invalid_argument&){
                        SyntaxError("invalid hex char");
                    }
                    if (parsed != 2) SyntaxError("invalid hex char");
                    buff.push_back(code);
                } break;
                default: SyntaxError("invalid escape char");
            }
        } else {
            buff.push_back(c);
        }
    }
    return Str(buff.data(), buff.size());
}
  218. void Lexer::eat_string(char quote, StringType type) {
  219. Str s = eat_string_until(quote, type == RAW_STRING);
  220. if(type == F_STRING){
  221. add_token(TK("@fstr"), s);
  222. }else{
  223. add_token(TK("@str"), s);
  224. }
  225. }
// Lex a numeric literal: decimal or hex integer, float, or a long
// (arbitrary-precision) integer marked by a trailing 'L'.
// token_start points at the first digit, already eaten by the caller.
void Lexer::eat_number() {
    // group 1 = "0x" prefix, group 2 = fractional part, group 3 = 'L'
    static const std::regex pattern("^(0x)?[0-9a-fA-F]+(\\.[0-9]+)?(L)?");
    std::smatch m;
    const char* i = token_start;
    // Limit the regex search to the current line.
    while(*i != '\n' && *i != '\0') i++;
    std::string s = std::string(token_start, i);
    bool ok = std::regex_search(s, m, pattern);
    PK_ASSERT(ok);
    // here is m.length()-1, since the first char was eaten by lex_token()
    for(int j=0; j<m.length()-1; j++) eatchar();
    if(m[3].matched){
        // trailing 'L' => @long token; value parsed later by the compiler
        add_token(TK("@long"));
        return;
    }
    try{
        int base = 10;
        size_t size;
        if (m[1].matched) base = 16;
        if (m[2].matched) {
            if(base == 16) SyntaxError("hex literal should not contain a dot");
            add_token(TK("@num"), Number::stof(m[0], &size));
        } else {
            add_token(TK("@num"), Number::stoi(m[0], &size, base));
        }
        // the converter must have consumed the entire matched text
        PK_ASSERT((int)size == (int)m.length());
    }catch(std::exception& e){
        PK_UNUSED(e);
        SyntaxError("invalid number literal");
    }
}
// Lex a single token (or a merged pair via add_token) into `nexts`.
// Returns true while more tokens may follow; false after @eof is emitted.
bool Lexer::lex_one_token() {
    while (peekchar() != '\0') {
        token_start = curr_char;
        char c = eatchar_include_newline();
        switch (c) {
            case '\'': case '"': eat_string(c, NORMAL_STRING); return true;
            case '#': skip_line_comment(); break;  // no token; keep scanning
            case '{': add_token(TK("{")); return true;
            case '}': add_token(TK("}")); return true;
            case ',': add_token(TK(",")); return true;
            case ':': add_token(TK(":")); return true;
            case ';': add_token(TK(";")); return true;
            case '(': add_token(TK("(")); return true;
            case ')': add_token(TK(")")); return true;
            case '[': add_token(TK("[")); return true;
            case ']': add_token(TK("]")); return true;
            case '@': add_token(TK("@")); return true;
            case '$': {
                // special tokens: try each name in the [$goto, $label] range
                for(int i=TK("$goto"); i<=TK("$label"); i++){
                    // +1 to skip the '$'
                    if(match_string(TK_STR(i) + 1)){
                        add_token((TokenIndex)i);
                        return true;
                    }
                }
                SyntaxError("invalid special token");
            } return false;  // unreachable if SyntaxError throws
            case '%': add_token_2('=', TK("%"), TK("%=")); return true;
            case '&': add_token_2('=', TK("&"), TK("&=")); return true;
            case '|': add_token_2('=', TK("|"), TK("|=")); return true;
            case '^': add_token_2('=', TK("^"), TK("^=")); return true;
            case '?': add_token(TK("?")); return true;
            case '.': {
                // '.', '...': '..' alone is rejected
                if(matchchar('.')) {
                    if(matchchar('.')) {
                        add_token(TK("..."));
                    } else {
                        SyntaxError("invalid token '..'");
                    }
                } else {
                    add_token(TK("."));
                }
                return true;
            }
            case '=': add_token_2('=', TK("="), TK("==")); return true;
            case '+':
                if(matchchar('+')){
                    add_token(TK("++"));
                }else{
                    add_token_2('=', TK("+"), TK("+="));
                }
                return true;
            case '>': {
                // '>=', '>>', '>>=', '>'
                if(matchchar('=')) add_token(TK(">="));
                else if(matchchar('>')) add_token_2('=', TK(">>"), TK(">>="));
                else add_token(TK(">"));
                return true;
            }
            case '<': {
                // '<=', '<<', '<<=', '<'
                if(matchchar('=')) add_token(TK("<="));
                else if(matchchar('<')) add_token_2('=', TK("<<"), TK("<<="));
                else add_token(TK("<"));
                return true;
            }
            case '-': {
                // '--', '-=', '->', '-'
                if(matchchar('-')){
                    add_token(TK("--"));
                }else{
                    if(matchchar('=')) add_token(TK("-="));
                    else if(matchchar('>')) add_token(TK("->"));
                    else add_token(TK("-"));
                }
                return true;
            }
            case '!':
                if(matchchar('=')) add_token(TK("!="));
                else SyntaxError("expected '=' after '!'");
                break;
            case '*':
                if (matchchar('*')) {
                    add_token(TK("**")); // '**'
                } else {
                    add_token_2('=', TK("*"), TK("*="));
                }
                return true;
            case '/':
                if(matchchar('/')) {
                    add_token_2('=', TK("//"), TK("//="));
                } else {
                    add_token_2('=', TK("/"), TK("/="));
                }
                return true;
            case ' ': case '\t': eat_spaces(); break;  // inter-token whitespace
            case '\n': {
                add_token(TK("@eol"));
                if(!eat_indentation()) IndentationError("unindent does not match any outer indentation level");
                return true;
            }
            default: {
                // string prefixes: f'...' / f"..." and r'...' / r"..."
                if(c == 'f'){
                    if(matchchar('\'')) {eat_string('\'', F_STRING); return true;}
                    if(matchchar('"')) {eat_string('"', F_STRING); return true;}
                }else if(c == 'r'){
                    if(matchchar('\'')) {eat_string('\'', RAW_STRING); return true;}
                    if(matchchar('"')) {eat_string('"', RAW_STRING); return true;}
                }
                if (c >= '0' && c <= '9') {
                    eat_number();
                    return true;
                }
                // identifier / keyword; nonzero codes become syntax errors
                switch (eat_name())
                {
                    case 0: break;
                    case 1: SyntaxError("invalid char: " + std::string(1, c)); break;
                    case 2: SyntaxError("invalid utf8 sequence: " + std::string(1, c)); break;
                    case 3: SyntaxError("@id contains invalid char"); break;
                    case 4: SyntaxError("invalid JSON token"); break;
                    default: FATAL_ERROR();
                }
                return true;
            }
        }
    }
    // End of input: flush one pending dedent per call, then @eof.
    token_start = curr_char;
    while(indents.size() > 1){
        indents.pop();
        add_token(TK("@dedent"));
        return true;
    }
    add_token(TK("@eof"));
    return false;
}
  388. void Lexer::throw_err(Str type, Str msg){
  389. int lineno = current_line;
  390. const char* cursor = curr_char;
  391. if(peekchar() == '\n'){
  392. lineno--;
  393. cursor--;
  394. }
  395. throw_err(type, msg, lineno, cursor);
  396. }
  397. void Lexer::throw_err(Str type, Str msg, int lineno, const char* cursor){
  398. auto e = Exception(type, msg);
  399. e.st_push(src->snapshot(lineno, cursor));
  400. throw e;
  401. }
  402. Lexer::Lexer(shared_ptr<SourceData> src) {
  403. this->src = src;
  404. this->token_start = src->source.c_str();
  405. this->curr_char = src->source.c_str();
  406. this->nexts.push_back(Token{TK("@sof"), token_start, 0, current_line, brackets_level});
  407. this->indents.push(0);
  408. }
  409. std::vector<Token> Lexer::run() {
  410. if(used) FATAL_ERROR();
  411. used = true;
  412. while (lex_one_token());
  413. return std::move(nexts);
  414. }
  415. } // namespace pkpy