1/*
2 * This file is part of the MicroPython project, http://micropython.org/
3 *
4 * The MIT License (MIT)
5 *
6 * Copyright (c) 2013, 2014 Damien P. George
7 * Copyright (c) 2014-2018 Paul Sokolovsky
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a copy
10 * of this software and associated documentation files (the "Software"), to deal
11 * in the Software without restriction, including without limitation the rights
12 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 * copies of the Software, and to permit persons to whom the Software is
14 * furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 * THE SOFTWARE.
26 */
27
28#include <stdarg.h>
29#include <stdio.h>
30#include <string.h>
31#include <assert.h>
32
33#include "py/parsenum.h"
34#include "py/compile.h"
35#include "py/objstr.h"
36#include "py/objtuple.h"
37#include "py/objlist.h"
38#include "py/objtype.h"
39#include "py/objmodule.h"
40#include "py/objgenerator.h"
41#include "py/smallint.h"
42#include "py/runtime.h"
43#include "py/builtin.h"
44#include "py/stackctrl.h"
45#include "py/gc.h"
46
47#if MICROPY_DEBUG_VERBOSE // print debugging info
48#define DEBUG_PRINT (1)
49#define DEBUG_printf DEBUG_printf
50#define DEBUG_OP_printf(...) DEBUG_printf(__VA_ARGS__)
51#else // don't print debugging info
52#define DEBUG_printf(...) (void)0
53#define DEBUG_OP_printf(...) (void)0
54#endif
55
// The __main__ module object; its globals dict lives in VM state
// (dict_main) and is also used as the top-level locals/globals in mp_init().
const mp_obj_module_t mp_module___main__ = {
    .base = { &mp_type_module },
    .globals = (mp_obj_dict_t *)&MP_STATE_VM(dict_main),
};
60
// Initialise the MicroPython runtime: the interned-string (qstr) table,
// core VM state, the __main__ module with its globals dict, and every
// optional subsystem selected by the port's MICROPY_* configuration macros.
// Must be called before any other runtime function; acquires the GIL on exit.
void mp_init(void) {
    qstr_init();

    // no pending exceptions to start with
    MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
    #if MICROPY_ENABLE_SCHEDULER
    // scheduler starts idle with an empty callback queue
    MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
    MP_STATE_VM(sched_idx) = 0;
    MP_STATE_VM(sched_len) = 0;
    #endif

    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
    mp_init_emergency_exception_buf();
    #endif

    #if MICROPY_KBD_EXCEPTION
    // initialise the exception object for raising KeyboardInterrupt
    // (statically allocated so it can be raised even when the heap is full)
    MP_STATE_VM(mp_kbd_exception).base.type = &mp_type_KeyboardInterrupt;
    MP_STATE_VM(mp_kbd_exception).traceback_alloc = 0;
    MP_STATE_VM(mp_kbd_exception).traceback_len = 0;
    MP_STATE_VM(mp_kbd_exception).traceback_data = NULL;
    MP_STATE_VM(mp_kbd_exception).args = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
    #endif

    #if MICROPY_ENABLE_COMPILER
    // optimization disabled by default
    MP_STATE_VM(mp_optimise_value) = 0;
    #if MICROPY_EMIT_NATIVE
    MP_STATE_VM(default_emit_opt) = MP_EMIT_OPT_NONE;
    #endif
    #endif

    // init global module dict
    mp_obj_dict_init(&MP_STATE_VM(mp_loaded_modules_dict), MICROPY_LOADED_MODULES_DICT_SIZE);

    // initialise the __main__ module
    mp_obj_dict_init(&MP_STATE_VM(dict_main), 1);
    mp_obj_dict_store(MP_OBJ_FROM_PTR(&MP_STATE_VM(dict_main)), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));

    // locals = globals for outer module (see Objects/frameobject.c/PyFrame_New())
    mp_locals_set(&MP_STATE_VM(dict_main));
    mp_globals_set(&MP_STATE_VM(dict_main));

    #if MICROPY_CAN_OVERRIDE_BUILTINS
    // start with no extensions to builtins
    MP_STATE_VM(mp_module_builtins_override_dict) = NULL;
    #endif

    #if MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
    MP_STATE_VM(track_reloc_code_list) = MP_OBJ_NULL;
    #endif

    #if MICROPY_PY_OS_DUPTERM
    // no duplicate-terminal streams attached yet
    for (size_t i = 0; i < MICROPY_PY_OS_DUPTERM; ++i) {
        MP_STATE_VM(dupterm_objs[i]) = MP_OBJ_NULL;
    }
    #endif

    #if MICROPY_VFS
    // initialise the VFS sub-system
    MP_STATE_VM(vfs_cur) = NULL;
    MP_STATE_VM(vfs_mount_table) = NULL;
    #endif

    #if MICROPY_PY_SYS_ATEXIT
    MP_STATE_VM(sys_exitfunc) = mp_const_none;
    #endif

    #if MICROPY_PY_SYS_SETTRACE
    // no settrace callback installed, and not currently executing one
    MP_STATE_THREAD(prof_trace_callback) = MP_OBJ_NULL;
    MP_STATE_THREAD(prof_callback_is_executing) = false;
    MP_STATE_THREAD(current_code_state) = NULL;
    #endif

    #if MICROPY_PY_BLUETOOTH
    MP_STATE_VM(bluetooth) = MP_OBJ_NULL;
    #endif

    #if MICROPY_PY_THREAD_GIL
    mp_thread_mutex_init(&MP_STATE_VM(gil_mutex));
    #endif

    // call port specific initialization if any
    #ifdef MICROPY_PORT_INIT_FUNC
    MICROPY_PORT_INIT_FUNC;
    #endif

    MP_THREAD_GIL_ENTER();
}
150
// Tear down the runtime: release the GIL and run the port-specific
// deinit hook.  Heap-backed state (dict_main, loaded-modules dict) is
// deliberately not freed here — see the commented-out calls below.
void mp_deinit(void) {
    MP_THREAD_GIL_EXIT();

    // call port specific deinitialization if any
    #ifdef MICROPY_PORT_DEINIT_FUNC
    MICROPY_PORT_DEINIT_FUNC;
    #endif

    // mp_obj_dict_free(&dict_main);
    // mp_map_deinit(&MP_STATE_VM(mp_loaded_modules_map));
}
162
163mp_obj_t mp_load_name(qstr qst) {
164 // logic: search locals, globals, builtins
165 DEBUG_OP_printf("load name %s\n", qstr_str(qst));
166 // If we're at the outer scope (locals == globals), dispatch to load_global right away
167 if (mp_locals_get() != mp_globals_get()) {
168 mp_map_elem_t *elem = mp_map_lookup(&mp_locals_get()->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
169 if (elem != NULL) {
170 return elem->value;
171 }
172 }
173 return mp_load_global(qst);
174}
175
176mp_obj_t mp_load_global(qstr qst) {
177 // logic: search globals, builtins
178 DEBUG_OP_printf("load global %s\n", qstr_str(qst));
179 mp_map_elem_t *elem = mp_map_lookup(&mp_globals_get()->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
180 if (elem == NULL) {
181 #if MICROPY_CAN_OVERRIDE_BUILTINS
182 if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
183 // lookup in additional dynamic table of builtins first
184 elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
185 if (elem != NULL) {
186 return elem->value;
187 }
188 }
189 #endif
190 elem = mp_map_lookup((mp_map_t *)&mp_module_builtins_globals.map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
191 if (elem == NULL) {
192 #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
193 mp_raise_msg(&mp_type_NameError, MP_ERROR_TEXT("name not defined"));
194 #else
195 mp_raise_msg_varg(&mp_type_NameError, MP_ERROR_TEXT("name '%q' isn't defined"), qst);
196 #endif
197 }
198 }
199 return elem->value;
200}
201
202mp_obj_t mp_load_build_class(void) {
203 DEBUG_OP_printf("load_build_class\n");
204 #if MICROPY_CAN_OVERRIDE_BUILTINS
205 if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
206 // lookup in additional dynamic table of builtins first
207 mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(MP_QSTR___build_class__), MP_MAP_LOOKUP);
208 if (elem != NULL) {
209 return elem->value;
210 }
211 }
212 #endif
213 return MP_OBJ_FROM_PTR(&mp_builtin___build_class___obj);
214}
215
// Bind obj to the name qst in the current local scope.
void mp_store_name(qstr qst, mp_obj_t obj) {
    DEBUG_OP_printf("store name %s <- %p\n", qstr_str(qst), obj);
    mp_obj_dict_store(MP_OBJ_FROM_PTR(mp_locals_get()), MP_OBJ_NEW_QSTR(qst), obj);
}
220
// Delete the name qst from the current local scope (implements `del name`).
void mp_delete_name(qstr qst) {
    DEBUG_OP_printf("delete name %s\n", qstr_str(qst));
    // TODO convert KeyError to NameError if qst not found
    mp_obj_dict_delete(MP_OBJ_FROM_PTR(mp_locals_get()), MP_OBJ_NEW_QSTR(qst));
}
226
// Bind obj to the name qst in the global scope (implements `global` stores).
void mp_store_global(qstr qst, mp_obj_t obj) {
    DEBUG_OP_printf("store global %s <- %p\n", qstr_str(qst), obj);
    mp_obj_dict_store(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(qst), obj);
}
231
// Delete the name qst from the global scope.
void mp_delete_global(qstr qst) {
    DEBUG_OP_printf("delete global %s\n", qstr_str(qst));
    // TODO convert KeyError to NameError if qst not found
    mp_obj_dict_delete(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(qst));
}
237
// Apply unary operator op to arg.  Fast paths handle `not`, small ints
// (with explicit overflow checks for negation) and str/bytes hashing;
// everything else is dispatched to the type's unary_op slot.  Raises
// TypeError if the type does not support the operator.
mp_obj_t mp_unary_op(mp_unary_op_t op, mp_obj_t arg) {
    DEBUG_OP_printf("unary " UINT_FMT " %q %p\n", op, mp_unary_op_method_name[op], arg);

    if (op == MP_UNARY_OP_NOT) {
        // "not x" is the negative of whether "x" is true per Python semantics
        return mp_obj_new_bool(mp_obj_is_true(arg) == 0);
    } else if (mp_obj_is_small_int(arg)) {
        mp_int_t val = MP_OBJ_SMALL_INT_VALUE(arg);
        switch (op) {
            case MP_UNARY_OP_BOOL:
                return mp_obj_new_bool(val != 0);
            case MP_UNARY_OP_HASH:
                // a small int hashes to itself
                return arg;
            case MP_UNARY_OP_POSITIVE:
            case MP_UNARY_OP_INT:
                return arg;
            case MP_UNARY_OP_NEGATIVE:
                // check for overflow: -MP_SMALL_INT_MIN doesn't fit in a
                // small int, so promote to a big int in that one case
                if (val == MP_SMALL_INT_MIN) {
                    return mp_obj_new_int(-val);
                } else {
                    return MP_OBJ_NEW_SMALL_INT(-val);
                }
            case MP_UNARY_OP_ABS:
                if (val >= 0) {
                    return arg;
                } else if (val == MP_SMALL_INT_MIN) {
                    // check for overflow
                    return mp_obj_new_int(-val);
                } else {
                    return MP_OBJ_NEW_SMALL_INT(-val);
                }
            default:
                assert(op == MP_UNARY_OP_INVERT);
                return MP_OBJ_NEW_SMALL_INT(~val);
        }
    } else if (op == MP_UNARY_OP_HASH && mp_obj_is_str_or_bytes(arg)) {
        // fast path for hashing str/bytes: use the cached hash if present,
        // otherwise compute (and return) it without going through the type slot
        GET_STR_HASH(arg, h);
        if (h == 0) {
            GET_STR_DATA_LEN(arg, data, len);
            h = qstr_compute_hash(data, len);
        }
        return MP_OBJ_NEW_SMALL_INT(h);
    } else {
        // generic dispatch through the type's unary_op slot; MP_OBJ_NULL
        // from the slot means "operation not supported"
        const mp_obj_type_t *type = mp_obj_get_type(arg);
        if (type->unary_op != NULL) {
            mp_obj_t result = type->unary_op(op, arg);
            if (result != MP_OBJ_NULL) {
                return result;
            }
        }
        // With MP_UNARY_OP_INT, mp_unary_op() becomes a fallback for mp_obj_get_int().
        // In this case provide a more focused error message to not confuse, e.g. chr(1.0)
        #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
        if (op == MP_UNARY_OP_INT) {
            mp_raise_TypeError(MP_ERROR_TEXT("can't convert to int"));
        } else {
            mp_raise_TypeError(MP_ERROR_TEXT("unsupported type for operator"));
        }
        #else
        if (op == MP_UNARY_OP_INT) {
            mp_raise_msg_varg(&mp_type_TypeError,
                MP_ERROR_TEXT("can't convert %s to int"), mp_obj_get_type_str(arg));
        } else {
            mp_raise_msg_varg(&mp_type_TypeError,
                MP_ERROR_TEXT("unsupported type for %q: '%s'"),
                mp_unary_op_method_name[op], mp_obj_get_type_str(arg));
        }
        #endif
    }
}
310
// Apply binary operator op to lhs and rhs.  Dispatch order: identity and
// (in)equality shortcuts, exception-match, the small-int fast path (with
// explicit overflow checks per CERT INT32-C), then the type's binary_op
// slot, reflected (__rop__) methods, and finally a generic iteration-based
// `in` test.  Raises TypeError for unsupported operand types and
// ZeroDivisionError for division/modulo by zero.
mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) {
    DEBUG_OP_printf("binary " UINT_FMT " %q %p %p\n", op, mp_binary_op_method_name[op], lhs, rhs);

    // TODO correctly distinguish inplace operators for mutable objects
    // lookup logic that CPython uses for +=:
    //   check for implemented +=
    //   then check for implemented +
    //   then check for implemented seq.inplace_concat
    //   then check for implemented seq.concat
    //   then fail
    // note that list does not implement + or +=, so that inplace_concat is reached first for +=

    // deal with is
    if (op == MP_BINARY_OP_IS) {
        return mp_obj_new_bool(lhs == rhs);
    }

    // deal with == and != for all types
    if (op == MP_BINARY_OP_EQUAL || op == MP_BINARY_OP_NOT_EQUAL) {
        // mp_obj_equal_not_equal supports a bunch of shortcuts
        return mp_obj_equal_not_equal(op, lhs, rhs);
    }

    // deal with exception_match for all types
    if (op == MP_BINARY_OP_EXCEPTION_MATCH) {
        // rhs must be issubclass(rhs, BaseException)
        if (mp_obj_is_exception_type(rhs)) {
            if (mp_obj_exception_match(lhs, rhs)) {
                return mp_const_true;
            } else {
                return mp_const_false;
            }
        } else if (mp_obj_is_type(rhs, &mp_type_tuple)) {
            // a tuple of exception types: match against each in turn
            mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(rhs);
            for (size_t i = 0; i < tuple->len; i++) {
                rhs = tuple->items[i];
                if (!mp_obj_is_exception_type(rhs)) {
                    goto unsupported_op;
                }
                if (mp_obj_exception_match(lhs, rhs)) {
                    return mp_const_true;
                }
            }
            return mp_const_false;
        }
        goto unsupported_op;
    }

    if (mp_obj_is_small_int(lhs)) {
        mp_int_t lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs);
        if (mp_obj_is_small_int(rhs)) {
            mp_int_t rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs);
            // This is a binary operation: lhs_val op rhs_val
            // We need to be careful to handle overflow; see CERT INT32-C
            // Operations that can overflow:
            // +       result always fits in mp_int_t, then handled by SMALL_INT check
            // -       result always fits in mp_int_t, then handled by SMALL_INT check
            // *       checked explicitly
            // /       if lhs=MIN and rhs=-1; result always fits in mp_int_t, then handled by SMALL_INT check
            // %       if lhs=MIN and rhs=-1; result always fits in mp_int_t, then handled by SMALL_INT check
            // <<      checked explicitly
            switch (op) {
                case MP_BINARY_OP_OR:
                case MP_BINARY_OP_INPLACE_OR:
                    lhs_val |= rhs_val;
                    break;
                case MP_BINARY_OP_XOR:
                case MP_BINARY_OP_INPLACE_XOR:
                    lhs_val ^= rhs_val;
                    break;
                case MP_BINARY_OP_AND:
                case MP_BINARY_OP_INPLACE_AND:
                    lhs_val &= rhs_val;
                    break;
                case MP_BINARY_OP_LSHIFT:
                case MP_BINARY_OP_INPLACE_LSHIFT: {
                    if (rhs_val < 0) {
                        // negative shift not allowed
                        mp_raise_ValueError(MP_ERROR_TEXT("negative shift count"));
                    } else if (rhs_val >= (mp_int_t)(sizeof(lhs_val) * MP_BITS_PER_BYTE)
                               || lhs_val > (MP_SMALL_INT_MAX >> rhs_val)
                               || lhs_val < (MP_SMALL_INT_MIN >> rhs_val)) {
                        // left-shift will overflow, so use higher precision integer
                        lhs = mp_obj_new_int_from_ll(lhs_val);
                        goto generic_binary_op;
                    } else {
                        // use standard precision
                        lhs_val <<= rhs_val;
                    }
                    break;
                }
                case MP_BINARY_OP_RSHIFT:
                case MP_BINARY_OP_INPLACE_RSHIFT:
                    if (rhs_val < 0) {
                        // negative shift not allowed
                        mp_raise_ValueError(MP_ERROR_TEXT("negative shift count"));
                    } else {
                        // standard precision is enough for right-shift
                        if (rhs_val >= (mp_int_t)(sizeof(lhs_val) * MP_BITS_PER_BYTE)) {
                            // Shifting to big amounts is undefined behavior
                            // in C and is CPU-dependent; propagate sign bit.
                            rhs_val = sizeof(lhs_val) * MP_BITS_PER_BYTE - 1;
                        }
                        lhs_val >>= rhs_val;
                    }
                    break;
                case MP_BINARY_OP_ADD:
                case MP_BINARY_OP_INPLACE_ADD:
                    lhs_val += rhs_val;
                    break;
                case MP_BINARY_OP_SUBTRACT:
                case MP_BINARY_OP_INPLACE_SUBTRACT:
                    lhs_val -= rhs_val;
                    break;
                case MP_BINARY_OP_MULTIPLY:
                case MP_BINARY_OP_INPLACE_MULTIPLY: {

                    // If long long type exists and is larger than mp_int_t, then
                    // we can use the following code to perform overflow-checked multiplication.
                    // Otherwise (eg in x64 case) we must use mp_small_int_mul_overflow.
                    #if 0
                    // compute result using long long precision
                    long long res = (long long)lhs_val * (long long)rhs_val;
                    if (res > MP_SMALL_INT_MAX || res < MP_SMALL_INT_MIN) {
                        // result overflowed SMALL_INT, so return higher precision integer
                        return mp_obj_new_int_from_ll(res);
                    } else {
                        // use standard precision
                        lhs_val = (mp_int_t)res;
                    }
                    #endif

                    if (mp_small_int_mul_overflow(lhs_val, rhs_val)) {
                        // use higher precision
                        lhs = mp_obj_new_int_from_ll(lhs_val);
                        goto generic_binary_op;
                    } else {
                        // use standard precision
                        return MP_OBJ_NEW_SMALL_INT(lhs_val * rhs_val);
                    }
                }
                case MP_BINARY_OP_FLOOR_DIVIDE:
                case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
                    if (rhs_val == 0) {
                        goto zero_division;
                    }
                    lhs_val = mp_small_int_floor_divide(lhs_val, rhs_val);
                    break;

                #if MICROPY_PY_BUILTINS_FLOAT
                case MP_BINARY_OP_TRUE_DIVIDE:
                case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
                    if (rhs_val == 0) {
                        goto zero_division;
                    }
                    // int / int always produces a float per Python semantics
                    return mp_obj_new_float((mp_float_t)lhs_val / (mp_float_t)rhs_val);
                #endif

                case MP_BINARY_OP_MODULO:
                case MP_BINARY_OP_INPLACE_MODULO: {
                    if (rhs_val == 0) {
                        goto zero_division;
                    }
                    lhs_val = mp_small_int_modulo(lhs_val, rhs_val);
                    break;
                }

                case MP_BINARY_OP_POWER:
                case MP_BINARY_OP_INPLACE_POWER:
                    if (rhs_val < 0) {
                        #if MICROPY_PY_BUILTINS_FLOAT
                        // int ** -int gives a float result
                        return mp_obj_float_binary_op(op, (mp_float_t)lhs_val, rhs);
                        #else
                        mp_raise_ValueError(MP_ERROR_TEXT("negative power with no float support"));
                        #endif
                    } else {
                        // binary exponentiation (square-and-multiply), with
                        // overflow checks on every multiplication
                        mp_int_t ans = 1;
                        while (rhs_val > 0) {
                            if (rhs_val & 1) {
                                if (mp_small_int_mul_overflow(ans, lhs_val)) {
                                    goto power_overflow;
                                }
                                ans *= lhs_val;
                            }
                            if (rhs_val == 1) {
                                break;
                            }
                            rhs_val /= 2;
                            if (mp_small_int_mul_overflow(lhs_val, lhs_val)) {
                                goto power_overflow;
                            }
                            lhs_val *= lhs_val;
                        }
                        lhs_val = ans;
                    }
                    break;

                power_overflow:
                    // use higher precision
                    lhs = mp_obj_new_int_from_ll(MP_OBJ_SMALL_INT_VALUE(lhs));
                    goto generic_binary_op;

                case MP_BINARY_OP_DIVMOD: {
                    if (rhs_val == 0) {
                        goto zero_division;
                    }
                    // to reduce stack usage we don't pass a temp array of the 2 items
                    mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(mp_obj_new_tuple(2, NULL));
                    tuple->items[0] = MP_OBJ_NEW_SMALL_INT(mp_small_int_floor_divide(lhs_val, rhs_val));
                    tuple->items[1] = MP_OBJ_NEW_SMALL_INT(mp_small_int_modulo(lhs_val, rhs_val));
                    return MP_OBJ_FROM_PTR(tuple);
                }

                case MP_BINARY_OP_LESS:
                    return mp_obj_new_bool(lhs_val < rhs_val);
                case MP_BINARY_OP_MORE:
                    return mp_obj_new_bool(lhs_val > rhs_val);
                case MP_BINARY_OP_LESS_EQUAL:
                    return mp_obj_new_bool(lhs_val <= rhs_val);
                case MP_BINARY_OP_MORE_EQUAL:
                    return mp_obj_new_bool(lhs_val >= rhs_val);

                default:
                    goto unsupported_op;
            }
            // This is an inlined version of mp_obj_new_int, for speed
            if (MP_SMALL_INT_FITS(lhs_val)) {
                return MP_OBJ_NEW_SMALL_INT(lhs_val);
            } else {
                return mp_obj_new_int_from_ll(lhs_val);
            }
        #if MICROPY_PY_BUILTINS_FLOAT
        } else if (mp_obj_is_float(rhs)) {
            // small-int op float: promote lhs to float
            mp_obj_t res = mp_obj_float_binary_op(op, (mp_float_t)lhs_val, rhs);
            if (res == MP_OBJ_NULL) {
                goto unsupported_op;
            } else {
                return res;
            }
        #endif
        #if MICROPY_PY_BUILTINS_COMPLEX
        } else if (mp_obj_is_type(rhs, &mp_type_complex)) {
            // small-int op complex: promote lhs to complex
            mp_obj_t res = mp_obj_complex_binary_op(op, (mp_float_t)lhs_val, 0, rhs);
            if (res == MP_OBJ_NULL) {
                goto unsupported_op;
            } else {
                return res;
            }
        #endif
        }
    }

    // Convert MP_BINARY_OP_IN to MP_BINARY_OP_CONTAINS with swapped args.
    if (op == MP_BINARY_OP_IN) {
        op = MP_BINARY_OP_CONTAINS;
        mp_obj_t temp = lhs;
        lhs = rhs;
        rhs = temp;
    }

    // generic binary_op supplied by type
    const mp_obj_type_t *type;
generic_binary_op:
    type = mp_obj_get_type(lhs);
    if (type->binary_op != NULL) {
        mp_obj_t result = type->binary_op(op, lhs, rhs);
        if (result != MP_OBJ_NULL) {
            return result;
        }
    }

    #if MICROPY_PY_REVERSE_SPECIAL_METHODS
    if (op >= MP_BINARY_OP_OR && op <= MP_BINARY_OP_POWER) {
        // try the reflected operation: swap operands and map op -> __rop__
        mp_obj_t t = rhs;
        rhs = lhs;
        lhs = t;
        op += MP_BINARY_OP_REVERSE_OR - MP_BINARY_OP_OR;
        goto generic_binary_op;
    } else if (op >= MP_BINARY_OP_REVERSE_OR) {
        // Convert __rop__ back to __op__ for error message
        mp_obj_t t = rhs;
        rhs = lhs;
        lhs = t;
        op -= MP_BINARY_OP_REVERSE_OR - MP_BINARY_OP_OR;
    }
    #endif

    if (op == MP_BINARY_OP_CONTAINS) {
        // If type didn't support containment then explicitly walk the iterator.
        // mp_getiter will raise the appropriate exception if lhs is not iterable.
        mp_obj_iter_buf_t iter_buf;
        mp_obj_t iter = mp_getiter(lhs, &iter_buf);
        mp_obj_t next;
        while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
            if (mp_obj_equal(next, rhs)) {
                return mp_const_true;
            }
        }
        return mp_const_false;
    }

unsupported_op:
    #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
    mp_raise_TypeError(MP_ERROR_TEXT("unsupported type for operator"));
    #else
    mp_raise_msg_varg(&mp_type_TypeError,
        MP_ERROR_TEXT("unsupported types for %q: '%s', '%s'"),
        mp_binary_op_method_name[op], mp_obj_get_type_str(lhs), mp_obj_get_type_str(rhs));
    #endif

zero_division:
    mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("divide by zero"));
}
624
// Call fun with no arguments.
mp_obj_t mp_call_function_0(mp_obj_t fun) {
    return mp_call_function_n_kw(fun, 0, 0, NULL);
}
628
// Call fun with a single positional argument.
mp_obj_t mp_call_function_1(mp_obj_t fun, mp_obj_t arg) {
    return mp_call_function_n_kw(fun, 1, 0, &arg);
}
632
633mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2) {
634 mp_obj_t args[2];
635 args[0] = arg1;
636 args[1] = arg2;
637 return mp_call_function_n_kw(fun, 2, 0, args);
638}
639
// Call fun_in with n_args positional and n_kw keyword arguments.
// args contains, eg: arg0 arg1 key0 value0 key1 value1
// Raises TypeError if fun_in's type has no call slot.
mp_obj_t mp_call_function_n_kw(mp_obj_t fun_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    // TODO improve this: fun object can specify its type and we parse here the arguments,
    // passing to the function arrays of fixed and keyword arguments

    DEBUG_OP_printf("calling function %p(n_args=" UINT_FMT ", n_kw=" UINT_FMT ", args=%p)\n", fun_in, n_args, n_kw, args);

    // get the type
    const mp_obj_type_t *type = mp_obj_get_type(fun_in);

    // do the call via the type's call slot, if it has one
    if (type->call != NULL) {
        return type->call(fun_in, n_args, n_kw, args);
    }

    #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
    mp_raise_TypeError(MP_ERROR_TEXT("object not callable"));
    #else
    mp_raise_msg_varg(&mp_type_TypeError,
        MP_ERROR_TEXT("'%s' object isn't callable"), mp_obj_get_type_str(fun_in));
    #endif
}
662
663// args contains: fun self/NULL arg(0) ... arg(n_args-2) arg(n_args-1) kw_key(0) kw_val(0) ... kw_key(n_kw-1) kw_val(n_kw-1)
664// if n_args==0 and n_kw==0 then there are only fun and self/NULL
665mp_obj_t mp_call_method_n_kw(size_t n_args, size_t n_kw, const mp_obj_t *args) {
666 DEBUG_OP_printf("call method (fun=%p, self=%p, n_args=" UINT_FMT ", n_kw=" UINT_FMT ", args=%p)\n", args[0], args[1], n_args, n_kw, args);
667 int adjust = (args[1] == MP_OBJ_NULL) ? 0 : 1;
668 return mp_call_function_n_kw(args[0], n_args + adjust, n_kw, args + 2 - adjust);
669}
670
// This function only needs to be exposed externally when in stackless mode.
#if !MICROPY_STACKLESS
STATIC
#endif
// Prepare the argument array for a call that uses *args and/or **kwargs.
// n_args_n_kw packs the fixed counts: low byte = n_args, next byte = n_kw.
// args holds: fun [self] arg(0..n_args-1) kw-pairs(0..n_kw-1) pos_seq kw_dict,
// where pos_seq/kw_dict may be MP_OBJ_NULL.  The flattened result is written
// to out_args; the caller must free out_args->args (n_alloc elements) with
// mp_nonlocal_free after the call.
void mp_call_prepare_args_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_obj_t *args, mp_call_args_t *out_args) {
    mp_obj_t fun = *args++;
    mp_obj_t self = MP_OBJ_NULL;
    if (have_self) {
        self = *args++; // may be MP_OBJ_NULL
    }
    uint n_args = n_args_n_kw & 0xff;
    uint n_kw = (n_args_n_kw >> 8) & 0xff;
    mp_obj_t pos_seq = args[n_args + 2 * n_kw]; // may be MP_OBJ_NULL
    mp_obj_t kw_dict = args[n_args + 2 * n_kw + 1]; // may be MP_OBJ_NULL

    DEBUG_OP_printf("call method var (fun=%p, self=%p, n_args=%u, n_kw=%u, args=%p, seq=%p, dict=%p)\n", fun, self, n_args, n_kw, args, pos_seq, kw_dict);

    // We need to create the following array of objects:
    //     args[0 .. n_args]  unpacked(pos_seq)  args[n_args .. n_args + 2 * n_kw]  unpacked(kw_dict)
    // TODO: optimize one day to avoid constructing new arg array? Will be hard.

    // The new args array
    mp_obj_t *args2;
    uint args2_alloc;
    uint args2_len = 0;

    // Try to get a hint for the size of the kw_dict
    uint kw_dict_len = 0;
    if (kw_dict != MP_OBJ_NULL && mp_obj_is_type(kw_dict, &mp_type_dict)) {
        kw_dict_len = mp_obj_dict_len(kw_dict);
    }

    // Extract the pos_seq sequence to the new args array.
    // Note that it can be arbitrary iterator.
    if (pos_seq == MP_OBJ_NULL) {
        // no sequence

        // allocate memory for the new array of args
        args2_alloc = 1 + n_args + 2 * (n_kw + kw_dict_len);
        args2 = mp_nonlocal_alloc(args2_alloc * sizeof(mp_obj_t));

        // copy the self
        if (self != MP_OBJ_NULL) {
            args2[args2_len++] = self;
        }

        // copy the fixed pos args
        mp_seq_copy(args2 + args2_len, args, n_args, mp_obj_t);
        args2_len += n_args;

    } else if (mp_obj_is_type(pos_seq, &mp_type_tuple) || mp_obj_is_type(pos_seq, &mp_type_list)) {
        // optimise the case of a tuple and list: the length is known up front

        // get the items
        size_t len;
        mp_obj_t *items;
        mp_obj_get_array(pos_seq, &len, &items);

        // allocate memory for the new array of args
        args2_alloc = 1 + n_args + len + 2 * (n_kw + kw_dict_len);
        args2 = mp_nonlocal_alloc(args2_alloc * sizeof(mp_obj_t));

        // copy the self
        if (self != MP_OBJ_NULL) {
            args2[args2_len++] = self;
        }

        // copy the fixed and variable position args
        mp_seq_cat(args2 + args2_len, args, n_args, items, len, mp_obj_t);
        args2_len += n_args + len;

    } else {
        // generic iterator: length unknown, so grow the array as needed

        // allocate memory for the new array of args
        args2_alloc = 1 + n_args + 2 * (n_kw + kw_dict_len) + 3;
        args2 = mp_nonlocal_alloc(args2_alloc * sizeof(mp_obj_t));

        // copy the self
        if (self != MP_OBJ_NULL) {
            args2[args2_len++] = self;
        }

        // copy the fixed position args
        mp_seq_copy(args2 + args2_len, args, n_args, mp_obj_t);
        args2_len += n_args;

        // extract the variable position args from the iterator
        mp_obj_iter_buf_t iter_buf;
        mp_obj_t iterable = mp_getiter(pos_seq, &iter_buf);
        mp_obj_t item;
        while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
            if (args2_len >= args2_alloc) {
                // grow geometrically to keep appends amortised O(1)
                args2 = mp_nonlocal_realloc(args2, args2_alloc * sizeof(mp_obj_t), args2_alloc * 2 * sizeof(mp_obj_t));
                args2_alloc *= 2;
            }
            args2[args2_len++] = item;
        }
    }

    // The size of the args2 array now is the number of positional args.
    uint pos_args_len = args2_len;

    // Copy the fixed kw args.
    mp_seq_copy(args2 + args2_len, args + n_args, 2 * n_kw, mp_obj_t);
    args2_len += 2 * n_kw;

    // Extract (key,value) pairs from kw_dict dictionary and append to args2.
    // Note that it can be arbitrary iterator.
    if (kw_dict == MP_OBJ_NULL) {
        // pass
    } else if (mp_obj_is_type(kw_dict, &mp_type_dict)) {
        // dictionary
        mp_map_t *map = mp_obj_dict_get_map(kw_dict);
        assert(args2_len + 2 * map->used <= args2_alloc); // should have enough, since kw_dict_len is in this case hinted correctly above
        for (size_t i = 0; i < map->alloc; i++) {
            if (mp_map_slot_is_filled(map, i)) {
                // the key must be a qstr, so intern it if it's a string
                mp_obj_t key = map->table[i].key;
                if (!mp_obj_is_qstr(key)) {
                    key = mp_obj_str_intern_checked(key);
                }
                args2[args2_len++] = key;
                args2[args2_len++] = map->table[i].value;
            }
        }
    } else {
        // generic mapping:
        // - call keys() to get an iterable of all keys in the mapping
        // - call __getitem__ for each key to get the corresponding value

        // get the keys iterable
        mp_obj_t dest[3];
        mp_load_method(kw_dict, MP_QSTR_keys, dest);
        mp_obj_t iterable = mp_getiter(mp_call_method_n_kw(0, 0, dest), NULL);

        mp_obj_t key;
        while ((key = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
            // expand size of args array if needed
            if (args2_len + 1 >= args2_alloc) {
                uint new_alloc = args2_alloc * 2;
                if (new_alloc < 4) {
                    new_alloc = 4;
                }
                args2 = mp_nonlocal_realloc(args2, args2_alloc * sizeof(mp_obj_t), new_alloc * sizeof(mp_obj_t));
                args2_alloc = new_alloc;
            }

            // the key must be a qstr, so intern it if it's a string
            if (!mp_obj_is_qstr(key)) {
                key = mp_obj_str_intern_checked(key);
            }

            // get the value corresponding to the key
            mp_load_method(kw_dict, MP_QSTR___getitem__, dest);
            dest[2] = key;
            mp_obj_t value = mp_call_method_n_kw(1, 0, dest);

            // store the key/value pair in the argument array
            args2[args2_len++] = key;
            args2[args2_len++] = value;
        }
    }

    out_args->fun = fun;
    out_args->args = args2;
    out_args->n_args = pos_args_len;
    out_args->n_kw = (args2_len - pos_args_len) / 2;
    out_args->n_alloc = args2_alloc;
}
841
// Perform a call that uses *args and/or **kwargs: flatten the arguments
// via mp_call_prepare_args_n_kw_var, make the call, then free the
// temporary argument array.
mp_obj_t mp_call_method_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_obj_t *args) {
    mp_call_args_t out_args;
    mp_call_prepare_args_n_kw_var(have_self, n_args_n_kw, args, &out_args);

    mp_obj_t res = mp_call_function_n_kw(out_args.fun, out_args.n_args, out_args.n_kw, out_args.args);
    mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));

    return res;
}
851
852// unpacked items are stored in reverse order into the array pointed to by items
853void mp_unpack_sequence(mp_obj_t seq_in, size_t num, mp_obj_t *items) {
854 size_t seq_len;
855 if (mp_obj_is_type(seq_in, &mp_type_tuple) || mp_obj_is_type(seq_in, &mp_type_list)) {
856 mp_obj_t *seq_items;
857 mp_obj_get_array(seq_in, &seq_len, &seq_items);
858 if (seq_len < num) {
859 goto too_short;
860 } else if (seq_len > num) {
861 goto too_long;
862 }
863 for (size_t i = 0; i < num; i++) {
864 items[i] = seq_items[num - 1 - i];
865 }
866 } else {
867 mp_obj_iter_buf_t iter_buf;
868 mp_obj_t iterable = mp_getiter(seq_in, &iter_buf);
869
870 for (seq_len = 0; seq_len < num; seq_len++) {
871 mp_obj_t el = mp_iternext(iterable);
872 if (el == MP_OBJ_STOP_ITERATION) {
873 goto too_short;
874 }
875 items[num - 1 - seq_len] = el;
876 }
877 if (mp_iternext(iterable) != MP_OBJ_STOP_ITERATION) {
878 goto too_long;
879 }
880 }
881 return;
882
883too_short:
884 #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
885 mp_raise_ValueError(MP_ERROR_TEXT("wrong number of values to unpack"));
886 #else
887 mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("need more than %d values to unpack"), (int)seq_len);
888 #endif
889too_long:
890 #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
891 mp_raise_ValueError(MP_ERROR_TEXT("wrong number of values to unpack"));
892 #else
893 mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("too many values to unpack (expected %d)"), (int)num);
894 #endif
895}
896
897// unpacked items are stored in reverse order into the array pointed to by items
898void mp_unpack_ex(mp_obj_t seq_in, size_t num_in, mp_obj_t *items) {
899 size_t num_left = num_in & 0xff;
900 size_t num_right = (num_in >> 8) & 0xff;
901 DEBUG_OP_printf("unpack ex " UINT_FMT " " UINT_FMT "\n", num_left, num_right);
902 size_t seq_len;
903 if (mp_obj_is_type(seq_in, &mp_type_tuple) || mp_obj_is_type(seq_in, &mp_type_list)) {
904 // Make the seq variable volatile so the compiler keeps a reference to it,
905 // since if it's a tuple then seq_items points to the interior of the GC cell
906 // and mp_obj_new_list may trigger a GC which doesn't trace this and reclaims seq.
907 volatile mp_obj_t seq = seq_in;
908 mp_obj_t *seq_items;
909 mp_obj_get_array(seq, &seq_len, &seq_items);
910 if (seq_len < num_left + num_right) {
911 goto too_short;
912 }
913 for (size_t i = 0; i < num_right; i++) {
914 items[i] = seq_items[seq_len - 1 - i];
915 }
916 items[num_right] = mp_obj_new_list(seq_len - num_left - num_right, seq_items + num_left);
917 for (size_t i = 0; i < num_left; i++) {
918 items[num_right + 1 + i] = seq_items[num_left - 1 - i];
919 }
920 seq = MP_OBJ_NULL;
921 } else {
922 // Generic iterable; this gets a bit messy: we unpack known left length to the
923 // items destination array, then the rest to a dynamically created list. Once the
924 // iterable is exhausted, we take from this list for the right part of the items.
925 // TODO Improve to waste less memory in the dynamically created list.
926 mp_obj_t iterable = mp_getiter(seq_in, NULL);
927 mp_obj_t item;
928 for (seq_len = 0; seq_len < num_left; seq_len++) {
929 item = mp_iternext(iterable);
930 if (item == MP_OBJ_STOP_ITERATION) {
931 goto too_short;
932 }
933 items[num_left + num_right + 1 - 1 - seq_len] = item;
934 }
935 mp_obj_list_t *rest = MP_OBJ_TO_PTR(mp_obj_new_list(0, NULL));
936 while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
937 mp_obj_list_append(MP_OBJ_FROM_PTR(rest), item);
938 }
939 if (rest->len < num_right) {
940 goto too_short;
941 }
942 items[num_right] = MP_OBJ_FROM_PTR(rest);
943 for (size_t i = 0; i < num_right; i++) {
944 items[num_right - 1 - i] = rest->items[rest->len - num_right + i];
945 }
946 mp_obj_list_set_len(MP_OBJ_FROM_PTR(rest), rest->len - num_right);
947 }
948 return;
949
950too_short:
951 #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
952 mp_raise_ValueError(MP_ERROR_TEXT("wrong number of values to unpack"));
953 #else
954 mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("need more than %d values to unpack"), (int)seq_len);
955 #endif
956}
957
958mp_obj_t mp_load_attr(mp_obj_t base, qstr attr) {
959 DEBUG_OP_printf("load attr %p.%s\n", base, qstr_str(attr));
960 // use load_method
961 mp_obj_t dest[2];
962 mp_load_method(base, attr, dest);
963 if (dest[1] == MP_OBJ_NULL) {
964 // load_method returned just a normal attribute
965 return dest[0];
966 } else {
967 // load_method returned a method, so build a bound method object
968 return mp_obj_new_bound_meth(dest[0], dest[1]);
969 }
970}
971
972#if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
973
974// The following "checked fun" type is local to the mp_convert_member_lookup
975// function, and serves to check that the first argument to a builtin function
976// has the correct type.
977
// Wrapper object pairing a built-in function with the type its first
// argument must have; checked_fun_call validates the argument before
// forwarding the call.
typedef struct _mp_obj_checked_fun_t {
    mp_obj_base_t base;
    const mp_obj_type_t *type; // required type of the first positional argument
    mp_obj_t fun; // the wrapped built-in function
} mp_obj_checked_fun_t;
983
984STATIC mp_obj_t checked_fun_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
985 mp_obj_checked_fun_t *self = MP_OBJ_TO_PTR(self_in);
986 if (n_args > 0) {
987 const mp_obj_type_t *arg0_type = mp_obj_get_type(args[0]);
988 if (arg0_type != self->type) {
989 #if MICROPY_ERROR_REPORTING != MICROPY_ERROR_REPORTING_DETAILED
990 mp_raise_TypeError(MP_ERROR_TEXT("argument has wrong type"));
991 #else
992 mp_raise_msg_varg(&mp_type_TypeError,
993 MP_ERROR_TEXT("argument should be a '%q' not a '%q'"), self->type->name, arg0_type->name);
994 #endif
995 }
996 }
997 return mp_call_function_n_kw(self->fun, n_args, n_kw, args);
998}
999
// Type object for the checked-fun wrapper; presents itself as a plain
// 'function' and binds self like other built-in functions do.
STATIC const mp_obj_type_t mp_type_checked_fun = {
    { &mp_type_type },
    .flags = MP_TYPE_FLAG_BINDS_SELF,
    .name = MP_QSTR_function,
    .call = checked_fun_call,
};
1006
1007STATIC mp_obj_t mp_obj_new_checked_fun(const mp_obj_type_t *type, mp_obj_t fun) {
1008 mp_obj_checked_fun_t *o = m_new_obj(mp_obj_checked_fun_t);
1009 o->base.type = &mp_type_checked_fun;
1010 o->type = type;
1011 o->fun = fun;
1012 return MP_OBJ_FROM_PTR(o);
1013}
1014
1015#endif // MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
1016
// Given a member that was extracted from an instance, convert it correctly
// and put the result in the dest[] array for a possible method call.
// Conversion means dealing with static/class methods, callables, and values.
// On return, dest[0] holds the attribute/function and dest[1] holds the self
// object when a bound method was produced (dest[1] is not written for plain
// values — the caller is expected to have pre-cleared it).
// see http://docs.python.org/3/howto/descriptor.html
// and also https://mail.python.org/pipermail/python-dev/2015-March/138950.html
void mp_convert_member_lookup(mp_obj_t self, const mp_obj_type_t *type, mp_obj_t member, mp_obj_t *dest) {
    if (mp_obj_is_obj(member)) {
        const mp_obj_type_t *m_type = ((mp_obj_base_t *)MP_OBJ_TO_PTR(member))->type;
        if (m_type->flags & MP_TYPE_FLAG_BINDS_SELF) {
            // `member` is a function that binds self as its first argument.
            if (m_type->flags & MP_TYPE_FLAG_BUILTIN_FUN) {
                // `member` is a built-in function, which has special behaviour.
                if (mp_obj_is_instance_type(type)) {
                    // Built-in functions on user types always behave like a staticmethod.
                    dest[0] = member;
                }
                #if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
                else if (self == MP_OBJ_NULL && type != &mp_type_object) {
                    // `member` is a built-in method without a first argument, so wrap
                    // it in a type checker that will check self when it's supplied.
                    // Note that object will do its own checking so shouldn't be wrapped.
                    dest[0] = mp_obj_new_checked_fun(type, member);
                }
                #endif
                else {
                    // Return a (built-in) bound method, with self being this object.
                    dest[0] = member;
                    dest[1] = self;
                }
            } else {
                // Return a bound method, with self being this object.
                dest[0] = member;
                dest[1] = self;
            }
        } else if (m_type == &mp_type_staticmethod) {
            // `member` is a staticmethod, return the function that it wraps.
            dest[0] = ((mp_obj_static_class_method_t *)MP_OBJ_TO_PTR(member))->fun;
        } else if (m_type == &mp_type_classmethod) {
            // `member` is a classmethod, return a bound method with self being the type of
            // this object. This type should be the type of the original instance, not the
            // base type (which is what is passed in the `type` argument to this function).
            if (self != MP_OBJ_NULL) {
                type = mp_obj_get_type(self);
            }
            dest[0] = ((mp_obj_static_class_method_t *)MP_OBJ_TO_PTR(member))->fun;
            dest[1] = MP_OBJ_FROM_PTR(type);
        } else {
            // `member` is a value, so just return that value.
            dest[0] = member;
        }
    } else {
        // `member` is not an object (eg small int, qstr), so just return that value.
        dest[0] = member;
    }
}
1072
// no attribute found, returns: dest[0] == MP_OBJ_NULL, dest[1] == MP_OBJ_NULL
// normal attribute found, returns: dest[0] == <attribute>, dest[1] == MP_OBJ_NULL
// method attribute found, returns: dest[0] == <method>, dest[1] == <self>
// This is the non-raising attribute/method lookup that the other attribute
// loaders build upon.
void mp_load_method_maybe(mp_obj_t obj, qstr attr, mp_obj_t *dest) {
    // clear output to indicate no attribute/method found yet
    dest[0] = MP_OBJ_NULL;
    dest[1] = MP_OBJ_NULL;

    // get the type
    const mp_obj_type_t *type = mp_obj_get_type(obj);

    // look for built-in names
    #if MICROPY_CPYTHON_COMPAT
    if (attr == MP_QSTR___class__) {
        // a.__class__ is equivalent to type(a)
        dest[0] = MP_OBJ_FROM_PTR(type);
        return;
    }
    #endif

    if (attr == MP_QSTR___next__ && type->iternext != NULL) {
        // objects with a native iternext slot expose __next__ as the builtin
        // next() bound to this object
        dest[0] = MP_OBJ_FROM_PTR(&mp_builtin_next_obj);
        dest[1] = obj;

    } else if (type->attr != NULL) {
        // this type can do its own load, so call it
        type->attr(obj, attr, dest);

    } else if (type->locals_dict != NULL) {
        // generic method lookup
        // this is a lookup in the object (ie not class or type)
        assert(type->locals_dict->base.type == &mp_type_dict); // MicroPython restriction, for now
        mp_map_t *locals_map = &type->locals_dict->map;
        mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
        if (elem != NULL) {
            // found in the class dict: apply descriptor/bound-method conversion
            mp_convert_member_lookup(obj, type, elem->value, dest);
        }
    }
}
1112
// Like mp_load_method_maybe, but raises AttributeError when the attribute
// is not found.  dest[] is filled as described for mp_load_method_maybe.
void mp_load_method(mp_obj_t base, qstr attr, mp_obj_t *dest) {
    DEBUG_OP_printf("load method %p.%s\n", base, qstr_str(attr));

    mp_load_method_maybe(base, attr, dest);

    if (dest[0] == MP_OBJ_NULL) {
        // no attribute/method called attr
        #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
        mp_raise_msg(&mp_type_AttributeError, MP_ERROR_TEXT("no such attribute"));
        #else
        // following CPython, we give a more detailed error message for type objects
        if (mp_obj_is_type(base, &mp_type_type)) {
            mp_raise_msg_varg(&mp_type_AttributeError,
                MP_ERROR_TEXT("type object '%q' has no attribute '%q'"),
                ((mp_obj_type_t *)MP_OBJ_TO_PTR(base))->name, attr);
        } else {
            mp_raise_msg_varg(&mp_type_AttributeError,
                MP_ERROR_TEXT("'%s' object has no attribute '%q'"),
                mp_obj_get_type_str(base), attr);
        }
        #endif
    }
}
1136
// Acts like mp_load_method_maybe but catches AttributeError, and all other exceptions if requested
void mp_load_method_protected(mp_obj_t obj, qstr attr, mp_obj_t *dest, bool catch_all_exc) {
    nlr_buf_t nlr;
    if (nlr_push(&nlr) == 0) {
        mp_load_method_maybe(obj, attr, dest);
        nlr_pop();
    } else {
        // nlr.ret_val is the raised exception object; swallow it when
        // catch_all_exc is set or when it's an AttributeError (or subclass)
        if (!catch_all_exc
            && !mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t *)nlr.ret_val)->type),
                MP_OBJ_FROM_PTR(&mp_type_AttributeError))) {
            // Re-raise the exception
            nlr_raise(MP_OBJ_FROM_PTR(nlr.ret_val));
        }
    }
}
1152
// Store value into base.attr (corresponds to setattr).  Raises AttributeError
// if the type has no attr handler or the handler refuses the store.
void mp_store_attr(mp_obj_t base, qstr attr, mp_obj_t value) {
    DEBUG_OP_printf("store attr %p.%s <- %p\n", base, qstr_str(attr), value);
    const mp_obj_type_t *type = mp_obj_get_type(base);
    if (type->attr != NULL) {
        // dest[0] == MP_OBJ_SENTINEL with dest[1] == value signals a store to
        // the attr handler; the handler sets dest[0] to MP_OBJ_NULL on success
        mp_obj_t dest[2] = {MP_OBJ_SENTINEL, value};
        type->attr(base, attr, dest);
        if (dest[0] == MP_OBJ_NULL) {
            // success
            return;
        }
    }
    #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
    mp_raise_msg(&mp_type_AttributeError, MP_ERROR_TEXT("no such attribute"));
    #else
    mp_raise_msg_varg(&mp_type_AttributeError,
        MP_ERROR_TEXT("'%s' object has no attribute '%q'"),
        mp_obj_get_type_str(base), attr);
    #endif
}
1172
// Get an iterator for o_in (corresponds to iter(o_in)).  If iter_buf is
// non-NULL it may be used as in-place storage for the iterator object,
// avoiding a heap allocation; pass NULL to have one allocated as needed.
// Raises TypeError if the object is not iterable.
mp_obj_t mp_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) {
    assert(o_in);
    const mp_obj_type_t *type = mp_obj_get_type(o_in);

    // Check for native getiter which is the identity.  We handle this case explicitly
    // so we don't unnecessarily allocate any RAM for the iter_buf, which won't be used.
    if (type->getiter == mp_identity_getiter) {
        return o_in;
    }

    // check for native getiter (corresponds to __iter__)
    if (type->getiter != NULL) {
        if (iter_buf == NULL && type->getiter != mp_obj_instance_getiter) {
            // if caller did not provide a buffer then allocate one on the heap
            // mp_obj_instance_getiter is special, it will allocate only if needed
            iter_buf = m_new_obj(mp_obj_iter_buf_t);
        }
        mp_obj_t iter = type->getiter(o_in, iter_buf);
        if (iter != MP_OBJ_NULL) {
            return iter;
        }
        // getiter returned MP_OBJ_NULL: fall through to the __getitem__ protocol
    }

    // check for __getitem__
    mp_obj_t dest[2];
    mp_load_method_maybe(o_in, MP_QSTR___getitem__, dest);
    if (dest[0] != MP_OBJ_NULL) {
        // __getitem__ exists, create and return an iterator
        if (iter_buf == NULL) {
            // if caller did not provide a buffer then allocate one on the heap
            iter_buf = m_new_obj(mp_obj_iter_buf_t);
        }
        return mp_obj_new_getitem_iter(dest, iter_buf);
    }

    // object not iterable
    #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
    mp_raise_TypeError(MP_ERROR_TEXT("object not iterable"));
    #else
    mp_raise_msg_varg(&mp_type_TypeError,
        MP_ERROR_TEXT("'%s' object isn't iterable"), mp_obj_get_type_str(o_in));
    #endif

}
1217
// may return MP_OBJ_STOP_ITERATION as an optimisation instead of raise StopIteration()
// may also raise StopIteration()
// Uses the native iternext slot when present, otherwise the __next__ method.
mp_obj_t mp_iternext_allow_raise(mp_obj_t o_in) {
    const mp_obj_type_t *type = mp_obj_get_type(o_in);
    if (type->iternext != NULL) {
        return type->iternext(o_in);
    } else {
        // check for __next__ method
        mp_obj_t dest[2];
        mp_load_method_maybe(o_in, MP_QSTR___next__, dest);
        if (dest[0] != MP_OBJ_NULL) {
            // __next__ exists, call it and return its result
            return mp_call_method_n_kw(0, 0, dest);
        } else {
            #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
            mp_raise_TypeError(MP_ERROR_TEXT("object not an iterator"));
            #else
            mp_raise_msg_varg(&mp_type_TypeError,
                MP_ERROR_TEXT("'%s' object isn't an iterator"), mp_obj_get_type_str(o_in));
            #endif
        }
    }
}
1241
// will always return MP_OBJ_STOP_ITERATION instead of raising StopIteration() (or any subclass thereof)
// may raise other exceptions
mp_obj_t mp_iternext(mp_obj_t o_in) {
    MP_STACK_CHECK(); // enumerate, filter, map and zip can recursively call mp_iternext
    const mp_obj_type_t *type = mp_obj_get_type(o_in);
    if (type->iternext != NULL) {
        return type->iternext(o_in);
    } else {
        // check for __next__ method
        mp_obj_t dest[2];
        mp_load_method_maybe(o_in, MP_QSTR___next__, dest);
        if (dest[0] != MP_OBJ_NULL) {
            // __next__ exists, call it and return its result; catch any
            // StopIteration it raises and convert it to the sentinel value
            nlr_buf_t nlr;
            if (nlr_push(&nlr) == 0) {
                mp_obj_t ret = mp_call_method_n_kw(0, 0, dest);
                nlr_pop();
                return ret;
            } else {
                if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t *)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
                    return MP_OBJ_STOP_ITERATION;
                } else {
                    // not StopIteration: propagate the exception
                    nlr_jump(nlr.ret_val);
                }
            }
        } else {
            #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
            mp_raise_TypeError(MP_ERROR_TEXT("object not an iterator"));
            #else
            mp_raise_msg_varg(&mp_type_TypeError,
                MP_ERROR_TEXT("'%s' object isn't an iterator"), mp_obj_get_type_str(o_in));
            #endif
        }
    }
}
1277
// TODO: Unclear what to do with StopIterarion exception here.
// Resume a generator/iterator self_in; exactly one of send_value and
// throw_value is non-NULL (asserted below).  *ret_val receives the yielded
// value, a close()/StopIteration result, or an exception object, and the
// returned kind tells the VM how to interpret it.
mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val) {
    assert((send_value != MP_OBJ_NULL) ^ (throw_value != MP_OBJ_NULL));
    const mp_obj_type_t *type = mp_obj_get_type(self_in);

    if (type == &mp_type_gen_instance) {
        // native generator: delegate to the generator implementation
        return mp_obj_gen_resume(self_in, send_value, throw_value, ret_val);
    }

    if (type->iternext != NULL && send_value == mp_const_none) {
        // object has a native iternext and we're sending None: just iterate
        mp_obj_t ret = type->iternext(self_in);
        *ret_val = ret;
        if (ret != MP_OBJ_STOP_ITERATION) {
            return MP_VM_RETURN_YIELD;
        } else {
            // Emulate raise StopIteration()
            // Special case, handled in vm.c
            return MP_VM_RETURN_NORMAL;
        }
    }

    mp_obj_t dest[3]; // Reserve slot for send() arg

    // Python instance iterator protocol
    if (send_value == mp_const_none) {
        mp_load_method_maybe(self_in, MP_QSTR___next__, dest);
        if (dest[0] != MP_OBJ_NULL) {
            *ret_val = mp_call_method_n_kw(0, 0, dest);
            return MP_VM_RETURN_YIELD;
        }
    }

    // Either python instance generator protocol, or native object
    // generator protocol.
    if (send_value != MP_OBJ_NULL) {
        mp_load_method(self_in, MP_QSTR_send, dest);
        dest[2] = send_value;
        *ret_val = mp_call_method_n_kw(1, 0, dest);
        return MP_VM_RETURN_YIELD;
    }

    assert(throw_value != MP_OBJ_NULL);
    {
        if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(throw_value)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
            // GeneratorExit is delivered by calling close() if available
            mp_load_method_maybe(self_in, MP_QSTR_close, dest);
            if (dest[0] != MP_OBJ_NULL) {
                // TODO: Exceptions raised in close() are not propagated,
                // printed to sys.stderr
                *ret_val = mp_call_method_n_kw(0, 0, dest);
                // We assume one can't "yield" from close()
                return MP_VM_RETURN_NORMAL;
            }
        } else {
            // other exceptions are delivered via throw() if available
            mp_load_method_maybe(self_in, MP_QSTR_throw, dest);
            if (dest[0] != MP_OBJ_NULL) {
                dest[2] = throw_value;
                *ret_val = mp_call_method_n_kw(1, 0, dest);
                // If .throw() method returned, we assume it's value to yield
                // - any exception would be thrown with nlr_raise().
                return MP_VM_RETURN_YIELD;
            }
        }
        // If there's nowhere to throw exception into, then we assume that object
        // is just incapable to handle it, so any exception thrown into it
        // will be propagated up. This behavior is approved by test_pep380.py
        // test_delegation_of_close_to_non_generator(),
        // test_delegating_throw_to_non_generator()
        if (mp_obj_exception_match(throw_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
            // PEP479: if StopIteration is raised inside a generator it is replaced with RuntimeError
            *ret_val = mp_obj_new_exception_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("generator raised StopIteration"));
        } else {
            *ret_val = mp_make_raise_obj(throw_value);
        }
        return MP_VM_RETURN_EXCEPTION;
    }
}
1354
1355mp_obj_t mp_make_raise_obj(mp_obj_t o) {
1356 DEBUG_printf("raise %p\n", o);
1357 if (mp_obj_is_exception_type(o)) {
1358 // o is an exception type (it is derived from BaseException (or is BaseException))
1359 // create and return a new exception instance by calling o
1360 // TODO could have an option to disable traceback, then builtin exceptions (eg TypeError)
1361 // could have const instances in ROM which we return here instead
1362 return mp_call_function_n_kw(o, 0, 0, NULL);
1363 } else if (mp_obj_is_exception_instance(o)) {
1364 // o is an instance of an exception, so use it as the exception
1365 return o;
1366 } else {
1367 // o cannot be used as an exception, so return a type error (which will be raised by the caller)
1368 return mp_obj_new_exception_msg(&mp_type_TypeError, MP_ERROR_TEXT("exceptions must derive from BaseException"));
1369 }
1370}
1371
// Import module `name` (corresponds to __import__), passing fromlist and
// the relative-import level through to the builtin import function.
mp_obj_t mp_import_name(qstr name, mp_obj_t fromlist, mp_obj_t level) {
    DEBUG_printf("import name '%s' level=%d\n", qstr_str(name), MP_OBJ_SMALL_INT_VALUE(level));

    // build args array
    mp_obj_t args[5];
    args[0] = MP_OBJ_NEW_QSTR(name);
    args[1] = mp_const_none; // TODO should be globals
    args[2] = mp_const_none; // TODO should be locals
    args[3] = fromlist;
    args[4] = level;

    #if MICROPY_CAN_OVERRIDE_BUILTINS
    // Lookup __import__ and call that if it exists
    mp_obj_dict_t *bo_dict = MP_STATE_VM(mp_module_builtins_override_dict);
    if (bo_dict != NULL) {
        mp_map_elem_t *import = mp_map_lookup(&bo_dict->map, MP_OBJ_NEW_QSTR(MP_QSTR___import__), MP_MAP_LOOKUP);
        if (import != NULL) {
            return mp_call_function_n_kw(import->value, 5, 0, args);
        }
    }
    #endif

    // no override: use the builtin implementation
    return mp_builtin___import__(5, args);
}
1396
// Implements `from module import name`: first look for name as an attribute
// of module; failing that, try importing it as a submodule of the package.
// Raises ImportError if neither succeeds.
mp_obj_t mp_import_from(mp_obj_t module, qstr name) {
    DEBUG_printf("import from %p %s\n", module, qstr_str(name));

    mp_obj_t dest[2];

    mp_load_method_maybe(module, name, dest);

    if (dest[1] != MP_OBJ_NULL) {
        // Hopefully we can't import bound method from an object
    import_error:
        mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("can't import name %q"), name);
    }

    if (dest[0] != MP_OBJ_NULL) {
        // found as a plain attribute of the module
        return dest[0];
    }

    #if MICROPY_ENABLE_EXTERNAL_IMPORT

    // See if it's a package, then can try FS import
    if (!mp_obj_is_package(module)) {
        goto import_error;
    }

    // build the dotted submodule name "<pkg>.<name>" from the package's __name__
    mp_load_method_maybe(module, MP_QSTR___name__, dest);
    size_t pkg_name_len;
    const char *pkg_name = mp_obj_str_get_data(dest[0], &pkg_name_len);

    const uint dot_name_len = pkg_name_len + 1 + qstr_len(name);
    char *dot_name = mp_local_alloc(dot_name_len);
    memcpy(dot_name, pkg_name, pkg_name_len);
    dot_name[pkg_name_len] = '.';
    memcpy(dot_name + pkg_name_len + 1, qstr_str(name), qstr_len(name));
    qstr dot_name_q = qstr_from_strn(dot_name, dot_name_len);
    mp_local_free(dot_name);

    // For fromlist, pass sentinel "non empty" value to force returning of leaf module
    return mp_import_name(dot_name_q, mp_const_true, MP_OBJ_NEW_SMALL_INT(0));

    #else

    // Package import not supported with external imports disabled
    goto import_error;

    #endif
}
1443
1444void mp_import_all(mp_obj_t module) {
1445 DEBUG_printf("import all %p\n", module);
1446
1447 // TODO: Support __all__
1448 mp_map_t *map = &mp_obj_module_get_globals(module)->map;
1449 for (size_t i = 0; i < map->alloc; i++) {
1450 if (mp_map_slot_is_filled(map, i)) {
1451 // Entry in module global scope may be generated programmatically
1452 // (and thus be not a qstr for longer names). Avoid turning it in
1453 // qstr if it has '_' and was used exactly to save memory.
1454 const char *name = mp_obj_str_get_str(map->table[i].key);
1455 if (*name != '_') {
1456 qstr qname = mp_obj_str_get_qstr(map->table[i].key);
1457 mp_store_name(qname, map->table[i].value);
1458 }
1459 }
1460 }
1461}
1462
1463#if MICROPY_ENABLE_COMPILER
1464
// Parse and compile the source provided by lex, then execute the resulting
// module function with the given globals/locals dicts as context (if globals
// is NULL and compile-only is enabled, the compiled function is returned
// without being executed).  The previous globals/locals are restored on both
// normal return and exception.
mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
    // save context
    // (volatile so the values survive the nlr_push longjmp into the else branch)
    mp_obj_dict_t *volatile old_globals = mp_globals_get();
    mp_obj_dict_t *volatile old_locals = mp_locals_get();

    // set new context
    mp_globals_set(globals);
    mp_locals_set(locals);

    nlr_buf_t nlr;
    if (nlr_push(&nlr) == 0) {
        qstr source_name = lex->source_name;
        mp_parse_tree_t parse_tree = mp_parse(lex, parse_input_kind);
        mp_obj_t module_fun = mp_compile(&parse_tree, source_name, parse_input_kind == MP_PARSE_SINGLE_INPUT);

        mp_obj_t ret;
        if (MICROPY_PY_BUILTINS_COMPILE && globals == NULL) {
            // for compile only, return value is the module function
            ret = module_fun;
        } else {
            // execute module function and get return value
            ret = mp_call_function_0(module_fun);
        }

        // finish nlr block, restore context and return value
        nlr_pop();
        mp_globals_set(old_globals);
        mp_locals_set(old_locals);
        return ret;
    } else {
        // exception; restore context and re-raise same exception
        mp_globals_set(old_globals);
        mp_locals_set(old_locals);
        nlr_jump(nlr.ret_val);
    }
}
1501
1502#endif // MICROPY_ENABLE_COMPILER
1503
// Called when a heap allocation of num_bytes fails; always raises MemoryError
// (with a distinct message if the failure was because the GC heap is locked).
NORETURN void m_malloc_fail(size_t num_bytes) {
    DEBUG_printf("memory allocation failed, allocating %u bytes\n", (uint)num_bytes);
    #if MICROPY_ENABLE_GC
    if (gc_is_locked()) {
        mp_raise_msg(&mp_type_MemoryError, MP_ERROR_TEXT("memory allocation failed, heap is locked"));
    }
    #endif
    mp_raise_msg_varg(&mp_type_MemoryError,
        MP_ERROR_TEXT("memory allocation failed, allocating %u bytes"), (uint)num_bytes);
}
1514
1515NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg) {
1516 if (msg == NULL) {
1517 nlr_raise(mp_obj_new_exception(exc_type));
1518 } else {
1519 nlr_raise(mp_obj_new_exception_msg(exc_type, msg));
1520 }
1521}
1522
// Raise an exception of type exc_type with a printf-style formatted message.
NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, ...) {
    va_list args;
    va_start(args, fmt);
    mp_obj_t exc = mp_obj_new_exception_msg_vlist(exc_type, fmt, args);
    va_end(args);
    nlr_raise(exc);
}
1530
// Convenience wrapper: raise ValueError with the given (ROM) message.
NORETURN void mp_raise_ValueError(mp_rom_error_text_t msg) {
    mp_raise_msg(&mp_type_ValueError, msg);
}
1534
// Convenience wrapper: raise TypeError with the given (ROM) message.
NORETURN void mp_raise_TypeError(mp_rom_error_text_t msg) {
    mp_raise_msg(&mp_type_TypeError, msg);
}
1538
// Convenience wrapper: raise OSError carrying the given errno value.
NORETURN void mp_raise_OSError(int errno_) {
    nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(errno_)));
}
1542
// Convenience wrapper: raise NotImplementedError with the given (ROM) message.
NORETURN void mp_raise_NotImplementedError(mp_rom_error_text_t msg) {
    mp_raise_msg(&mp_type_NotImplementedError, msg);
}
1546
1547#if MICROPY_STACK_CHECK || MICROPY_ENABLE_PYSTACK
// Raise RuntimeError for Python stack exhaustion; the qstr spells
// "maximum recursion depth exceeded" (spaces encoded as _space_).
NORETURN void mp_raise_recursion_depth(void) {
    nlr_raise(mp_obj_new_exception_arg1(&mp_type_RuntimeError,
        MP_OBJ_NEW_QSTR(MP_QSTR_maximum_space_recursion_space_depth_space_exceeded)));
}
1552#endif
1553