/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 * Copyright (c) 2014-2017 Paul Sokolovsky
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <string.h>
#include <assert.h>

#include "py/objtuple.h"
#include "py/runtime.h"

// type check is done on getiter method to allow tuple, namedtuple, attrtuple
#define mp_obj_is_tuple_compatible(o) (mp_obj_get_type(o)->getiter == mp_obj_tuple_getiter)
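// (namedtuple and attrtuple reuse mp_obj_tuple_getiter as their getiter slot,
// which is why instances of those types pass this check as well)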

/******************************************************************************/
/* tuple */

void mp_obj_tuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
    mp_obj_tuple_t *o = MP_OBJ_TO_PTR(o_in);
    if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
        mp_print_str(print, "[");
    } else {
        mp_print_str(print, "(");
        kind = PRINT_REPR;
    }
    for (size_t i = 0; i < o->len; i++) {
        if (i > 0) {
            mp_print_str(print, ", ");
        }
        mp_obj_print_helper(print, o->items[i], kind);
    }
    if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
        mp_print_str(print, "]");
    } else {
        if (o->len == 1) {
            mp_print_str(print, ",");
        }
        mp_print_str(print, ")");
    }
}

STATIC mp_obj_t mp_obj_tuple_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    (void)type_in;

    mp_arg_check_num(n_args, n_kw, 0, 1, false);

    switch (n_args) {
        case 0:
            // return an empty tuple
            return mp_const_empty_tuple;

        case 1:
        default: {
            // 1 argument, an iterable from which we make a new tuple
            if (mp_obj_is_type(args[0], &mp_type_tuple)) {
                return args[0];
            }

            // TODO optimise for cases where we know the length of the iterator

            size_t alloc = 4;
            size_t len = 0;
            mp_obj_t *items = m_new(mp_obj_t, alloc);

            mp_obj_t iterable = mp_getiter(args[0], NULL);
            mp_obj_t item;
            while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
                if (len >= alloc) {
                    items = m_renew(mp_obj_t, items, alloc, alloc * 2);
                    alloc *= 2;
                }
                items[len++] = item;
            }

            mp_obj_t tuple = mp_obj_new_tuple(len, items);
            m_del(mp_obj_t, items, alloc);

            return tuple;
        }
    }
}
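
// Illustrative Python-level behaviour of the constructor above:
//   tuple()          -> ()            (the shared empty tuple)
//   tuple([1, 2, 3]) -> (1, 2, 3)     (built by iterating the argument)
//   tuple(t)         -> t itself, unchanged, when t is already an exact tuple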

// Don't pass MP_BINARY_OP_NOT_EQUAL here
STATIC mp_obj_t tuple_cmp_helper(mp_uint_t op, mp_obj_t self_in, mp_obj_t another_in) {
    mp_check_self(mp_obj_is_tuple_compatible(self_in));
    const mp_obj_type_t *another_type = mp_obj_get_type(another_in);
    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
    if (another_type->getiter != mp_obj_tuple_getiter) {
        // Slow path for user subclasses
        another_in = mp_obj_cast_to_native_base(another_in, MP_OBJ_FROM_PTR(&mp_type_tuple));
        if (another_in == MP_OBJ_NULL) {
            return MP_OBJ_NULL;
        }
    }
    mp_obj_tuple_t *another = MP_OBJ_TO_PTR(another_in);

    return mp_obj_new_bool(mp_seq_cmp_objs(op, self->items, self->len, another->items, another->len));
}

mp_obj_t mp_obj_tuple_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
    switch (op) {
        case MP_UNARY_OP_BOOL:
            return mp_obj_new_bool(self->len != 0);
        case MP_UNARY_OP_HASH: {
            // start hash with pointer to empty tuple, to make it fairly unique
            mp_int_t hash = (mp_int_t)mp_const_empty_tuple;
            for (size_t i = 0; i < self->len; i++) {
                hash += MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, self->items[i]));
            }
            return MP_OBJ_NEW_SMALL_INT(hash);
        }
        case MP_UNARY_OP_LEN:
            return MP_OBJ_NEW_SMALL_INT(self->len);
        default:
            return MP_OBJ_NULL; // op not supported
    }
}
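
// Note: the hash computed above is simply the sum of the element hashes, seeded
// with the address of the shared empty tuple; it is a deliberately simple scheme
// and is not compatible with CPython's tuple hash.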

mp_obj_t mp_obj_tuple_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) {
    mp_obj_tuple_t *o = MP_OBJ_TO_PTR(lhs);
    switch (op) {
        case MP_BINARY_OP_ADD:
        case MP_BINARY_OP_INPLACE_ADD: {
            if (!mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(rhs)), MP_OBJ_FROM_PTR(&mp_type_tuple))) {
                return MP_OBJ_NULL; // op not supported
            }
            mp_obj_tuple_t *p = MP_OBJ_TO_PTR(rhs);
            mp_obj_tuple_t *s = MP_OBJ_TO_PTR(mp_obj_new_tuple(o->len + p->len, NULL));
            mp_seq_cat(s->items, o->items, o->len, p->items, p->len, mp_obj_t);
            return MP_OBJ_FROM_PTR(s);
        }
        case MP_BINARY_OP_MULTIPLY:
        case MP_BINARY_OP_INPLACE_MULTIPLY: {
            mp_int_t n;
            if (!mp_obj_get_int_maybe(rhs, &n)) {
                return MP_OBJ_NULL; // op not supported
            }
            if (n <= 0) {
                return mp_const_empty_tuple;
            }
            mp_obj_tuple_t *s = MP_OBJ_TO_PTR(mp_obj_new_tuple(o->len * n, NULL));
            mp_seq_multiply(o->items, sizeof(*o->items), o->len, n, s->items);
            return MP_OBJ_FROM_PTR(s);
        }
        case MP_BINARY_OP_EQUAL:
        case MP_BINARY_OP_LESS:
        case MP_BINARY_OP_LESS_EQUAL:
        case MP_BINARY_OP_MORE:
        case MP_BINARY_OP_MORE_EQUAL:
            return tuple_cmp_helper(op, lhs, rhs);

        default:
            return MP_OBJ_NULL; // op not supported
    }
}
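
// Illustrative examples of the binary ops implemented above:
//   (1, 2) + (3, 4)  -> (1, 2, 3, 4)        (concatenation via mp_seq_cat)
//   (1, 2) * 3       -> (1, 2, 1, 2, 1, 2)  (repetition via mp_seq_multiply)
//   (1, 2) * 0       -> ()                  (n <= 0 yields the shared empty tuple)
//   (1, 2) < (1, 3)  -> True                (comparisons go through tuple_cmp_helper)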

mp_obj_t mp_obj_tuple_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
    if (value == MP_OBJ_SENTINEL) {
        // load
        mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
        #if MICROPY_PY_BUILTINS_SLICE
        if (mp_obj_is_type(index, &mp_type_slice)) {
            mp_bound_slice_t slice;
            if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) {
                mp_raise_NotImplementedError(MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
            }
            mp_obj_tuple_t *res = MP_OBJ_TO_PTR(mp_obj_new_tuple(slice.stop - slice.start, NULL));
            mp_seq_copy(res->items, self->items + slice.start, res->len, mp_obj_t);
            return MP_OBJ_FROM_PTR(res);
        }
        #endif
        size_t index_value = mp_get_index(self->base.type, self->len, index, false);
        return self->items[index_value];
    } else {
        return MP_OBJ_NULL; // op not supported
    }
}
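
// Illustrative examples of subscription as implemented above:
//   t[1], t[-1]  -> single item load; the index is range-checked by mp_get_index
//   t[1:3]       -> a new tuple, available when MICROPY_PY_BUILTINS_SLICE is enabled
//   t[::2]       -> raises NotImplementedError (only step-1 slices are supported)
//   t[0] = x     -> not supported (tuples are immutable), so the store returns MP_OBJ_NULL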

STATIC mp_obj_t tuple_count(mp_obj_t self_in, mp_obj_t value) {
    mp_check_self(mp_obj_is_type(self_in, &mp_type_tuple));
    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
    return mp_seq_count_obj(self->items, self->len, value);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(tuple_count_obj, tuple_count);

STATIC mp_obj_t tuple_index(size_t n_args, const mp_obj_t *args) {
    mp_check_self(mp_obj_is_type(args[0], &mp_type_tuple));
    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(args[0]);
    return mp_seq_index_obj(self->items, self->len, n_args, args);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(tuple_index_obj, 2, 4, tuple_index);

STATIC const mp_rom_map_elem_t tuple_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&tuple_count_obj) },
    { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&tuple_index_obj) },
};

STATIC MP_DEFINE_CONST_DICT(tuple_locals_dict, tuple_locals_dict_table);

const mp_obj_type_t mp_type_tuple = {
    { &mp_type_type },
    .name = MP_QSTR_tuple,
    .print = mp_obj_tuple_print,
    .make_new = mp_obj_tuple_make_new,
    .unary_op = mp_obj_tuple_unary_op,
    .binary_op = mp_obj_tuple_binary_op,
    .subscr = mp_obj_tuple_subscr,
    .getiter = mp_obj_tuple_getiter,
    .locals_dict = (mp_obj_dict_t *)&tuple_locals_dict,
};

// the zero-length tuple
const mp_obj_tuple_t mp_const_empty_tuple_obj = {{&mp_type_tuple}, 0};

mp_obj_t mp_obj_new_tuple(size_t n, const mp_obj_t *items) {
    if (n == 0) {
        return mp_const_empty_tuple;
    }
    mp_obj_tuple_t *o = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n);
    o->base.type = &mp_type_tuple;
    o->len = n;
    if (items) {
        for (size_t i = 0; i < n; i++) {
            o->items[i] = items[i];
        }
    }
    return MP_OBJ_FROM_PTR(o);
}
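
// Illustrative usage from C code (the local names here are hypothetical):
//   mp_obj_t items[2] = { MP_OBJ_NEW_SMALL_INT(1), MP_OBJ_NEW_SMALL_INT(2) };
//   mp_obj_t pair = mp_obj_new_tuple(2, items); // the items are copied into the new tuple
// Passing items == NULL allocates the tuple but leaves its slots for the caller
// to fill in, as mp_obj_tuple_binary_op does above.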

void mp_obj_tuple_get(mp_obj_t self_in, size_t *len, mp_obj_t **items) {
    assert(mp_obj_is_tuple_compatible(self_in));
    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
    *len = self->len;
    *items = &self->items[0];
}
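
// Illustrative usage from C code (the variable names here are hypothetical):
//   size_t len;
//   mp_obj_t *items;
//   mp_obj_tuple_get(tuple_in, &len, &items); // items points into the tuple, nothing is copied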

void mp_obj_tuple_del(mp_obj_t self_in) {
    assert(mp_obj_is_type(self_in, &mp_type_tuple));
    mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
    m_del_var(mp_obj_tuple_t, mp_obj_t, self->len, self);
}

/******************************************************************************/
/* tuple iterator */

typedef struct _mp_obj_tuple_it_t {
    mp_obj_base_t base;
    mp_fun_1_t iternext;
    mp_obj_tuple_t *tuple;
    size_t cur;
} mp_obj_tuple_it_t;

STATIC mp_obj_t tuple_it_iternext(mp_obj_t self_in) {
    mp_obj_tuple_it_t *self = MP_OBJ_TO_PTR(self_in);
    if (self->cur < self->tuple->len) {
        mp_obj_t o_out = self->tuple->items[self->cur];
        self->cur += 1;
        return o_out;
    } else {
        return MP_OBJ_STOP_ITERATION;
    }
}
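
// The iterator is constructed in the caller-supplied iter_buf, so iterating a
// tuple needs no separate heap allocation; the assert below checks that the
// iterator structure actually fits in that buffer.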
mp_obj_t mp_obj_tuple_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) {
    assert(sizeof(mp_obj_tuple_it_t) <= sizeof(mp_obj_iter_buf_t));
    mp_obj_tuple_it_t *o = (mp_obj_tuple_it_t *)iter_buf;
    o->base.type = &mp_type_polymorph_iter;
    o->iternext = tuple_it_iternext;
    o->tuple = MP_OBJ_TO_PTR(o_in);
    o->cur = 0;
    return MP_OBJ_FROM_PTR(o);
}