| /* |
| * Duktape.Buffer, Node.js Buffer, and Khronos/ES6 TypedArray built-ins |
| */ |
| |
| #include "duk_internal.h" |
| |
| /* |
| * Misc helpers |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Map DUK_HBUFFEROBJECT_ELEM_xxx to duk_hobject class number.
 * Sync with duk_hbufferobject.h and duk_hobject.h.
 * Indexed directly by element type; entry count must match the
 * number of DUK_HBUFFEROBJECT_ELEM_xxx values (9).
 */
static const duk_uint8_t duk__buffer_class_from_elemtype[9] = {
	DUK_HOBJECT_CLASS_UINT8ARRAY,
	DUK_HOBJECT_CLASS_UINT8CLAMPEDARRAY,
	DUK_HOBJECT_CLASS_INT8ARRAY,
	DUK_HOBJECT_CLASS_UINT16ARRAY,
	DUK_HOBJECT_CLASS_INT16ARRAY,
	DUK_HOBJECT_CLASS_UINT32ARRAY,
	DUK_HOBJECT_CLASS_INT32ARRAY,
	DUK_HOBJECT_CLASS_FLOAT32ARRAY,
	DUK_HOBJECT_CLASS_FLOAT64ARRAY
};
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Map DUK_HBUFFEROBJECT_ELEM_xxx to prototype object built-in index.
 * Sync with duk_hbufferobject.h.
 * Must stay in the same order as duk__buffer_class_from_elemtype.
 */
static const duk_uint8_t duk__buffer_proto_from_elemtype[9] = {
	DUK_BIDX_UINT8ARRAY_PROTOTYPE,
	DUK_BIDX_UINT8CLAMPEDARRAY_PROTOTYPE,
	DUK_BIDX_INT8ARRAY_PROTOTYPE,
	DUK_BIDX_UINT16ARRAY_PROTOTYPE,
	DUK_BIDX_INT16ARRAY_PROTOTYPE,
	DUK_BIDX_UINT32ARRAY_PROTOTYPE,
	DUK_BIDX_INT32ARRAY_PROTOTYPE,
	DUK_BIDX_FLOAT32ARRAY_PROTOTYPE,
	DUK_BIDX_FLOAT64ARRAY_PROTOTYPE
};
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Map DUK__FLD_xxx field type to byte size.  (The original comment said
 * DUK__FLX_xxx which doesn't match the entries below.)
 */
static const duk_uint8_t duk__buffer_nbytes_from_fldtype[6] = {
	1,  /* DUK__FLD_8BIT */
	2,  /* DUK__FLD_16BIT */
	4,  /* DUK__FLD_32BIT */
	4,  /* DUK__FLD_FLOAT */
	8,  /* DUK__FLD_DOUBLE */
	0   /* DUK__FLD_VARINT; not relevant here */
};
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| /* Bitfield for each DUK_HBUFFEROBJECT_ELEM_xxx indicating which element types |
| * are compatible with a blind byte copy for the TypedArray set() method (also |
| * used for TypedArray constructor). Array index is target buffer elem type, |
| * bitfield indicates compatible source types. The types must have same byte |
| * size and they must be coercion compatible. |
| */ |
| static duk_uint16_t duk__buffer_elemtype_copy_compatible[9] = { |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_UINT8 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT8) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT8CLAMPED) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_INT8), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_UINT8CLAMPED |
| * Note: INT8 is -not- copy compatible, e.g. -1 would coerce to 0x00. |
| */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT8) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT8CLAMPED), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_INT8 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT8) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT8CLAMPED) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_INT8), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_UINT16 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT16) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_INT16), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_INT16 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT16) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_INT16), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_UINT32 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT32) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_INT32), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_INT32 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_UINT32) | |
| (1U << DUK_HBUFFEROBJECT_ELEM_INT32), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_FLOAT32 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_FLOAT32), |
| |
| /* xxx -> DUK_HBUFFEROBJECT_ELEM_FLOAT64 */ |
| (1U << DUK_HBUFFEROBJECT_ELEM_FLOAT64) |
| }; |
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Shared helper for 'this' binding coercion: if the current 'this' value
 * is a duk_hbufferobject, return a pointer to it.  Otherwise either throw
 * a TypeError (throw_flag != 0) or return NULL (throw_flag == 0).
 */
DUK_LOCAL duk_hbufferobject *duk__getrequire_bufobj_this(duk_context *ctx, duk_bool_t throw_flag) {
	duk_hthread *thr;
	duk_tval *tv;
	duk_hbufferobject *h_this;

	DUK_ASSERT(ctx != NULL);
	thr = (duk_hthread *) ctx;

	tv = duk_get_borrowed_this_tval(ctx);
	DUK_ASSERT(tv != NULL);
	if (DUK_TVAL_IS_OBJECT(tv)) {
		h_this = (duk_hbufferobject *) DUK_TVAL_GET_OBJECT(tv);
		DUK_ASSERT(h_this != NULL);
		/* Any duk_hbufferobject subtype qualifies (Duktape.Buffer,
		 * Node.js Buffer, ArrayBuffer, TypedArray views, DataView).
		 */
		if (DUK_HOBJECT_IS_BUFFEROBJECT((duk_hobject *) h_this)) {
			DUK_ASSERT_HBUFFEROBJECT_VALID(h_this);
			return h_this;
		}
	}

	if (throw_flag) {
		DUK_ERROR_TYPE(thr, DUK_STR_NOT_BUFFER);
	}
	return NULL;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Check that 'this' is a duk_hbufferobject and return a pointer to it
 * (NULL if not; never throws).
 */
DUK_LOCAL duk_hbufferobject *duk__get_bufobj_this(duk_context *ctx) {
	return duk__getrequire_bufobj_this(ctx, 0);
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Check that 'this' is a duk_hbufferobject and return a pointer to it
 * (TypeError if not).  The original comment said "(NULL if not)" which
 * described the duk__get_bufobj_this() variant instead.
 */
DUK_LOCAL duk_hbufferobject *duk__require_bufobj_this(duk_context *ctx) {
	return duk__getrequire_bufobj_this(ctx, 1);
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| /* Check that value is a duk_hbufferobject and return a pointer to it. */ |
| DUK_LOCAL duk_hbufferobject *duk__require_bufobj_value(duk_context *ctx, duk_idx_t index) { |
| duk_hthread *thr; |
| duk_tval *tv; |
| duk_hbufferobject *h_obj; |
| |
| thr = (duk_hthread *) ctx; |
| |
| /* Don't accept relative indices now. */ |
| DUK_ASSERT(index >= 0); |
| |
| tv = duk_require_tval(ctx, index); |
| DUK_ASSERT(tv != NULL); |
| if (DUK_TVAL_IS_OBJECT(tv)) { |
| h_obj = (duk_hbufferobject *) DUK_TVAL_GET_OBJECT(tv); |
| DUK_ASSERT(h_obj != NULL); |
| if (DUK_HOBJECT_IS_BUFFEROBJECT((duk_hobject *) h_obj)) { |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_obj); |
| return h_obj; |
| } |
| } |
| |
| DUK_ERROR_TYPE(thr, DUK_STR_NOT_BUFFER); |
| return NULL; /* not reachable */ |
| } |
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
/* Attach plain buffer 'h_val' as the backing store of 'h_bufobj', which
 * must not yet have a buffer.  The bufferobject covers the full buffer
 * (length = buffer size); shift 0 and uint8 element type are asserted,
 * i.e. caller relies on the freshly pushed bufferobject defaults.
 */
DUK_LOCAL void duk__set_bufobj_buffer(duk_context *ctx, duk_hbufferobject *h_bufobj, duk_hbuffer *h_val) {
	duk_hthread *thr;

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);  /* thr only used by DUK_HBUFFER_INCREF() which may expand to nothing */

	DUK_ASSERT(ctx != NULL);
	DUK_ASSERT(h_bufobj != NULL);
	DUK_ASSERT(h_bufobj->buf == NULL);  /* no need to decref */
	DUK_ASSERT(h_val != NULL);
	DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj);

	h_bufobj->buf = h_val;
	DUK_HBUFFER_INCREF(thr, h_val);
	h_bufobj->length = (duk_uint_t) DUK_HBUFFER_GET_SIZE(h_val);
	DUK_ASSERT(h_bufobj->shift == 0);
	DUK_ASSERT(h_bufobj->elem_type == DUK_HBUFFEROBJECT_ELEM_UINT8);

	DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj);
}
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_LOCAL duk_hbufferobject *duk__push_arraybuffer_with_length(duk_context *ctx, duk_uint_t len) { |
| duk_hbuffer *h_val; |
| duk_hbufferobject *h_bufobj; |
| |
| (void) duk_push_fixed_buffer(ctx, (duk_size_t) len); |
| h_val = (duk_hbuffer *) duk_get_hbuffer(ctx, -1); |
| DUK_ASSERT(h_val != NULL); |
| |
| h_bufobj = duk_push_bufferobject_raw(ctx, |
| DUK_HOBJECT_FLAG_EXTENSIBLE | |
| DUK_HOBJECT_FLAG_BUFFEROBJECT | |
| DUK_HOBJECT_CLASS_AS_FLAGS(DUK_HOBJECT_CLASS_ARRAYBUFFER), |
| DUK_BIDX_ARRAYBUFFER_PROTOTYPE); |
| DUK_ASSERT(h_bufobj != NULL); |
| |
| duk__set_bufobj_buffer(ctx, h_bufobj, h_val); |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj); |
| |
| return h_bufobj; |
| } |
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Shared offset/length coercion helper.  Coerces the values at
 * idx_offset/idx_length to integers and validates them against
 * h_bufarg->length.  A negative offset/length, or an offset beyond the
 * buffer, is always a RangeError.  An undefined length means "rest of
 * the buffer after offset".  A length extending past the buffer is a
 * RangeError when throw_flag != 0, otherwise it is clamped.  Results
 * are written to *out_offset / *out_length (byte values against
 * h_bufarg->length).
 */
DUK_LOCAL void duk__resolve_offset_opt_length(duk_context *ctx,
                                              duk_hbufferobject *h_bufarg,
                                              duk_idx_t idx_offset,
                                              duk_idx_t idx_length,
                                              duk_uint_t *out_offset,
                                              duk_uint_t *out_length,
                                              duk_bool_t throw_flag) {
	duk_hthread *thr;
	duk_int_t offset_signed;
	duk_int_t length_signed;
	duk_uint_t offset;
	duk_uint_t length;

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);

	offset_signed = duk_to_int(ctx, idx_offset);
	if (offset_signed < 0) {
		goto fail_range;
	}
	offset = (duk_uint_t) offset_signed;
	if (offset > h_bufarg->length) {
		goto fail_range;
	}
	DUK_ASSERT_DISABLE(offset >= 0);  /* unsigned */
	DUK_ASSERT(offset <= h_bufarg->length);

	if (duk_is_undefined(ctx, idx_length)) {
		/* Missing length: use rest of buffer after offset. */
		DUK_ASSERT(h_bufarg->length >= offset);
		length = h_bufarg->length - offset;  /* >= 0 */
	} else {
		length_signed = duk_to_int(ctx, idx_length);
		if (length_signed < 0) {
			goto fail_range;
		}
		length = (duk_uint_t) length_signed;
		DUK_ASSERT(h_bufarg->length >= offset);
		if (length > h_bufarg->length - offset) {
			/* Unlike for negative arguments, some call sites
			 * want length to be clamped if it's positive.
			 */
			if (throw_flag) {
				goto fail_range;
			} else {
				length = h_bufarg->length - offset;
			}
		}
	}
	DUK_ASSERT_DISABLE(length >= 0);  /* unsigned */
	DUK_ASSERT(offset + length <= h_bufarg->length);

	*out_offset = offset;
	*out_length = length;
	return;

 fail_range:
	DUK_ERROR_RANGE(thr, DUK_STR_INVALID_CALL_ARGS);
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Shared lenient buffer length clamping helper.  No negative indices, no
 * element/byte shifting.  Coerces the values at idx_start/idx_end to
 * integers clamped into [0, h_bufobj->length]; an undefined end means
 * "end of buffer".  Outputs satisfy 0 <= start <= end <= length.
 */
DUK_LOCAL void duk__clamp_startend_nonegidx_noshift(duk_context *ctx,
                                                    duk_hbufferobject *h_bufobj,
                                                    duk_idx_t idx_start,
                                                    duk_idx_t idx_end,
                                                    duk_int_t *out_start_offset,
                                                    duk_int_t *out_end_offset) {
	duk_int_t buffer_length;
	duk_int_t start_offset;
	duk_int_t end_offset;

	DUK_ASSERT(out_start_offset != NULL);
	DUK_ASSERT(out_end_offset != NULL);

	buffer_length = (duk_int_t) h_bufobj->length;

	/* undefined coerces to zero which is correct */
	start_offset = duk_to_int_clamped(ctx, idx_start, 0, buffer_length);
	if (duk_is_undefined(ctx, idx_end)) {
		end_offset = buffer_length;
	} else {
		/* Clamp lower bound to start_offset so end >= start. */
		end_offset = duk_to_int_clamped(ctx, idx_end, start_offset, buffer_length);
	}

	DUK_ASSERT(start_offset >= 0);
	DUK_ASSERT(start_offset <= buffer_length);
	DUK_ASSERT(end_offset >= 0);
	DUK_ASSERT(end_offset <= buffer_length);
	DUK_ASSERT(start_offset <= end_offset);

	*out_start_offset = start_offset;
	*out_end_offset = end_offset;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Shared lenient buffer length clamping helper.  Indices are treated as
 * element indices (though output values are byte offsets) which only
 * really matters for TypedArray views as other buffer object have a zero
 * shift.  Negative indices are counted from end of input slice; crossed
 * indices are clamped to zero length; and final indices are clamped
 * against input slice.  Used for e.g. ArrayBuffer slice().
 */
DUK_LOCAL void duk__clamp_startend_negidx_shifted(duk_context *ctx,
                                                  duk_hbufferobject *h_bufobj,
                                                  duk_idx_t idx_start,
                                                  duk_idx_t idx_end,
                                                  duk_int_t *out_start_offset,
                                                  duk_int_t *out_end_offset) {
	duk_int_t buffer_length;
	duk_int_t start_offset;
	duk_int_t end_offset;

	DUK_ASSERT(out_start_offset != NULL);
	DUK_ASSERT(out_end_offset != NULL);

	buffer_length = (duk_int_t) h_bufobj->length;
	buffer_length >>= h_bufobj->shift;  /* as elements */

	/* Resolve start/end offset as element indices first; arguments
	 * at idx_start/idx_end are element offsets.  Working with element
	 * indices first also avoids potential for wrapping.
	 */

	start_offset = duk_to_int(ctx, idx_start);
	if (start_offset < 0) {
		/* Negative: relative to end of slice. */
		start_offset = buffer_length + start_offset;
	}
	if (duk_is_undefined(ctx, idx_end)) {
		end_offset = buffer_length;
	} else {
		end_offset = duk_to_int(ctx, idx_end);
		if (end_offset < 0) {
			end_offset = buffer_length + end_offset;
		}
	}
	/* Note: start_offset/end_offset can still be < 0 here. */

	if (start_offset < 0) {
		start_offset = 0;
	} else if (start_offset > buffer_length) {
		start_offset = buffer_length;
	}
	if (end_offset < start_offset) {
		/* Crossed indices -> zero length slice. */
		end_offset = start_offset;
	} else if (end_offset > buffer_length) {
		end_offset = buffer_length;
	}
	DUK_ASSERT(start_offset >= 0);
	DUK_ASSERT(start_offset <= buffer_length);
	DUK_ASSERT(end_offset >= 0);
	DUK_ASSERT(end_offset <= buffer_length);
	DUK_ASSERT(start_offset <= end_offset);

	/* Convert indices to byte offsets. */
	start_offset <<= h_bufobj->shift;
	end_offset <<= h_bufobj->shift;

	*out_start_offset = start_offset;
	*out_end_offset = end_offset;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Indexed read/write helpers (also used from outside this file) |
| */ |
| |
/* Read elem_size bytes at 'p', interpret them according to
 * h_bufobj->elem_type, and push the result as a number.  Caller is
 * responsible for pointer/size validity; no checks are done here.
 * The byte copy through a duk_double_union avoids unaligned and
 * type-punned pointer reads.
 */
DUK_INTERNAL void duk_hbufferobject_push_validated_read(duk_context *ctx, duk_hbufferobject *h_bufobj, duk_uint8_t *p, duk_small_uint_t elem_size) {
	duk_double_union du;

	DUK_MEMCPY((void *) du.uc, (const void *) p, (size_t) elem_size);

	switch (h_bufobj->elem_type) {
	case DUK_HBUFFEROBJECT_ELEM_UINT8:
#if defined(DUK_USE_BUFFEROBJECT_SUPPORT)
	case DUK_HBUFFEROBJECT_ELEM_UINT8CLAMPED:
#endif
		duk_push_uint(ctx, (duk_uint_t) du.uc[0]);
		break;
#if defined(DUK_USE_BUFFEROBJECT_SUPPORT)
	/* These are not needed when only Duktape.Buffer is supported. */
	case DUK_HBUFFEROBJECT_ELEM_INT8:
		/* Double cast: reinterpret byte as signed, then widen. */
		duk_push_int(ctx, (duk_int_t) (duk_int8_t) du.uc[0]);
		break;
	case DUK_HBUFFEROBJECT_ELEM_UINT16:
		duk_push_uint(ctx, (duk_uint_t) du.us[0]);
		break;
	case DUK_HBUFFEROBJECT_ELEM_INT16:
		duk_push_int(ctx, (duk_int_t) (duk_int16_t) du.us[0]);
		break;
	case DUK_HBUFFEROBJECT_ELEM_UINT32:
		duk_push_uint(ctx, (duk_uint_t) du.ui[0]);
		break;
	case DUK_HBUFFEROBJECT_ELEM_INT32:
		duk_push_int(ctx, (duk_int_t) (duk_int32_t) du.ui[0]);
		break;
	case DUK_HBUFFEROBJECT_ELEM_FLOAT32:
		duk_push_number(ctx, (duk_double_t) du.f[0]);
		break;
	case DUK_HBUFFEROBJECT_ELEM_FLOAT64:
		duk_push_number(ctx, (duk_double_t) du.d);
		break;
#endif  /* DUK_USE_BUFFEROBJECT_SUPPORT */
	default:
		DUK_UNREACHABLE();
	}
}
| |
/* Coerce the value at the stack top according to h_bufobj->elem_type and
 * write the resulting elem_size bytes to 'p'.  The value is coerced in
 * place (index -1) and left on the stack.  Caller is responsible for
 * pointer/size validity; no checks are done here.
 */
DUK_INTERNAL void duk_hbufferobject_validated_write(duk_context *ctx, duk_hbufferobject *h_bufobj, duk_uint8_t *p, duk_small_uint_t elem_size) {
	duk_double_union du;

	/* NOTE! Caller must ensure that any side effects from the
	 * coercions below are safe.  If that cannot be guaranteed
	 * (which is normally the case), caller must coerce the
	 * argument using duk_to_number() before any pointer
	 * validations; the result of duk_to_number() always coerces
	 * without side effects here.
	 */

	switch (h_bufobj->elem_type) {
	case DUK_HBUFFEROBJECT_ELEM_UINT8:
		du.uc[0] = (duk_uint8_t) duk_to_uint32(ctx, -1);
		break;
#if defined(DUK_USE_BUFFEROBJECT_SUPPORT)
	/* These are not needed when only Duktape.Buffer is supported. */
	case DUK_HBUFFEROBJECT_ELEM_UINT8CLAMPED:
		du.uc[0] = (duk_uint8_t) duk_to_uint8clamped(ctx, -1);
		break;
	case DUK_HBUFFEROBJECT_ELEM_INT8:
		du.uc[0] = (duk_uint8_t) duk_to_int32(ctx, -1);
		break;
	case DUK_HBUFFEROBJECT_ELEM_UINT16:
		du.us[0] = (duk_uint16_t) duk_to_uint32(ctx, -1);
		break;
	case DUK_HBUFFEROBJECT_ELEM_INT16:
		du.us[0] = (duk_uint16_t) duk_to_int32(ctx, -1);
		break;
	case DUK_HBUFFEROBJECT_ELEM_UINT32:
		du.ui[0] = (duk_uint32_t) duk_to_uint32(ctx, -1);
		break;
	case DUK_HBUFFEROBJECT_ELEM_INT32:
		du.ui[0] = (duk_uint32_t) duk_to_int32(ctx, -1);
		break;
	case DUK_HBUFFEROBJECT_ELEM_FLOAT32:
		du.f[0] = (duk_float_t) duk_to_number(ctx, -1);
		break;
	case DUK_HBUFFEROBJECT_ELEM_FLOAT64:
		du.d = (duk_double_t) duk_to_number(ctx, -1);
		break;
#endif  /* DUK_USE_BUFFEROBJECT_SUPPORT */
	default:
		DUK_UNREACHABLE();
	}

	/* Byte copy through the union avoids unaligned/type-punned writes. */
	DUK_MEMCPY((void *) p, (const void *) du.uc, (size_t) elem_size);
}
| |
| /* |
| * Duktape.Buffer: constructor |
| */ |
| |
/* Duktape.Buffer constructor.  Accepts a number (new buffer of that
 * size), a plain buffer (used as-is), a string (contents copied), or a
 * buffer object (underlying plain buffer reused).  When called as a
 * constructor the result is wrapped in a Duktape.Buffer object;
 * otherwise the plain buffer is returned.
 */
DUK_INTERNAL duk_ret_t duk_bi_buffer_constructor(duk_context *ctx) {
	duk_hthread *thr;
	duk_size_t buf_size;
	duk_small_int_t buf_dynamic;
	duk_uint8_t *buf_data;
	const duk_uint8_t *src_data;

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);

	/*
	 * Constructor arguments are currently somewhat compatible with
	 * (keep it that way if possible):
	 *
	 *   http://nodejs.org/api/buffer.html
	 *
	 * Note that the ToBuffer() coercion (duk_to_buffer()) does NOT match
	 * the constructor behavior.
	 */

	/* Second argument selects a dynamic buffer; a missing argument
	 * reads as false -> fixed buffer.
	 */
	buf_dynamic = duk_get_boolean(ctx, 1);  /* default to false */

	switch (duk_get_type(ctx, 0)) {
	case DUK_TYPE_NUMBER: {
		/* new buffer of specified size */
		buf_size = (duk_size_t) duk_to_int(ctx, 0);
		(void) duk_push_buffer(ctx, buf_size, buf_dynamic);
		break;
	}
	case DUK_TYPE_BUFFER: {
		/* return input buffer, converted to a Duktape.Buffer object
		 * if called as a constructor (no change if called as a
		 * function).
		 */
		duk_set_top(ctx, 1);
		break;
	}
	case DUK_TYPE_STRING: {
		/* new buffer with string contents */
		src_data = (const duk_uint8_t *) duk_get_lstring(ctx, 0, &buf_size);
		DUK_ASSERT(src_data != NULL);  /* even for zero-length string */
		buf_data = (duk_uint8_t *) duk_push_buffer(ctx, buf_size, buf_dynamic);
		DUK_MEMCPY((void *) buf_data, (const void *) src_data, (size_t) buf_size);
		break;
	}
	case DUK_TYPE_OBJECT: {
		/* For all duk_hbufferobjects, get the plain buffer inside
		 * without making a copy.  This is compatible with Duktape 1.2
		 * but means that a slice/view information is ignored and the
		 * full underlying buffer is returned.
		 *
		 * If called as a constructor, a new Duktape.Buffer object
		 * pointing to the same plain buffer is created below.
		 */
		duk_hbufferobject *h_bufobj;
		h_bufobj = (duk_hbufferobject *) duk_get_hobject(ctx, 0);
		DUK_ASSERT(h_bufobj != NULL);
		if (!DUK_HOBJECT_IS_BUFFEROBJECT((duk_hobject *) h_bufobj)) {
			return DUK_RET_TYPE_ERROR;
		}
		if (h_bufobj->buf == NULL) {
			/* Bufferobject with no backing buffer. */
			return DUK_RET_TYPE_ERROR;
		}
		duk_push_hbuffer(ctx, h_bufobj->buf);
		break;
	}
	case DUK_TYPE_NONE:
	default: {
		return DUK_RET_TYPE_ERROR;
	}
	}
	DUK_ASSERT(duk_is_buffer(ctx, -1));

	/* stack is unbalanced, but: [ <something> buf ] */

	if (duk_is_constructor_call(ctx)) {
		duk_hbufferobject *h_bufobj;
		duk_hbuffer *h_val;

		h_val = duk_get_hbuffer(ctx, -1);
		DUK_ASSERT(h_val != NULL);

		/* Wrap the plain buffer in a Duktape.Buffer object. */
		h_bufobj = duk_push_bufferobject_raw(ctx,
		                                     DUK_HOBJECT_FLAG_EXTENSIBLE |
		                                     DUK_HOBJECT_FLAG_BUFFEROBJECT |
		                                     DUK_HOBJECT_CLASS_AS_FLAGS(DUK_HOBJECT_CLASS_BUFFER),
		                                     DUK_BIDX_BUFFER_PROTOTYPE);
		DUK_ASSERT(h_bufobj != NULL);

		duk__set_bufobj_buffer(ctx, h_bufobj, h_val);

		DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj);
	}
	/* Note: unbalanced stack on purpose */

	return 1;
}
| |
| /* |
| * Node.js Buffer: constructor |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_constructor(duk_context *ctx) { |
| /* Internal class is Object: Object.prototype.toString.call(new Buffer(0)) |
| * prints "[object Object]". |
| */ |
| duk_int_t len; |
| duk_int_t i; |
| duk_hbuffer *h_buf; |
| duk_hbufferobject *h_bufobj; |
| duk_size_t buf_size; |
| |
| switch (duk_get_type(ctx, 0)) { |
| case DUK_TYPE_BUFFER: { |
| /* Custom behavior: plain buffer is used as internal buffer |
| * without making a copy (matches Duktape.Buffer). |
| */ |
| duk_set_top(ctx, 1); /* -> [ buffer ] */ |
| break; |
| } |
| case DUK_TYPE_NUMBER: { |
| len = duk_to_int_clamped(ctx, 0, 0, DUK_INT_MAX); |
| (void) duk_push_fixed_buffer(ctx, (duk_size_t) len); |
| break; |
| } |
| case DUK_TYPE_OBJECT: { |
| duk_uint8_t *buf; |
| |
| (void) duk_get_prop_string(ctx, 0, "length"); |
| len = duk_to_int_clamped(ctx, -1, 0, DUK_INT_MAX); |
| duk_pop(ctx); |
| buf = (duk_uint8_t *) duk_push_fixed_buffer(ctx, (duk_size_t) len); |
| for (i = 0; i < len; i++) { |
| /* XXX: fast path for array arguments? */ |
| duk_get_prop_index(ctx, 0, (duk_uarridx_t) i); |
| buf[i] = (duk_uint8_t) (duk_to_uint32(ctx, -1) & 0xffU); |
| duk_pop(ctx); |
| } |
| break; |
| } |
| case DUK_TYPE_STRING: { |
| /* ignore encoding for now */ |
| duk_dup(ctx, 0); |
| (void) duk_to_buffer(ctx, -1, &buf_size); |
| break; |
| } |
| default: |
| return DUK_RET_TYPE_ERROR; |
| } |
| |
| DUK_ASSERT(duk_is_buffer(ctx, -1)); |
| h_buf = duk_get_hbuffer(ctx, -1); |
| DUK_ASSERT(h_buf != NULL); |
| |
| h_bufobj = duk_push_bufferobject_raw(ctx, |
| DUK_HOBJECT_FLAG_EXTENSIBLE | |
| DUK_HOBJECT_FLAG_BUFFEROBJECT | |
| DUK_HOBJECT_CLASS_AS_FLAGS(DUK_HOBJECT_CLASS_BUFFER), |
| DUK_BIDX_NODEJS_BUFFER_PROTOTYPE); |
| DUK_ASSERT(h_bufobj != NULL); |
| |
| h_bufobj->buf = h_buf; |
| DUK_HBUFFER_INCREF(thr, h_buf); |
| DUK_ASSERT(h_bufobj->offset == 0); |
| h_bufobj->length = (duk_int_t) DUK_HBUFFER_GET_SIZE(h_buf); |
| DUK_ASSERT(h_bufobj->elem_type == DUK_HBUFFEROBJECT_ELEM_UINT8); |
| |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj); |
| |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
/* Stub when buffer object support is compiled out. */
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_constructor(duk_context *ctx) {
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * ArrayBuffer, DataView, and TypedArray constructors |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* ArrayBuffer constructor.  Must be called as a constructor.  Accepts a
 * plain buffer (used as-is, custom behavior) or a non-negative length
 * (fresh fixed buffer, explicitly zeroed when the config doesn't zero
 * buffer data).
 */
DUK_INTERNAL duk_ret_t duk_bi_arraybuffer_constructor(duk_context *ctx) {
	duk_hthread *thr;
	duk_hbufferobject *h_bufobj;
	duk_hbuffer *h_val;

	DUK_ASSERT_CTX_VALID(ctx);
	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);

	/* XXX: function flag to make this automatic? */
	if (!duk_is_constructor_call(ctx)) {
		return DUK_RET_TYPE_ERROR;
	}

	if (duk_is_buffer(ctx, 0)) {
		/* Custom behavior: plain buffer is used as internal buffer
		 * without making a copy (matches Duktape.Buffer).
		 */

		h_val = duk_get_hbuffer(ctx, 0);
		DUK_ASSERT(h_val != NULL);

		/* XXX: accept any duk_hbufferobject type as an input also? */
	} else {
		duk_int_t len;
		/* Negative length is a RangeError. */
		len = duk_to_int(ctx, 0);
		if (len < 0) {
			goto fail_length;
		}
		(void) duk_push_fixed_buffer(ctx, (duk_size_t) len);
		h_val = (duk_hbuffer *) duk_get_hbuffer(ctx, -1);
		DUK_ASSERT(h_val != NULL);

#if !defined(DUK_USE_ZERO_BUFFER_DATA)
		/* Khronos/ES6 requires zeroing even when DUK_USE_ZERO_BUFFER_DATA
		 * is not set.
		 */
		DUK_ASSERT(!DUK_HBUFFER_HAS_DYNAMIC((duk_hbuffer *) h_val));
		DUK_MEMZERO((void *) DUK_HBUFFER_FIXED_GET_DATA_PTR(thr->heap, h_val), (duk_size_t) len);
#endif
	}

	h_bufobj = duk_push_bufferobject_raw(ctx,
	                                     DUK_HOBJECT_FLAG_EXTENSIBLE |
	                                     DUK_HOBJECT_FLAG_BUFFEROBJECT |
	                                     DUK_HOBJECT_CLASS_AS_FLAGS(DUK_HOBJECT_CLASS_ARRAYBUFFER),
	                                     DUK_BIDX_ARRAYBUFFER_PROTOTYPE);
	DUK_ASSERT(h_bufobj != NULL);

	duk__set_bufobj_buffer(ctx, h_bufobj, h_val);
	DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj);

	return 1;

 fail_length:
	return DUK_RET_RANGE_ERROR;
}
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
/* Stub when buffer object support is compiled out. */
DUK_INTERNAL duk_ret_t duk_bi_arraybuffer_constructor(duk_context *ctx) {
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| |
| /* Format of magic, bits: |
| * 0...1: elem size shift (0-3) |
| * 2...5: elem type (DUK_HBUFFEROBJECT_ELEM_xxx) |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_typedarray_constructor(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_tval *tv; |
| duk_hobject *h_obj; |
| duk_hbufferobject *h_bufobj = NULL; |
| duk_hbufferobject *h_bufarr = NULL; |
| duk_hbufferobject *h_bufarg = NULL; |
| duk_hbuffer *h_val; |
| duk_small_uint_t magic; |
| duk_small_uint_t shift; |
| duk_small_uint_t elem_type; |
| duk_small_uint_t elem_size; |
| duk_small_uint_t class_num; |
| duk_small_uint_t proto_bidx; |
| duk_uint_t align_mask; |
| duk_uint_t elem_length; |
| duk_int_t elem_length_signed; |
| duk_uint_t byte_length; |
| duk_small_uint_t copy_mode; |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| |
| /* XXX: function flag to make this automatic? */ |
| if (!duk_is_constructor_call(ctx)) { |
| return DUK_RET_TYPE_ERROR; |
| } |
| |
| /* We could fit built-in index into magic but that'd make the magic |
| * number dependent on built-in numbering (genbuiltins.py doesn't |
| * handle that yet). So map both class and prototype from the |
| * element type. |
| */ |
| magic = duk_get_current_magic(ctx); |
| shift = magic & 0x03; /* bits 0...1: shift */ |
| elem_type = (magic >> 2) & 0x0f; /* bits 2...5: type */ |
| elem_size = 1 << shift; |
| align_mask = elem_size - 1; |
| DUK_ASSERT(elem_type < sizeof(duk__buffer_proto_from_elemtype) / sizeof(duk_uint8_t)); |
| proto_bidx = duk__buffer_proto_from_elemtype[elem_type]; |
| DUK_ASSERT(proto_bidx < DUK_NUM_BUILTINS); |
| DUK_ASSERT(elem_type < sizeof(duk__buffer_class_from_elemtype) / sizeof(duk_uint8_t)); |
| class_num = duk__buffer_class_from_elemtype[elem_type]; |
| |
| DUK_DD(DUK_DDPRINT("typedarray constructor, magic=%d, shift=%d, elem_type=%d, " |
| "elem_size=%d, proto_bidx=%d, class_num=%d", |
| (int) magic, (int) shift, (int) elem_type, (int) elem_size, |
| (int) proto_bidx, (int) class_num)); |
| |
| /* Argument variants. When the argument is an ArrayBuffer a view to |
| * the same buffer is created; otherwise a new ArrayBuffer is always |
| * created. |
| */ |
| |
| tv = duk_get_tval(ctx, 0); |
| DUK_ASSERT(tv != NULL); /* arg count */ |
| if (DUK_TVAL_IS_OBJECT(tv)) { |
| h_obj = DUK_TVAL_GET_OBJECT(tv); |
| DUK_ASSERT(h_obj != NULL); |
| |
| if (DUK_HOBJECT_GET_CLASS_NUMBER(h_obj) == DUK_HOBJECT_CLASS_ARRAYBUFFER) { |
| /* ArrayBuffer: unlike any other argument variant, create |
| * a view into the existing buffer. |
| */ |
| |
| duk_int_t byte_offset_signed; |
| duk_uint_t byte_offset; |
| |
| h_bufarg = (duk_hbufferobject *) h_obj; |
| |
| byte_offset_signed = duk_to_int(ctx, 1); |
| if (byte_offset_signed < 0) { |
| goto fail_arguments; |
| } |
| byte_offset = (duk_uint_t) byte_offset_signed; |
| if (byte_offset > h_bufarg->length || |
| (byte_offset & align_mask) != 0) { |
| /* Must be >= 0 and multiple of element size. */ |
| goto fail_arguments; |
| } |
| if (duk_is_undefined(ctx, 2)) { |
| DUK_ASSERT(h_bufarg->length >= byte_offset); |
| byte_length = h_bufarg->length - byte_offset; |
| if ((byte_length & align_mask) != 0) { |
| /* Must be element size multiple from |
| * start offset to end of buffer. |
| */ |
| goto fail_arguments; |
| } |
| elem_length = (byte_length >> shift); |
| } else { |
| elem_length_signed = duk_to_int(ctx, 2); |
| if (elem_length_signed < 0) { |
| goto fail_arguments; |
| } |
| elem_length = (duk_uint_t) elem_length_signed; |
| byte_length = elem_length << shift; |
| if ((byte_length >> shift) != elem_length) { |
| /* Byte length would overflow. */ |
| /* XXX: easier check with less code? */ |
| goto fail_arguments; |
| } |
| DUK_ASSERT(h_bufarg->length >= byte_offset); |
| if (byte_length > h_bufarg->length - byte_offset) { |
| /* Not enough data. */ |
| goto fail_arguments; |
| } |
| } |
| DUK_UNREF(elem_length); |
| DUK_ASSERT_DISABLE(byte_offset >= 0); |
| DUK_ASSERT(byte_offset <= h_bufarg->length); |
| DUK_ASSERT_DISABLE(byte_length >= 0); |
| DUK_ASSERT(byte_offset + byte_length <= h_bufarg->length); |
| DUK_ASSERT((elem_length << shift) == byte_length); |
| |
| h_bufobj = duk_push_bufferobject_raw(ctx, |
| DUK_HOBJECT_FLAG_EXTENSIBLE | |
| DUK_HOBJECT_FLAG_BUFFEROBJECT | |
| DUK_HOBJECT_CLASS_AS_FLAGS(class_num), |
| proto_bidx); |
| h_val = h_bufarg->buf; |
| if (h_val == NULL) { |
| return DUK_RET_TYPE_ERROR; |
| } |
| h_bufobj->buf = h_val; |
| DUK_HBUFFER_INCREF(thr, h_val); |
| h_bufobj->offset = h_bufarg->offset + byte_offset; |
| h_bufobj->length = byte_length; |
| h_bufobj->shift = (duk_uint8_t) shift; |
| h_bufobj->elem_type = (duk_uint8_t) elem_type; |
| h_bufobj->is_view = 1; |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj); |
| |
| /* Set .buffer to the argument ArrayBuffer. */ |
| duk_dup(ctx, 0); |
| duk_xdef_prop_stridx(ctx, -2, DUK_STRIDX_LC_BUFFER, DUK_PROPDESC_FLAGS_NONE); |
| duk_compact(ctx, -1); |
| return 1; |
| } else if (DUK_HOBJECT_IS_BUFFEROBJECT(h_obj)) { |
| /* TypedArray (or other non-ArrayBuffer duk_hbufferobject). |
| * Conceptually same behavior as for an Array-like argument, |
| * with a few fast paths. |
| */ |
| |
| h_bufarg = (duk_hbufferobject *) h_obj; |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufarg); |
| elem_length_signed = (duk_int_t) (h_bufarg->length >> h_bufarg->shift); |
| if (h_bufarg->buf == NULL) { |
| return DUK_RET_TYPE_ERROR; |
| } |
| |
| /* Select copy mode. Must take into account element |
| * compatibility and validity of the underlying source |
| * buffer. |
| */ |
| |
| DUK_DDD(DUK_DDDPRINT("selecting copy mode for bufobj arg, " |
| "src byte_length=%ld, src shift=%d, " |
| "src/dst elem_length=%ld; " |
| "dst shift=%d -> dst byte_length=%ld", |
| (long) h_bufarg->length, (int) h_bufarg->shift, |
| (long) elem_length_signed, (int) shift, |
| (long) (elem_length_signed << shift))); |
| |
| copy_mode = 2; /* default is explicit index read/write copy */ |
| DUK_ASSERT(elem_type < sizeof(duk__buffer_elemtype_copy_compatible) / sizeof(duk_uint16_t)); |
| if (DUK_HBUFFEROBJECT_VALID_SLICE(h_bufarg)) { |
| if ((duk__buffer_elemtype_copy_compatible[elem_type] & (1 << h_bufarg->elem_type)) != 0) { |
| DUK_DDD(DUK_DDDPRINT("source/target are copy compatible, memcpy")); |
| DUK_ASSERT(shift == h_bufarg->shift); /* byte sizes will match */ |
| copy_mode = 0; |
| } else { |
| DUK_DDD(DUK_DDDPRINT("source/target not copy compatible but valid, fast copy")); |
| copy_mode = 1; |
| } |
| } |
| } else { |
| /* Array or Array-like */ |
| elem_length_signed = (duk_int_t) duk_get_length(ctx, 0); |
| copy_mode = 2; |
| } |
| } else if (DUK_TVAL_IS_BUFFER(tv)) { |
| /* Accept plain buffer values like array initializers |
| * (new in Duktape 1.4.0). |
| */ |
| duk_hbuffer *h_srcbuf; |
| h_srcbuf = DUK_TVAL_GET_BUFFER(tv); |
| elem_length_signed = (duk_int_t) DUK_HBUFFER_GET_SIZE(h_srcbuf); |
| copy_mode = 2; /* XXX: could add fast path for u8 compatible views */ |
| } else { |
| /* Non-object argument is simply int coerced, matches |
| * V8 behavior (except for "null", which we coerce to |
| * 0 but V8 TypeErrors). |
| */ |
| elem_length_signed = duk_to_int(ctx, 0); |
| copy_mode = 3; |
| } |
| if (elem_length_signed < 0) { |
| goto fail_arguments; |
| } |
| elem_length = (duk_uint_t) elem_length_signed; |
| byte_length = (duk_uint_t) (elem_length << shift); |
| if ((byte_length >> shift) != elem_length) { |
| /* Byte length would overflow. */ |
| /* XXX: easier check with less code? */ |
| goto fail_arguments; |
| } |
| |
| DUK_DDD(DUK_DDDPRINT("elem_length=%ld, byte_length=%ld", |
| (long) elem_length, (long) byte_length)); |
| |
| /* ArrayBuffer argument is handled specially above; the rest of the |
| * argument variants are handled by shared code below. |
| */ |
| |
| /* Push a new ArrayBuffer (becomes view .buffer) */ |
| h_bufarr = duk__push_arraybuffer_with_length(ctx, byte_length); |
| DUK_ASSERT(h_bufarr != NULL); |
| h_val = h_bufarr->buf; |
| DUK_ASSERT(h_val != NULL); |
| |
| /* Push the resulting view object and attach the ArrayBuffer. */ |
| h_bufobj = duk_push_bufferobject_raw(ctx, |
| DUK_HOBJECT_FLAG_EXTENSIBLE | |
| DUK_HOBJECT_FLAG_BUFFEROBJECT | |
| DUK_HOBJECT_CLASS_AS_FLAGS(class_num), |
| proto_bidx); |
| |
| h_bufobj->buf = h_val; |
| DUK_HBUFFER_INCREF(thr, h_val); |
| DUK_ASSERT(h_bufobj->offset == 0); |
| h_bufobj->length = byte_length; |
| h_bufobj->shift = (duk_uint8_t) shift; |
| h_bufobj->elem_type = (duk_uint8_t) elem_type; |
| h_bufobj->is_view = 1; |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj); |
| |
| /* Set .buffer */ |
| duk_dup(ctx, -2); |
| duk_xdef_prop_stridx(ctx, -2, DUK_STRIDX_LC_BUFFER, DUK_PROPDESC_FLAGS_NONE); |
| duk_compact(ctx, -1); |
| |
| /* Copy values, the copy method depends on the arguments. |
| * |
| * Copy mode decision may depend on the validity of the underlying |
| * buffer of the source argument; there must be no harmful side effects |
| * from there to here for copy_mode to still be valid. |
| */ |
| DUK_DDD(DUK_DDDPRINT("copy mode: %d", (int) copy_mode)); |
| switch (copy_mode) { |
| case 0: { |
| /* Use byte copy. */ |
| |
| duk_uint8_t *p_src; |
| duk_uint8_t *p_dst; |
| |
| DUK_ASSERT(h_bufobj != NULL); |
| DUK_ASSERT(h_bufobj->buf != NULL); |
| DUK_ASSERT(DUK_HBUFFEROBJECT_VALID_SLICE(h_bufobj)); |
| DUK_ASSERT(h_bufarg != NULL); |
| DUK_ASSERT(h_bufarg->buf != NULL); |
| DUK_ASSERT(DUK_HBUFFEROBJECT_VALID_SLICE(h_bufarg)); |
| |
| p_dst = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_bufobj); |
| p_src = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_bufarg); |
| |
| DUK_DDD(DUK_DDDPRINT("using memcpy: p_src=%p, p_dst=%p, byte_length=%ld", |
| (void *) p_src, (void *) p_dst, (long) byte_length)); |
| |
| DUK_MEMCPY((void *) p_dst, (const void *) p_src, (size_t) byte_length); |
| break; |
| } |
| case 1: { |
| /* Copy values through direct validated reads and writes. */ |
| |
| duk_small_uint_t src_elem_size; |
| duk_small_uint_t dst_elem_size; |
| duk_uint8_t *p_src; |
| duk_uint8_t *p_src_end; |
| duk_uint8_t *p_dst; |
| |
| DUK_ASSERT(h_bufobj != NULL); |
| DUK_ASSERT(h_bufobj->buf != NULL); |
| DUK_ASSERT(DUK_HBUFFEROBJECT_VALID_SLICE(h_bufobj)); |
| DUK_ASSERT(h_bufarg != NULL); |
| DUK_ASSERT(h_bufarg->buf != NULL); |
| DUK_ASSERT(DUK_HBUFFEROBJECT_VALID_SLICE(h_bufarg)); |
| |
| src_elem_size = 1 << h_bufarg->shift; |
| dst_elem_size = elem_size; |
| |
| p_src = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_bufarg); |
| p_dst = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_bufobj); |
| p_src_end = p_src + h_bufarg->length; |
| |
| DUK_DDD(DUK_DDDPRINT("using fast copy: p_src=%p, p_src_end=%p, p_dst=%p, " |
| "src_elem_size=%d, dst_elem_size=%d", |
| (void *) p_src, (void *) p_src_end, (void *) p_dst, |
| (int) src_elem_size, (int) dst_elem_size)); |
| |
| while (p_src != p_src_end) { |
| DUK_DDD(DUK_DDDPRINT("fast path per element copy loop: " |
| "p_src=%p, p_src_end=%p, p_dst=%p", |
| (void *) p_src, (void *) p_src_end, (void *) p_dst)); |
| /* A validated read() is always a number, so it's write coercion |
| * is always side effect free an won't invalidate pointers etc. |
| */ |
| duk_hbufferobject_push_validated_read(ctx, h_bufarg, p_src, src_elem_size); |
| duk_hbufferobject_validated_write(ctx, h_bufobj, p_dst, dst_elem_size); |
| duk_pop(ctx); |
| p_src += src_elem_size; |
| p_dst += dst_elem_size; |
| } |
| break; |
| } |
| case 2: { |
| /* Copy values by index reads and writes. Let virtual |
| * property handling take care of coercion. |
| */ |
| duk_uint_t i; |
| |
| DUK_DDD(DUK_DDDPRINT("using slow copy")); |
| |
| for (i = 0; i < elem_length; i++) { |
| duk_get_prop_index(ctx, 0, (duk_uarridx_t) i); |
| duk_put_prop_index(ctx, -2, (duk_uarridx_t) i); |
| } |
| break; |
| } |
| default: |
| case 3: { |
| /* No copy, leave zero bytes in the buffer. There's no |
| * ambiguity with Float32/Float64 because zero bytes also |
| * represent 0.0. |
| */ |
| #if !defined(DUK_USE_ZERO_BUFFER_DATA) |
| /* Khronos/ES6 requires zeroing even when DUK_USE_ZERO_BUFFER_DATA |
| * is not set. |
| */ |
| DUK_ASSERT(!DUK_HBUFFER_HAS_DYNAMIC((duk_hbuffer *) h_val)); |
| DUK_MEMZERO((void *) DUK_HBUFFER_FIXED_GET_DATA_PTR(thr->heap, h_val), (duk_size_t) byte_length); |
| #endif |
| |
| DUK_DDD(DUK_DDDPRINT("using no copy")); |
| break; |
| } |
| } |
| |
| return 1; |
| |
| fail_arguments: |
| return DUK_RET_RANGE_ERROR; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_typedarray_constructor(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_dataview_constructor(duk_context *ctx) { |
| duk_hbufferobject *h_bufarg; |
| duk_hbufferobject *h_bufobj; |
| duk_hbuffer *h_val; |
| duk_uint_t offset; |
| duk_uint_t length; |
| |
| /* XXX: function flag to make this automatic? */ |
| if (!duk_is_constructor_call(ctx)) { |
| return DUK_RET_TYPE_ERROR; |
| } |
| |
| h_bufarg = duk__require_bufobj_value(ctx, 0); |
| DUK_ASSERT(h_bufarg != NULL); |
| |
| duk__resolve_offset_opt_length(ctx, h_bufarg, 1, 2, &offset, &length, 1 /*throw_flag*/); |
| DUK_ASSERT(offset <= h_bufarg->length); |
| DUK_ASSERT(offset + length <= h_bufarg->length); |
| |
| h_bufobj = duk_push_bufferobject_raw(ctx, |
| DUK_HOBJECT_FLAG_EXTENSIBLE | |
| DUK_HOBJECT_FLAG_BUFFEROBJECT | |
| DUK_HOBJECT_CLASS_AS_FLAGS(DUK_HOBJECT_CLASS_DATAVIEW), |
| DUK_BIDX_DATAVIEW_PROTOTYPE); |
| |
| h_val = h_bufarg->buf; |
| if (h_val == NULL) { |
| return DUK_RET_TYPE_ERROR; |
| } |
| h_bufobj->buf = h_val; |
| DUK_HBUFFER_INCREF(thr, h_val); |
| h_bufobj->offset = h_bufarg->offset + offset; |
| h_bufobj->length = length; |
| DUK_ASSERT(h_bufobj->shift == 0); |
| DUK_ASSERT(h_bufobj->elem_type == DUK_HBUFFEROBJECT_ELEM_UINT8); |
| h_bufobj->is_view = 1; |
| |
| /* The DataView .buffer property is ordinarily set to the argument |
| * which is an ArrayBuffer. We accept any duk_hbufferobject as |
| * an argument and .buffer will be set to the argument regardless |
| * of what it is. This may be a bit confusing if the argument |
| * is e.g. a DataView or another TypedArray view. |
| * |
| * XXX: Copy .buffer property from a DataView/TypedArray argument? |
| * Create a fresh ArrayBuffer for Duktape.Buffer and Node.js Buffer |
| * arguments? See: test-bug-dataview-buffer-prop.js. |
| */ |
| |
| duk_dup(ctx, 0); |
| duk_xdef_prop_stridx(ctx, -2, DUK_STRIDX_LC_BUFFER, DUK_PROPDESC_FLAGS_NONE); |
| duk_compact(ctx, -1); |
| |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj); |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_dataview_constructor(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * ArrayBuffer.isView() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_arraybuffer_isview(duk_context *ctx) { |
| duk_hobject *h_obj; |
| duk_bool_t ret = 0; |
| |
| h_obj = duk_get_hobject(ctx, 0); |
| if (h_obj != NULL && DUK_HOBJECT_IS_BUFFEROBJECT(h_obj)) { |
| ret = ((duk_hbufferobject *) h_obj)->is_view; |
| } |
| duk_push_boolean(ctx, ret); |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_arraybuffer_isview(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer: toString([encoding], [start], [end]) |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_tostring(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_hbufferobject *h_this; |
| duk_int_t start_offset, end_offset; |
| duk_uint8_t *buf_slice; |
| duk_size_t slice_length; |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| |
| h_this = duk__get_bufobj_this(ctx); |
| if (h_this == NULL) { |
| /* XXX: happens e.g. when evaluating: String(Buffer.prototype). */ |
| duk_push_string(ctx, "[object Object]"); |
| return 1; |
| } |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_this); |
| |
| /* ignore encoding for now */ |
| |
| duk__clamp_startend_nonegidx_noshift(ctx, h_this, 1 /*idx_start*/, 2 /*idx_end*/, &start_offset, &end_offset); |
| |
| slice_length = (duk_size_t) (end_offset - start_offset); |
| buf_slice = (duk_uint8_t *) duk_push_fixed_buffer(ctx, slice_length); |
| DUK_ASSERT(buf_slice != NULL); |
| |
| if (h_this->buf == NULL) { |
| goto type_error; |
| } |
| |
| if (DUK_HBUFFEROBJECT_VALID_BYTEOFFSET_EXCL(h_this, start_offset + slice_length)) { |
| DUK_MEMCPY((void *) buf_slice, |
| (const void *) (DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this) + start_offset), |
| (size_t) slice_length); |
| } else { |
| /* not covered, return all zeroes */ |
| ; |
| } |
| |
| duk_to_string(ctx, -1); |
| return 1; |
| |
| type_error: |
| return DUK_RET_TYPE_ERROR; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_tostring(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Duktape.Buffer: toString(), valueOf() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_buffer_prototype_tostring_shared(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_tval *tv; |
| duk_small_int_t to_string = duk_get_current_magic(ctx); |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| |
| tv = duk_get_borrowed_this_tval(ctx); |
| DUK_ASSERT(tv != NULL); |
| |
| if (DUK_TVAL_IS_BUFFER(tv)) { |
| duk_hbuffer *h_buf; |
| h_buf = DUK_TVAL_GET_BUFFER(tv); |
| DUK_ASSERT(h_buf != NULL); |
| duk_push_hbuffer(ctx, h_buf); |
| } else if (DUK_TVAL_IS_OBJECT(tv)) { |
| duk_hobject *h; |
| duk_hbufferobject *h_bufobj; |
| |
| /* Accept any duk_hbufferobject, though we're only normally |
| * called for Duktape.Buffer values. |
| */ |
| h = DUK_TVAL_GET_OBJECT(tv); |
| DUK_ASSERT(h != NULL); |
| if (!DUK_HOBJECT_IS_BUFFEROBJECT(h)) { |
| DUK_DD(DUK_DDPRINT("toString/valueOf() called for a non-bufferobject object")); |
| goto type_error; |
| } |
| h_bufobj = (duk_hbufferobject *) h; |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj); |
| |
| if (h_bufobj->buf == NULL) { |
| DUK_DD(DUK_DDPRINT("toString/valueOf() called for a bufferobject with NULL buf")); |
| goto type_error; |
| } |
| duk_push_hbuffer(ctx, h_bufobj->buf); |
| } else { |
| goto type_error; |
| } |
| |
| if (to_string) { |
| duk_to_string(ctx, -1); |
| } |
| return 1; |
| |
| type_error: |
| return DUK_RET_TYPE_ERROR; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_buffer_prototype_tostring_shared(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.prototype: toJSON() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_tojson(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_hbufferobject *h_this; |
| duk_uint8_t *buf; |
| duk_uint_t i; |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| h_this = duk__require_bufobj_this(ctx); |
| DUK_ASSERT(h_this != NULL); |
| |
| if (h_this->buf == NULL || !DUK_HBUFFEROBJECT_VALID_SLICE(h_this)) { |
| /* Serialize uncovered backing buffer as a null; doesn't |
| * really matter as long we're memory safe. |
| */ |
| duk_push_null(ctx); |
| return 1; |
| } |
| |
| duk_push_object(ctx); |
| duk_push_hstring_stridx(ctx, DUK_STRIDX_UC_BUFFER); |
| duk_put_prop_stridx(ctx, -2, DUK_STRIDX_TYPE); |
| |
| duk_push_array(ctx); |
| for (i = 0; i < h_this->length; i++) { |
| /* XXX: regetting the pointer may be overkill - we're writing |
| * to a side-effect free array here. |
| */ |
| DUK_ASSERT(h_this->buf != NULL); |
| buf = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this); |
| duk_push_uint(ctx, (duk_uint_t) buf[i]); |
| duk_put_prop_index(ctx, -2, (duk_idx_t) i); |
| } |
| duk_put_prop_stridx(ctx, -2, DUK_STRIDX_DATA); |
| |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_tojson(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.prototype.equals() |
| * Node.js Buffer.prototype.compare() |
| * Node.js Buffer.compare() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_buffer_compare_shared(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_small_uint_t magic; |
| duk_hbufferobject *h_bufarg1; |
| duk_hbufferobject *h_bufarg2; |
| duk_small_int_t comp_res; |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| |
| magic = duk_get_current_magic(ctx); |
| if (magic & 0x02) { |
| /* Static call style. */ |
| h_bufarg1 = duk__require_bufobj_value(ctx, 0); |
| h_bufarg2 = duk__require_bufobj_value(ctx, 1); |
| } else { |
| h_bufarg1 = duk__require_bufobj_this(ctx); |
| h_bufarg2 = duk__require_bufobj_value(ctx, 0); |
| } |
| DUK_ASSERT(h_bufarg1 != NULL); |
| DUK_ASSERT(h_bufarg2 != NULL); |
| |
| /* We want to compare the slice/view areas of the arguments. |
| * If either slice/view is invalid (underlying buffer is shorter) |
| * ensure equals() is false, but otherwise the only thing that |
| * matters is to be memory safe. |
| */ |
| |
| if (DUK_HBUFFEROBJECT_VALID_SLICE(h_bufarg1) && |
| DUK_HBUFFEROBJECT_VALID_SLICE(h_bufarg2)) { |
| comp_res = duk_js_data_compare((const duk_uint8_t *) DUK_HBUFFER_GET_DATA_PTR(thr->heap, h_bufarg1->buf) + h_bufarg1->offset, |
| (const duk_uint8_t *) DUK_HBUFFER_GET_DATA_PTR(thr->heap, h_bufarg2->buf) + h_bufarg2->offset, |
| (duk_size_t) h_bufarg1->length, |
| (duk_size_t) h_bufarg2->length); |
| } else { |
| comp_res = -1; /* either nonzero value is ok */ |
| } |
| |
| if (magic & 0x01) { |
| /* compare: similar to string comparison but for buffer data. */ |
| duk_push_int(ctx, comp_res); |
| } else { |
| /* equals */ |
| duk_push_boolean(ctx, (comp_res == 0)); |
| } |
| |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_buffer_compare_shared(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.prototype.fill() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_fill(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_hbufferobject *h_this; |
| const duk_uint8_t *fill_str_ptr; |
| duk_size_t fill_str_len; |
| duk_uint8_t fill_value; |
| duk_int_t fill_offset; |
| duk_int_t fill_end; |
| duk_size_t fill_length; |
| duk_uint8_t *p; |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| |
| h_this = duk__require_bufobj_this(ctx); |
| DUK_ASSERT(h_this != NULL); |
| if (h_this->buf == NULL) { |
| return DUK_RET_TYPE_ERROR; |
| } |
| |
| /* [ value offset end ] */ |
| |
| if (duk_is_string(ctx, 0)) { |
| fill_str_ptr = (const duk_uint8_t *) duk_get_lstring(ctx, 0, &fill_str_len); |
| DUK_ASSERT(fill_str_ptr != NULL); |
| } else { |
| fill_value = (duk_uint8_t) duk_to_uint32(ctx, 0); |
| fill_str_ptr = (const duk_uint8_t *) &fill_value; |
| fill_str_len = 1; |
| } |
| |
| /* Fill offset handling is more lenient than in Node.js. */ |
| |
| duk__clamp_startend_nonegidx_noshift(ctx, h_this, 1 /*idx_start*/, 2 /*idx_end*/, &fill_offset, &fill_end); |
| |
| DUK_DDD(DUK_DDDPRINT("fill: fill_value=%02x, fill_offset=%ld, fill_end=%ld, view length=%ld", |
| (unsigned int) fill_value, (long) fill_offset, (long) fill_end, (long) h_this->length)); |
| |
| DUK_ASSERT(fill_end - fill_offset >= 0); |
| DUK_ASSERT(h_this->buf != NULL); |
| |
| p = (DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this) + fill_offset); |
| fill_length = (duk_size_t) (fill_end - fill_offset); |
| if (fill_str_len == 1) { |
| /* Handle single character fills as memset() even when |
| * the fill data comes from a one-char argument. |
| */ |
| DUK_MEMSET((void *) p, (int) fill_str_ptr[0], (size_t) fill_length); |
| } else if (fill_str_len > 1) { |
| duk_size_t i, n, t; |
| |
| for (i = 0, n = (fill_end - fill_offset), t = 0; i < n; i++) { |
| p[i] = fill_str_ptr[t++]; |
| if (t >= fill_str_len) { |
| t = 0; |
| } |
| } |
| } else { |
| DUK_DDD(DUK_DDDPRINT("zero size fill pattern, ignore silently")); |
| } |
| |
| /* Return the Buffer to allow chaining: b.fill(0x11).fill(0x22, 3, 5).toString() */ |
| duk_push_this(ctx); |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_fill(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.prototype.write(string, [offset], [length], [encoding]) |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_write(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_hbufferobject *h_this; |
| duk_uint_t offset; |
| duk_uint_t length; |
| const duk_uint8_t *str_data; |
| duk_size_t str_len; |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| |
| h_this = duk__require_bufobj_this(ctx); |
| DUK_ASSERT(h_this != NULL); |
| |
| /* Argument must be a string, e.g. a buffer is not allowed. */ |
| str_data = (const duk_uint8_t *) duk_require_lstring(ctx, 0, &str_len); |
| |
| duk__resolve_offset_opt_length(ctx, h_this, 1, 2, &offset, &length, 0 /*throw_flag*/); |
| DUK_ASSERT(offset <= h_this->length); |
| DUK_ASSERT(offset + length <= h_this->length); |
| |
| /* XXX: encoding is ignored now. */ |
| |
| if (length > str_len) { |
| length = (duk_uint_t) str_len; |
| } |
| |
| if (DUK_HBUFFEROBJECT_VALID_SLICE(h_this)) { |
| /* Cannot overlap. */ |
| DUK_MEMCPY((void *) (DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this) + offset), |
| (const void *) str_data, |
| (size_t) length); |
| } else { |
| DUK_DDD(DUK_DDDPRINT("write() target buffer is not covered, silent ignore")); |
| } |
| |
| duk_push_uint(ctx, length); |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_write(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.prototype.copy() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_copy(duk_context *ctx) {
	duk_hthread *thr;
	duk_hbufferobject *h_this;
	duk_hbufferobject *h_bufarg;
	duk_int_t source_length;
	duk_int_t target_length;
	duk_int_t target_start, source_start, source_end;
	duk_uint_t target_ustart, source_ustart, source_uend;
	duk_uint_t copy_size = 0;

	/* [ targetBuffer targetStart sourceStart sourceEnd ] */

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);

	/* 'this' is the source, argument 0 the target (Node.js semantics). */
	h_this = duk__require_bufobj_this(ctx);
	h_bufarg = duk__require_bufobj_value(ctx, 0);
	DUK_ASSERT(h_this != NULL);
	DUK_ASSERT(h_bufarg != NULL);
	source_length = (duk_int_t) h_this->length;
	target_length = (duk_int_t) h_bufarg->length;

	target_start = duk_to_int(ctx, 1);
	source_start = duk_to_int(ctx, 2);
	if (duk_is_undefined(ctx, 3)) {
		/* Missing sourceEnd defaults to the whole source. */
		source_end = source_length;
	} else {
		source_end = duk_to_int(ctx, 3);
	}

	DUK_DDD(DUK_DDDPRINT("checking copy args: target_start=%ld, target_length=%ld, "
	                     "source_start=%ld, source_end=%ld, source_length=%ld",
	                     (long) target_start, (long) h_bufarg->length,
	                     (long) source_start, (long) source_end, (long) source_length));

	/* This behavior mostly mimics Node.js now. */

	if (source_start < 0 || source_end < 0 || target_start < 0) {
		/* Negative offsets cause a RangeError. */
		goto fail_bounds;
	}
	/* All offsets are now known non-negative, so the unsigned casts
	 * below are value preserving.
	 */
	source_ustart = (duk_uint_t) source_start;
	source_uend = (duk_uint_t) source_end;
	target_ustart = (duk_uint_t) target_start;
	if (source_ustart >= source_uend ||  /* crossed offsets or zero size */
	    source_ustart >= (duk_uint_t) source_length ||  /* source out-of-bounds (but positive) */
	    target_ustart >= (duk_uint_t) target_length) {  /* target out-of-bounds (but positive) */
		goto silent_ignore;
	}
	if (source_uend >= (duk_uint_t) source_length) {
		/* Source end clamped silently to available length. */
		/* NOTE(review): implicit duk_int_t -> duk_uint_t conversion;
		 * safe here because source_length >= 0, but an explicit cast
		 * would be cleaner.
		 */
		source_uend = source_length;
	}
	copy_size = source_uend - source_ustart;
	if (target_ustart + copy_size > (duk_uint_t) target_length) {
		/* Clamp to target's end if too long.
		 *
		 * NOTE: there's no overflow possibility in the comparison;
		 * both target_ustart and copy_size are >= 0 and based on
		 * values in duk_int_t range.  Adding them as duk_uint_t
		 * values is then guaranteed not to overflow.
		 */
		DUK_ASSERT(target_ustart + copy_size >= target_ustart);  /* no overflow */
		DUK_ASSERT(target_ustart + copy_size >= copy_size);  /* no overflow */
		copy_size = (duk_uint_t) target_length - target_ustart;
	}

	DUK_DDD(DUK_DDDPRINT("making copy: target_ustart=%lu source_ustart=%lu copy_size=%lu",
	                     (unsigned long) target_ustart, (unsigned long) source_ustart,
	                     (unsigned long) copy_size));

	DUK_ASSERT(copy_size >= 1);
	DUK_ASSERT(source_ustart <= (duk_uint_t) source_length);
	DUK_ASSERT(source_ustart + copy_size <= (duk_uint_t) source_length);
	DUK_ASSERT(target_ustart <= (duk_uint_t) target_length);
	DUK_ASSERT(target_ustart + copy_size <= (duk_uint_t) target_length);

	/* Ensure copy is covered by underlying buffers. */
	DUK_ASSERT(h_bufarg->buf != NULL);  /* length check */
	DUK_ASSERT(h_this->buf != NULL);  /* length check */
	if (DUK_HBUFFEROBJECT_VALID_BYTEOFFSET_EXCL(h_bufarg, target_ustart + copy_size) &&
	    DUK_HBUFFEROBJECT_VALID_BYTEOFFSET_EXCL(h_this, source_ustart + copy_size)) {
		/* Must use memmove() because copy area may overlap (source and target
		 * buffer may be the same, or from different slices.
		 */
		DUK_MEMMOVE((void *) (DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_bufarg) + target_ustart),
		            (const void *) (DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this) + source_ustart),
		            (size_t) copy_size);
	} else {
		DUK_DDD(DUK_DDDPRINT("buffer copy not covered by underlying buffer(s), ignoring"));
	}

 silent_ignore:
	/* Return value is like write(), number of bytes written.
	 * The return value matters because of code like:
	 * "off += buf.copy(...)".
	 */
	duk_push_uint(ctx, copy_size);
	return 1;

 fail_bounds:
	return DUK_RET_RANGE_ERROR;
}
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_copy(duk_context *ctx) {
	/* Stub when buffer object support is disabled: always throws
	 * an "unsupported" error.
	 */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * TypedArray.prototype.set() |
| * |
| * TypedArray set() is pretty interesting to implement because: |
| * |
| * - The source argument may be a plain array or a typedarray. If the |
| * source is a TypedArray, values are decoded and re-encoded into the |
| * target (not as a plain byte copy). This may happen even when the |
| * element byte size is the same, e.g. integer values may be re-encoded |
| * into floats. |
| * |
| * - Source and target may refer to the same underlying buffer, so that |
| * the set() operation may overlap. The specification requires that this |
| * must work as if a copy was made before the operation. Note that this |
| * is NOT a simple memmove() situation because the source and target |
| * byte sizes may be different -- e.g. a 4-byte source (Int8Array) may |
| * expand to a 16-byte target (Uint32Array) so that the target overlaps |
| * the source both from beginning and the end (unlike in typical memmove). |
| * |
| * - Even if 'buf' pointers of the source and target differ, there's no |
| * guarantee that their memory areas don't overlap. This may be the |
| * case with external buffers. |
| * |
| * Even so, it is nice to optimize for the common case: |
| * |
| * - Source and target separate buffers or non-overlapping. |
| * |
| * - Source and target have a compatible type so that a plain byte copy |
| * is possible. Note that while e.g. uint8 and int8 are compatible |
| * (coercion one way or another doesn't change the byte representation), |
| * e.g. int8 and uint8clamped are NOT compatible when writing int8 |
| * values into uint8clamped typedarray (-1 would clamp to 0 for instance). |
| * |
| * See test-bi-typedarray-proto-set.js. |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_typedarray_set(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_hbufferobject *h_this; |
| duk_hobject *h_obj; |
| duk_uarridx_t i, n; |
| duk_int_t offset_signed; |
| duk_uint_t offset_elems; |
| duk_uint_t offset_bytes; |
| |
| thr = (duk_hthread *) ctx; |
| DUK_UNREF(thr); |
| |
| h_this = duk__require_bufobj_this(ctx); |
| DUK_ASSERT(h_this != NULL); |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_this); |
| |
| if (h_this->buf == NULL) { |
| DUK_DDD(DUK_DDDPRINT("source neutered, skip copy")); |
| return 0; |
| } |
| |
| h_obj = duk_require_hobject(ctx, 0); |
| DUK_ASSERT(h_obj != NULL); |
| |
| /* XXX: V8 throws a TypeError for negative values. Would it |
| * be more useful to interpret negative offsets here from the |
| * end of the buffer too? |
| */ |
| offset_signed = duk_to_int(ctx, 1); |
| if (offset_signed < 0) { |
| return DUK_RET_TYPE_ERROR; |
| } |
| offset_elems = (duk_uint_t) offset_signed; |
| offset_bytes = offset_elems << h_this->shift; |
| if ((offset_bytes >> h_this->shift) != offset_elems) { |
| /* Byte length would overflow. */ |
| /* XXX: easier check with less code? */ |
| return DUK_RET_RANGE_ERROR; |
| } |
| if (offset_bytes > h_this->length) { |
| /* Equality may be OK but >length not. Checking |
| * this explicitly avoids some overflow cases |
| * below. |
| */ |
| return DUK_RET_RANGE_ERROR; |
| } |
| DUK_ASSERT(offset_bytes <= h_this->length); |
| |
| /* Fast path: source is a TypedArray (or any bufferobject). */ |
| |
| if (DUK_HOBJECT_IS_BUFFEROBJECT(h_obj)) { |
| duk_hbufferobject *h_bufarg; |
| duk_uint16_t comp_mask; |
| duk_small_int_t no_overlap = 0; |
| duk_uint_t src_length; |
| duk_uint_t dst_length; |
| duk_uint_t dst_length_elems; |
| duk_uint8_t *p_src_base; |
| duk_uint8_t *p_src_end; |
| duk_uint8_t *p_src; |
| duk_uint8_t *p_dst_base; |
| duk_uint8_t *p_dst; |
| duk_small_uint_t src_elem_size; |
| duk_small_uint_t dst_elem_size; |
| |
| h_bufarg = (duk_hbufferobject *) h_obj; |
| DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufarg); |
| |
| if (h_bufarg->buf == NULL) { |
| DUK_DDD(DUK_DDDPRINT("target neutered, skip copy")); |
| return 0; |
| } |
| |
| /* Nominal size check. */ |
| src_length = h_bufarg->length; /* bytes in source */ |
| dst_length_elems = (src_length >> h_bufarg->shift); /* elems in source and dest */ |
| dst_length = dst_length_elems << h_this->shift; /* bytes in dest */ |
| if ((dst_length >> h_this->shift) != dst_length_elems) { |
| /* Byte length would overflow. */ |
| /* XXX: easier check with less code? */ |
| return DUK_RET_RANGE_ERROR; |
| } |
| DUK_DDD(DUK_DDDPRINT("nominal size check: src_length=%ld, dst_length=%ld", |
| (long) src_length, (long) dst_length)); |
| DUK_ASSERT(offset_bytes <= h_this->length); |
| if (dst_length > h_this->length - offset_bytes) { |
| /* Overflow not an issue because subtraction is used on the right |
| * side and guaranteed to be >= 0. |
| */ |
| DUK_DDD(DUK_DDDPRINT("copy exceeds target buffer nominal length")); |
| return DUK_RET_RANGE_ERROR; |
| } |
| if (!DUK_HBUFFEROBJECT_VALID_BYTEOFFSET_EXCL(h_this, offset_bytes + dst_length)) { |
| DUK_DDD(DUK_DDDPRINT("copy not covered by underlying target buffer, ignore")); |
| return 0; |
| } |
| |
| p_src_base = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_bufarg); |
| p_dst_base = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this) + offset_bytes; |
| |
| /* Check actual underlying buffers for validity and that they |
| * cover the copy. No side effects are allowed after the check |
| * so that the validity status doesn't change. |
| */ |
| if (!DUK_HBUFFEROBJECT_VALID_SLICE(h_this) || |
| !DUK_HBUFFEROBJECT_VALID_SLICE(h_bufarg)) { |
| /* The condition could be more narrow and check for the |
| * copy area only, but there's no need for fine grained |
| * behavior when the underlying buffer is misconfigured. |
| */ |
| DUK_DDD(DUK_DDDPRINT("source and/or target not covered by underlying buffer, skip copy")); |
| return 0; |
| } |
| |
| /* We want to do a straight memory copy if possible: this is |
| * an important operation because .set() is the TypedArray |
| * way to copy chunks of memory. However, because set() |
| * conceptually works in terms of elements, not all views are |
| * compatible with direct byte copying. |
| * |
| * If we do manage a direct copy, the "overlap issue" handled |
| * below can just be solved using memmove() because the source |
| * and destination element sizes are necessarily equal. |
| */ |
| |
| DUK_ASSERT(h_this->elem_type < sizeof(duk__buffer_elemtype_copy_compatible) / sizeof(duk_uint16_t)); |
| comp_mask = duk__buffer_elemtype_copy_compatible[h_this->elem_type]; |
| if (comp_mask & (1 << h_bufarg->elem_type)) { |
| DUK_ASSERT(src_length == dst_length); |
| |
| DUK_DDD(DUK_DDDPRINT("fast path: able to use memmove() because views are compatible")); |
| DUK_MEMMOVE((void *) p_dst_base, (const void *) p_src_base, (size_t) dst_length); |
| return 0; |
| } |
| DUK_DDD(DUK_DDDPRINT("fast path: views are not compatible with a byte copy, copy by item")); |
| |
| /* We want to avoid making a copy to process set() but that's |
| * not always possible: the source and the target may overlap |
| * and because element sizes are different, the overlap cannot |
| * always be handled with a memmove() or choosing the copy |
| * direction in a certain way. For example, if source type is |
| * uint8 and target type is uint32, the target area may exceed |
| * the source area from both ends! |
| * |
| * Note that because external buffers may point to the same |
| * memory areas, we must ultimately make this check using |
| * pointers. |
| * |
| * NOTE: careful with side effects: any side effect may cause |
| * a buffer resize (or external buffer pointer/length update)! |
| */ |
| |
| DUK_DDD(DUK_DDDPRINT("overlap check: p_src_base=%p, src_length=%ld, " |
| "p_dst_base=%p, dst_length=%ld", |
| (void *) p_src_base, (long) src_length, |
| (void *) p_dst_base, (long) dst_length)); |
| |
| if (p_src_base >= p_dst_base + dst_length || /* source starts after dest ends */ |
| p_src_base + src_length <= p_dst_base) { /* source ends before dest starts */ |
| no_overlap = 1; |
| } |
| |
| if (!no_overlap) { |
| /* There's overlap: the desired end result is that |
| * conceptually a copy is made to avoid "trampling" |
| * of source data by destination writes. We make |
| * an actual temporary copy to handle this case. |
| */ |
| duk_uint8_t *p_src_copy; |
| |
| DUK_DDD(DUK_DDDPRINT("there is overlap, make a copy of the source")); |
| p_src_copy = (duk_uint8_t *) duk_push_fixed_buffer(ctx, src_length); |
| DUK_ASSERT(p_src_copy != NULL); |
| DUK_MEMCPY((void *) p_src_copy, (const void *) p_src_base, (size_t) src_length); |
| |
| p_src_base = p_src_copy; /* use p_src_base from now on */ |
| } |
| /* Value stack intentionally mixed size here. */ |
| |
| DUK_DDD(DUK_DDDPRINT("after overlap check: p_src_base=%p, src_length=%ld, " |
| "p_dst_base=%p, dst_length=%ld, valstack top=%ld", |
| (void *) p_src_base, (long) src_length, |
| (void *) p_dst_base, (long) dst_length, |
| (long) duk_get_top(ctx))); |
| |
| /* Ready to make the copy. We must proceed element by element |
| * and must avoid any side effects that might cause the buffer |
| * validity check above to become invalid. |
| * |
| * Although we work through the value stack here, only plain |
| * numbers are handled which should be side effect safe. |
| */ |
| |
| src_elem_size = 1 << h_bufarg->shift; |
| dst_elem_size = 1 << h_this->shift; |
| p_src = p_src_base; |
| p_dst = p_dst_base; |
| p_src_end = p_src_base + src_length; |
| |
| while (p_src != p_src_end) { |
| DUK_DDD(DUK_DDDPRINT("fast path per element copy loop: " |
| "p_src=%p, p_src_end=%p, p_dst=%p", |
| (void *) p_src, (void *) p_src_end, (void *) p_dst)); |
		/* A validated read() is always a number, so its write coercion
		 * is always side effect free and won't invalidate pointers etc.
		 */
| duk_hbufferobject_push_validated_read(ctx, h_bufarg, p_src, src_elem_size); |
| duk_hbufferobject_validated_write(ctx, h_this, p_dst, dst_elem_size); |
| duk_pop(ctx); |
| p_src += src_elem_size; |
| p_dst += dst_elem_size; |
| } |
| |
| return 0; |
| } else { |
| /* Slow path: quite slow, but we save space by using the property code |
| * to write coerce target values. We don't need to worry about overlap |
| * here because the source is not a TypedArray. |
| * |
| * We could use the bufferobject write coercion helper but since the |
| * property read may have arbitrary side effects, full validity checks |
| * would be needed for every element anyway. |
| */ |
| |
| n = (duk_uarridx_t) duk_get_length(ctx, 0); |
| DUK_ASSERT(offset_bytes <= h_this->length); |
| if ((n << h_this->shift) > h_this->length - offset_bytes) { |
| /* Overflow not an issue because subtraction is used on the right |
| * side and guaranteed to be >= 0. |
| */ |
| DUK_DDD(DUK_DDDPRINT("copy exceeds target buffer nominal length")); |
| return DUK_RET_RANGE_ERROR; |
| } |
| |
| /* There's no need to check for buffer validity status for the |
| * target here: the property access code will do that for each |
| * element. Moreover, if we did check the validity here, side |
| * effects from reading the source argument might invalidate |
| * the results anyway. |
| */ |
| |
| DUK_ASSERT_TOP(ctx, 2); |
| duk_push_this(ctx); |
| |
| for (i = 0; i < n; i++) { |
| duk_get_prop_index(ctx, 0, i); |
| duk_put_prop_index(ctx, 2, offset_elems + i); |
| } |
| } |
| |
| return 0; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_typedarray_set(duk_context *ctx) {
	/* Stub when bufferobject support is disabled: always errors out. */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.prototype.slice([start], [end]) |
| * ArrayBuffer.prototype.slice(begin, [end]) |
| * TypedArray.prototype.slice(begin, [end]) |
| * |
| * The API calls are almost identical; negative indices are counted from end |
| * of buffer, and final indices are clamped (allowing crossed indices). Main |
| * differences: |
| * |
| * - Copy/view behavior; Node.js .slice() and TypedArray .subarray() create |
| * views, ArrayBuffer .slice() creates a copy |
| * |
| * - Resulting object has a different class and prototype depending on the |
| * call (or 'this' argument) |
| * |
| * - TypedArray .subarray() arguments are element indices, not byte offsets |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Shared helper behind Node.js Buffer.prototype.slice(), ArrayBuffer
 * .slice() and TypedArray .subarray().  Magic bits select behavior:
 * bit 0 = result is marked a view, bit 1 = make an actual byte copy
 * (otherwise the result shares the underlying buffer of 'this').
 */
DUK_INTERNAL duk_ret_t duk_bi_buffer_slice_shared(duk_context *ctx) {
	duk_hthread *thr;
	duk_small_int_t magic;
	duk_small_uint_t res_class_num;
	duk_hobject *res_proto;
	duk_hbufferobject *h_this;
	duk_hbufferobject *h_bufobj;
	duk_hbuffer *h_val;
	duk_int_t start_offset, end_offset;
	duk_uint_t slice_length;

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);  /* may be unused depending on config/macros */

	/* [ start end ] */

	magic = duk_get_current_magic(ctx);
	h_this = duk__require_bufobj_this(ctx);

	/* Slice offsets are element (not byte) offsets, which only matters
	 * for TypedArray views, Node.js Buffer and ArrayBuffer have shift
	 * zero so byte and element offsets are the same.  Negative indices
	 * are counted from end of slice, crossed indices are allowed (and
	 * result in zero length result), and final values are clamped
	 * against the current slice.  There's intentionally no check
	 * against the underlying buffer here.
	 */

	duk__clamp_startend_negidx_shifted(ctx, h_this, 0 /*idx_start*/, 1 /*idx_end*/, &start_offset, &end_offset);
	DUK_ASSERT(end_offset >= start_offset);
	slice_length = (duk_uint_t) (end_offset - start_offset);

	/* The resulting buffer object gets the same class and prototype as
	 * the buffer in 'this', e.g. if the input is a Node.js Buffer the
	 * result is a Node.js Buffer; if the input is a Float32Array, the
	 * result is a Float32Array.
	 *
	 * For the class number this seems correct.  The internal prototype
	 * is not so clear: if 'this' is a bufferobject with a non-standard
	 * prototype object, that value gets copied over into the result
	 * (instead of using the standard prototype for that object type).
	 */

	res_class_num = DUK_HOBJECT_GET_CLASS_NUMBER((duk_hobject *) h_this);
	h_bufobj = duk_push_bufferobject_raw(ctx,
	                                     DUK_HOBJECT_FLAG_EXTENSIBLE |
	                                     DUK_HOBJECT_FLAG_BUFFEROBJECT |
	                                     DUK_HOBJECT_CLASS_AS_FLAGS(res_class_num),
	                                     DUK_BIDX_OBJECT_PROTOTYPE);  /* replaced below */
	DUK_ASSERT(h_bufobj != NULL);
	/* Replace the placeholder prototype with that of 'this'. */
	res_proto = DUK_HOBJECT_GET_PROTOTYPE(thr->heap, (duk_hobject *) h_this);  /* may be NULL */
	DUK_HOBJECT_SET_PROTOTYPE_UPDREF(thr, (duk_hobject *) h_bufobj, res_proto);

	h_bufobj->length = slice_length;
	h_bufobj->shift = h_this->shift;  /* inherit */
	h_bufobj->elem_type = h_this->elem_type;  /* inherit */
	h_bufobj->is_view = magic & 0x01;  /* magic bit 0: view flag */
	DUK_ASSERT(h_bufobj->is_view == 0 || h_bufobj->is_view == 1);

	h_val = h_this->buf;
	if (h_val == NULL) {
		/* No underlying buffer (e.g. neutered): cannot slice. */
		return DUK_RET_TYPE_ERROR;
	}

	if (magic & 0x02) {
		/* non-zero: make copy */
		duk_uint8_t *p_copy;
		duk_size_t copy_length;

		p_copy = (duk_uint8_t *) duk_push_fixed_buffer(ctx, (duk_size_t) slice_length);
		DUK_ASSERT(p_copy != NULL);

		/* Copy slice, respecting underlying buffer limits; remainder
		 * is left as zero.
		 */
		copy_length = DUK_HBUFFEROBJECT_CLAMP_BYTELENGTH(h_this, slice_length);
		DUK_MEMCPY((void *) p_copy,
		           (const void *) (DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this) + start_offset),
		           copy_length);

		h_val = duk_get_hbuffer(ctx, -1);
		DUK_ASSERT(h_val != NULL);

		h_bufobj->buf = h_val;
		DUK_HBUFFER_INCREF(thr, h_val);
		DUK_ASSERT(h_bufobj->offset == 0);  /* copy starts at offset 0 */

		duk_pop(ctx);  /* reachable so pop OK */
	} else {
		/* Share the underlying buffer; only the offset differs. */
		h_bufobj->buf = h_val;
		DUK_HBUFFER_INCREF(thr, h_val);
		h_bufobj->offset = (duk_uint_t) (h_this->offset + start_offset);

		/* Copy the .buffer property, needed for TypedArray.prototype.subarray().
		 *
		 * XXX: limit copy only for TypedArray classes specifically?
		 */

		duk_push_this(ctx);
		if (duk_get_prop_stridx(ctx, -1, DUK_STRIDX_LC_BUFFER)) {
			/* 'this' had a .buffer; define it on the result (at -3). */
			duk_xdef_prop_stridx(ctx, -3, DUK_STRIDX_LC_BUFFER, DUK_PROPDESC_FLAGS_NONE);
			duk_pop(ctx);
		} else {
			duk_pop_2(ctx);
		}
	}
	/* unbalanced stack on purpose */

	DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufobj);
	return 1;
}
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_buffer_slice_shared(duk_context *ctx) {
	/* Stub when bufferobject support is disabled: always errors out. */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.isEncoding() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_is_encoding(duk_context *ctx) { |
| const char *encoding; |
| |
| /* only accept lowercase 'utf8' now. */ |
| |
| encoding = duk_to_string(ctx, 0); |
| DUK_ASSERT(duk_is_string(ctx, 0)); /* guaranteed by duk_to_string() */ |
| duk_push_boolean(ctx, DUK_STRCMP(encoding, "utf8") == 0); |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_is_encoding(duk_context *ctx) {
	/* Stub when bufferobject support is disabled: always errors out. */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.isBuffer() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_is_buffer(duk_context *ctx) { |
| duk_hthread *thr; |
| duk_tval *tv; |
| duk_hobject *h; |
| duk_hobject *h_proto; |
| duk_bool_t ret = 0; |
| |
| thr = (duk_hthread *) ctx; |
| |
| DUK_ASSERT(duk_get_top(ctx) >= 1); /* nargs */ |
| tv = duk_get_tval(ctx, 0); |
| DUK_ASSERT(tv != NULL); |
| |
| if (DUK_TVAL_IS_OBJECT(tv)) { |
| h = DUK_TVAL_GET_OBJECT(tv); |
| DUK_ASSERT(h != NULL); |
| |
| h_proto = thr->builtins[DUK_BIDX_NODEJS_BUFFER_PROTOTYPE]; |
| DUK_ASSERT(h_proto != NULL); |
| |
| h = DUK_HOBJECT_GET_PROTOTYPE(thr->heap, h); |
| if (h) { |
| ret = duk_hobject_prototype_chain_contains(thr, h, h_proto, 0 /*ignore_loop*/); |
| } |
| } |
| |
| duk_push_boolean(ctx, ret); |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_is_buffer(duk_context *ctx) {
	/* Stub when bufferobject support is disabled: always errors out. */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.byteLength() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_byte_length(duk_context *ctx) { |
| const char *str; |
| duk_size_t len; |
| |
| /* At the moment Buffer(<str>) will just use the string bytes as |
| * is (ignoring encoding), so we return the string length here |
| * unconditionally. |
| */ |
| |
| str = duk_to_lstring(ctx, 0, &len); |
| DUK_UNREF(str); |
| duk_push_size_t(ctx, len); |
| return 1; |
| } |
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_byte_length(duk_context *ctx) {
	/* Stub when bufferobject support is disabled: always errors out. */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Node.js Buffer.concat() |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
/* Node.js Buffer.concat(list, [totalLength]): concatenate an Array of
 * buffer objects into a new Node.js Buffer.  Value stack is carefully
 * kept balanced across the two loops (see DUK_ASSERT_TOP() checks).
 */
DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_concat(duk_context *ctx) {
	duk_hthread *thr;
	duk_hobject *h_arg;
	duk_int_t total_length = 0;  /* NOTE(review): duk_int_t; summing many large buffer lengths could overflow — confirm upstream limits */
	duk_hbufferobject *h_bufobj;
	duk_hbufferobject *h_bufres;
	duk_hbuffer *h_val;
	duk_uint_t i, n;
	duk_uint8_t *p;
	duk_size_t space_left;
	duk_size_t copy_size;

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);

	/* Node.js accepts only actual Arrays. */
	h_arg = duk_require_hobject(ctx, 0);
	if (DUK_HOBJECT_GET_CLASS_NUMBER(h_arg) != DUK_HOBJECT_CLASS_ARRAY) {
		return DUK_RET_TYPE_ERROR;
	}

	/* Compute result length and validate argument buffers. */
	n = (duk_uint_t) duk_get_length(ctx, 0);
	for (i = 0; i < n; i++) {
		/* Neutered checks not necessary here: neutered buffers have
		 * zero 'length' so we'll effectively skip them.
		 */
		DUK_ASSERT_TOP(ctx, 2);  /* [ array totalLength ] */
		duk_get_prop_index(ctx, 0, (duk_uarridx_t) i);  /* -> [ array totalLength buf ] */
		h_bufobj = duk__require_bufobj_value(ctx, 2);
		DUK_ASSERT(h_bufobj != NULL);
		total_length += h_bufobj->length;
		duk_pop(ctx);
	}
	if (n == 1) {
		/* For the case n==1 Node.js doesn't seem to type check
		 * the sole member but we do it before returning it.
		 * For this case only the original buffer object is
		 * returned (not a copy).
		 */
		duk_get_prop_index(ctx, 0, 0);
		return 1;
	}

	/* User totalLength overrides a computed length, but we'll check
	 * every copy in the copy loop.  Note that duk_to_uint() can
	 * technically have arbitrary side effects so we need to recheck
	 * the buffers in the copy loop.
	 */
	if (!duk_is_undefined(ctx, 1) && n > 0) {
		/* For n == 0, Node.js ignores totalLength argument and
		 * returns a zero length buffer.
		 */
		total_length = duk_to_int(ctx, 1);
	}
	if (total_length < 0) {
		return DUK_RET_RANGE_ERROR;
	}

	h_bufres = duk_push_bufferobject_raw(ctx,
	                                     DUK_HOBJECT_FLAG_EXTENSIBLE |
	                                     DUK_HOBJECT_FLAG_BUFFEROBJECT |
	                                     DUK_HOBJECT_CLASS_AS_FLAGS(DUK_HOBJECT_CLASS_BUFFER),
	                                     DUK_BIDX_NODEJS_BUFFER_PROTOTYPE);
	DUK_ASSERT(h_bufres != NULL);

	/* Fixed buffers are zeroed on allocation, so skipped/short copies
	 * below leave well-defined zero bytes.
	 */
	p = (duk_uint8_t *) duk_push_fixed_buffer(ctx, total_length);
	DUK_ASSERT(p != NULL);
	space_left = total_length;

	for (i = 0; i < n; i++) {
		DUK_ASSERT_TOP(ctx, 4);  /* [ array totalLength bufres buf ] */

		duk_get_prop_index(ctx, 0, (duk_uarridx_t) i);
		h_bufobj = duk__require_bufobj_value(ctx, 4);
		DUK_ASSERT(h_bufobj != NULL);

		/* Clamp to remaining space; user-given totalLength may be
		 * smaller than the sum of the source buffers.
		 */
		copy_size = h_bufobj->length;
		if (copy_size > space_left) {
			copy_size = space_left;
		}

		if (h_bufobj->buf != NULL &&
		    DUK_HBUFFEROBJECT_VALID_SLICE(h_bufobj)) {
			DUK_MEMCPY((void *) p,
			           (const void *) DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_bufobj),
			           copy_size);
		} else {
			/* Just skip, leaving zeroes in the result. */
			;
		}
		p += copy_size;
		space_left -= copy_size;

		duk_pop(ctx);
	}

	h_val = duk_get_hbuffer(ctx, -1);
	DUK_ASSERT(h_val != NULL);

	duk__set_bufobj_buffer(ctx, h_bufres, h_val);
	DUK_ASSERT_HBUFFEROBJECT_VALID(h_bufres);

	duk_pop(ctx);  /* pop plain buffer, now reachable through h_bufres */

	return 1;  /* return h_bufres */
}
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| DUK_INTERNAL duk_ret_t duk_bi_nodejs_buffer_concat(duk_context *ctx) { |
| DUK_UNREF(ctx); |
| return DUK_RET_UNSUPPORTED_ERROR; |
| } |
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| /* |
| * Shared readfield and writefield methods |
| * |
| * The readfield/writefield methods need support for endianness and field |
| * types. All offsets are byte based so no offset shifting is needed. |
| */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| /* Format of magic, bits: |
 *  0...2: field type; 0=uint8, 1=uint16, 2=uint32, 3=float, 4=double, 5=varint, 6=unused, 7=unused
| * 3: endianness: 0=little, 1=big |
| * 4: signed: 1=yes, 0=no |
| * 5: typedarray: 1=yes, 0=no |
| */ |
| #define DUK__FLD_8BIT 0 |
| #define DUK__FLD_16BIT 1 |
| #define DUK__FLD_32BIT 2 |
| #define DUK__FLD_FLOAT 3 |
| #define DUK__FLD_DOUBLE 4 |
| #define DUK__FLD_VARINT 5 |
| #define DUK__FLD_BIGENDIAN (1 << 3) |
| #define DUK__FLD_SIGNED (1 << 4) |
| #define DUK__FLD_TYPEDARRAY (1 << 5) |
| |
| /* XXX: split into separate functions for each field type? */ |
/* Shared helper implementing the Node.js Buffer readXxx() family and
 * the DataView-style TypedArray getters.  Magic bits select field type
 * (bits 0-2), endianness (bit 3), signedness (bit 4), and TypedArray
 * vs. Node.js Buffer argument convention (bit 5).
 */
DUK_INTERNAL duk_ret_t duk_bi_buffer_readfield(duk_context *ctx) {
	duk_hthread *thr;
	duk_small_int_t magic = (duk_small_int_t) duk_get_current_magic(ctx);
	duk_small_int_t magic_ftype;
	duk_small_int_t magic_bigendian;
	duk_small_int_t magic_signed;
	duk_small_int_t magic_typedarray;
	duk_small_int_t endswap;  /* nonzero: byte swap needed after/for memcpy */
	duk_hbufferobject *h_this;
	duk_bool_t no_assert;
	duk_int_t offset_signed;
	duk_uint_t offset;
	duk_uint_t buffer_length;
	duk_uint_t check_length;
	duk_uint8_t *buf;
	duk_double_union du;  /* scratch union for reassembling raw bytes */

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);

	/* Decode magic bits; masks match the bit layout documented above. */
	magic_ftype = magic & 0x0007;
	magic_bigendian = magic & 0x0008;
	magic_signed = magic & 0x0010;
	magic_typedarray = magic & 0x0020;

	h_this = duk__require_bufobj_this(ctx);
	DUK_ASSERT(h_this != NULL);
	buffer_length = h_this->length;

	/* [ offset noAssert ], when ftype != DUK__FLD_VARINT */
	/* [ offset fieldByteLength noAssert ], when ftype == DUK__FLD_VARINT */
	/* [ offset littleEndian ], when DUK__FLD_TYPEDARRAY (regardless of ftype) */

	/* Handle TypedArray vs. Node.js Buffer arg differences */
	if (magic_typedarray) {
		no_assert = 0;
#if defined(DUK_USE_INTEGER_LE)
		endswap = !duk_to_boolean(ctx, 1);  /* 1=little endian */
#else
		endswap = duk_to_boolean(ctx, 1);  /* 1=little endian */
#endif
	} else {
		no_assert = duk_to_boolean(ctx, (magic_ftype == DUK__FLD_VARINT) ? 2 : 1);
#if defined(DUK_USE_INTEGER_LE)
		endswap = magic_bigendian;
#else
		endswap = !magic_bigendian;
#endif
	}

	/* Offset is coerced first to signed integer range and then to unsigned.
	 * This ensures we can add a small byte length (1-8) to the offset in
	 * bound checks and not wrap.
	 */
	offset_signed = duk_to_int(ctx, 0);
	offset = (duk_uint_t) offset_signed;
	if (offset_signed < 0) {
		goto fail_bounds;
	}

	DUK_DDD(DUK_DDDPRINT("readfield, buffer_length=%ld, offset=%ld, no_assert=%d, "
	                     "magic=%04x, magic_fieldtype=%d, magic_bigendian=%d, magic_signed=%d, "
	                     "endswap=%d",
	                     (long) buffer_length, (long) offset, (int) no_assert,
	                     (unsigned int) magic, (int) magic_ftype, (int) (magic_bigendian >> 3),
	                     (int) (magic_signed >> 4), (int) endswap));

	/* Update 'buffer_length' to be the effective, safe limit which
	 * takes into account the underlying buffer.  This value will be
	 * potentially invalidated by any side effect.
	 */
	check_length = DUK_HBUFFEROBJECT_CLAMP_BYTELENGTH(h_this, buffer_length);
	DUK_DDD(DUK_DDDPRINT("buffer_length=%ld, check_length=%ld",
	                     (long) buffer_length, (long) check_length));

	if (h_this->buf) {
		buf = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this);
	} else {
		/* Neutered.  We could go into the switch-case safely with
		 * buf == NULL because check_length == 0.  To avoid scanbuild
		 * warnings, fail directly instead.
		 */
		DUK_ASSERT(check_length == 0);
		goto fail_neutered;
	}
	DUK_ASSERT(buf != NULL);

	switch (magic_ftype) {
	case DUK__FLD_8BIT: {
		duk_uint8_t tmp;
		if (offset + 1U > check_length) {
			goto fail_bounds;
		}
		tmp = buf[offset];
		if (magic_signed) {
			duk_push_int(ctx, (duk_int_t) ((duk_int8_t) tmp));
		} else {
			duk_push_uint(ctx, (duk_uint_t) tmp);
		}
		break;
	}
	case DUK__FLD_16BIT: {
		duk_uint16_t tmp;
		if (offset + 2U > check_length) {
			goto fail_bounds;
		}
		/* memcpy via union avoids misaligned reads and aliasing issues */
		DUK_MEMCPY((void *) du.uc, (const void *) (buf + offset), 2);
		tmp = du.us[0];
		if (endswap) {
			tmp = DUK_BSWAP16(tmp);
		}
		if (magic_signed) {
			duk_push_int(ctx, (duk_int_t) ((duk_int16_t) tmp));
		} else {
			duk_push_uint(ctx, (duk_uint_t) tmp);
		}
		break;
	}
	case DUK__FLD_32BIT: {
		duk_uint32_t tmp;
		if (offset + 4U > check_length) {
			goto fail_bounds;
		}
		DUK_MEMCPY((void *) du.uc, (const void *) (buf + offset), 4);
		tmp = du.ui[0];
		if (endswap) {
			tmp = DUK_BSWAP32(tmp);
		}
		if (magic_signed) {
			duk_push_int(ctx, (duk_int_t) ((duk_int32_t) tmp));
		} else {
			duk_push_uint(ctx, (duk_uint_t) tmp);
		}
		break;
	}
	case DUK__FLD_FLOAT: {
		duk_uint32_t tmp;
		if (offset + 4U > check_length) {
			goto fail_bounds;
		}
		DUK_MEMCPY((void *) du.uc, (const void *) (buf + offset), 4);
		if (endswap) {
			/* Swap the raw 32-bit image, then reinterpret as float. */
			tmp = du.ui[0];
			tmp = DUK_BSWAP32(tmp);
			du.ui[0] = tmp;
		}
		duk_push_number(ctx, (duk_double_t) du.f[0]);
		break;
	}
	case DUK__FLD_DOUBLE: {
		if (offset + 8U > check_length) {
			goto fail_bounds;
		}
		DUK_MEMCPY((void *) du.uc, (const void *) (buf + offset), 8);
		if (endswap) {
			DUK_DBLUNION_BSWAP64(&du);
		}
		duk_push_number(ctx, (duk_double_t) du.d);
		break;
	}
	case DUK__FLD_VARINT: {
		/* Node.js Buffer variable width integer field.  We don't really
		 * care about speed here, so aim for shortest algorithm.
		 */
		duk_int_t field_bytelen;
		duk_int_t i, i_step, i_end;
#if defined(DUK_USE_64BIT_OPS)
		duk_int64_t tmp;
		duk_small_uint_t shift_tmp;
#else
		duk_double_t tmp;
		duk_small_int_t highbyte;
#endif
		const duk_uint8_t *p;

		field_bytelen = duk_get_int(ctx, 1);  /* avoid side effects! */
		if (field_bytelen < 1 || field_bytelen > 6) {
			goto fail_field_length;
		}
		if (offset + (duk_uint_t) field_bytelen > check_length) {
			goto fail_bounds;
		}
		p = (const duk_uint8_t *) (buf + offset);

		/* Slow gathering of value using either 64-bit arithmetic
		 * or IEEE doubles if 64-bit types not available.  Handling
		 * of negative numbers is a bit non-obvious in both cases.
		 */

		if (magic_bigendian) {
			/* Gather in big endian */
			i = 0;
			i_step = 1;
			i_end = field_bytelen;  /* one i_step over */
		} else {
			/* Gather in little endian */
			i = field_bytelen - 1;
			i_step = -1;
			i_end = -1;  /* one i_step over */
		}

#if defined(DUK_USE_64BIT_OPS)
		tmp = 0;
		do {
			DUK_ASSERT(i >= 0 && i < field_bytelen);
			tmp = (tmp << 8) + (duk_int64_t) p[i];
			i += i_step;
		} while (i != i_end);

		if (magic_signed) {
			/* Shift to sign extend. */
			shift_tmp = 64 - (field_bytelen * 8);
			tmp = (tmp << shift_tmp) >> shift_tmp;
		}

		duk_push_i64(ctx, tmp);
#else
		/* First (most significant) byte determines the sign. */
		highbyte = p[i];
		if (magic_signed && (highbyte & 0x80) != 0) {
			/* 0xff => 255 - 256 = -1; 0x80 => 128 - 256 = -128 */
			tmp = (duk_double_t) (highbyte - 256);
		} else {
			tmp = (duk_double_t) highbyte;
		}
		for (;;) {
			i += i_step;
			if (i == i_end) {
				break;
			}
			DUK_ASSERT(i >= 0 && i < field_bytelen);
			tmp = (tmp * 256.0) + (duk_double_t) p[i];
		}

		duk_push_number(ctx, tmp);
#endif
		break;
	}
	default: {  /* should never happen but default here */
		goto fail_bounds;
	}
	}

	return 1;

 fail_neutered:
 fail_field_length:
 fail_bounds:
	if (no_assert) {
		/* Node.js return value for noAssert out-of-bounds reads is
		 * usually (but not always) NaN.  Return NaN consistently.
		 */
		duk_push_nan(ctx);
		return 1;
	}

	return DUK_RET_RANGE_ERROR;
}
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_buffer_readfield(duk_context *ctx) {
	/* Stub when bufferobject support is disabled: always errors out. */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #if defined(DUK_USE_BUFFEROBJECT_SUPPORT) |
| /* XXX: split into separate functions for each field type? */ |
/* Shared helper implementing the Node.js Buffer writeXxx() family and
 * the DataView-style TypedArray setters.  Magic bit layout matches
 * duk_bi_buffer_readfield() (see comment above the DUK__FLD_xxx defines).
 */
/* XXX: split into separate functions for each field type? */
DUK_INTERNAL duk_ret_t duk_bi_buffer_writefield(duk_context *ctx) {
	duk_hthread *thr;
	duk_small_int_t magic = (duk_small_int_t) duk_get_current_magic(ctx);
	duk_small_int_t magic_ftype;
	duk_small_int_t magic_bigendian;
	duk_small_int_t magic_signed;
	duk_small_int_t magic_typedarray;
	duk_small_int_t endswap;  /* nonzero: byte swap needed before writing */
	duk_hbufferobject *h_this;
	duk_bool_t no_assert;
	duk_int_t offset_signed;
	duk_uint_t offset;
	duk_uint_t buffer_length;
	duk_uint_t check_length;
	duk_uint8_t *buf;
	duk_double_union du;  /* scratch union for byte image of the value */
	duk_int_t nbytes = 0;

	thr = (duk_hthread *) ctx;
	DUK_UNREF(thr);

	/* Decode magic bits; masks match the bit layout documented above. */
	magic_ftype = magic & 0x0007;
	magic_bigendian = magic & 0x0008;
	magic_signed = magic & 0x0010;
	magic_typedarray = magic & 0x0020;
	DUK_UNREF(magic_signed);  /* sign is irrelevant when writing raw bytes */

	h_this = duk__require_bufobj_this(ctx);
	DUK_ASSERT(h_this != NULL);
	buffer_length = h_this->length;

	/* [ value offset noAssert ], when ftype != DUK__FLD_VARINT */
	/* [ value offset fieldByteLength noAssert ], when ftype == DUK__FLD_VARINT */
	/* [ offset value littleEndian ], when DUK__FLD_TYPEDARRAY (regardless of ftype) */

	/* Handle TypedArray vs. Node.js Buffer arg differences */
	if (magic_typedarray) {
		no_assert = 0;
#if defined(DUK_USE_INTEGER_LE)
		endswap = !duk_to_boolean(ctx, 2);  /* 1=little endian */
#else
		endswap = duk_to_boolean(ctx, 2);  /* 1=little endian */
#endif
		duk_swap(ctx, 0, 1);  /* offset/value order different from Node.js */
	} else {
		no_assert = duk_to_boolean(ctx, (magic_ftype == DUK__FLD_VARINT) ? 3 : 2);
#if defined(DUK_USE_INTEGER_LE)
		endswap = magic_bigendian;
#else
		endswap = !magic_bigendian;
#endif
	}

	/* Offset is coerced first to signed integer range and then to unsigned.
	 * This ensures we can add a small byte length (1-8) to the offset in
	 * bound checks and not wrap.
	 */
	offset_signed = duk_to_int(ctx, 1);
	offset = (duk_uint_t) offset_signed;

	/* We need 'nbytes' even for a failed offset; return value must be
	 * (offset + nbytes) even when write fails due to invalid offset.
	 */
	if (magic_ftype != DUK__FLD_VARINT) {
		DUK_ASSERT(magic_ftype >= 0 && magic_ftype < (duk_small_int_t) (sizeof(duk__buffer_nbytes_from_fldtype) / sizeof(duk_uint8_t)));
		nbytes = duk__buffer_nbytes_from_fldtype[magic_ftype];
	} else {
		nbytes = duk_get_int(ctx, 2);
		if (nbytes < 1 || nbytes > 6) {
			goto fail_field_length;
		}
	}
	DUK_ASSERT(nbytes >= 1 && nbytes <= 8);

	/* Now we can check offset validity. */
	if (offset_signed < 0) {
		goto fail_bounds;
	}

	DUK_DDD(DUK_DDDPRINT("writefield, value=%!T, buffer_length=%ld, offset=%ld, no_assert=%d, "
	                     "magic=%04x, magic_fieldtype=%d, magic_bigendian=%d, magic_signed=%d, "
	                     "endswap=%d",
	                     duk_get_tval(ctx, 0), (long) buffer_length, (long) offset, (int) no_assert,
	                     (unsigned int) magic, (int) magic_ftype, (int) (magic_bigendian >> 3),
	                     (int) (magic_signed >> 4), (int) endswap));

	/* Coerce value to a number before computing check_length, so that
	 * the field type specific coercion below can't have side effects
	 * that would invalidate check_length.
	 */
	duk_to_number(ctx, 0);

	/* Update 'buffer_length' to be the effective, safe limit which
	 * takes into account the underlying buffer.  This value will be
	 * potentially invalidated by any side effect.
	 */
	check_length = DUK_HBUFFEROBJECT_CLAMP_BYTELENGTH(h_this, buffer_length);
	DUK_DDD(DUK_DDDPRINT("buffer_length=%ld, check_length=%ld",
	                     (long) buffer_length, (long) check_length));

	if (h_this->buf) {
		buf = DUK_HBUFFEROBJECT_GET_SLICE_BASE(thr->heap, h_this);
	} else {
		/* Neutered.  We could go into the switch-case safely with
		 * buf == NULL because check_length == 0.  To avoid scanbuild
		 * warnings, fail directly instead.
		 */
		DUK_ASSERT(check_length == 0);
		goto fail_neutered;
	}
	DUK_ASSERT(buf != NULL);

	switch (magic_ftype) {
	case DUK__FLD_8BIT: {
		if (offset + 1U > check_length) {
			goto fail_bounds;
		}
		/* sign doesn't matter when writing */
		buf[offset] = (duk_uint8_t) duk_to_uint32(ctx, 0);
		break;
	}
	case DUK__FLD_16BIT: {
		duk_uint16_t tmp;
		if (offset + 2U > check_length) {
			goto fail_bounds;
		}
		tmp = (duk_uint16_t) duk_to_uint32(ctx, 0);
		if (endswap) {
			tmp = DUK_BSWAP16(tmp);
		}
		du.us[0] = tmp;
		/* sign doesn't matter when writing */
		DUK_MEMCPY((void *) (buf + offset), (const void *) du.uc, 2);
		break;
	}
	case DUK__FLD_32BIT: {
		duk_uint32_t tmp;
		if (offset + 4U > check_length) {
			goto fail_bounds;
		}
		tmp = (duk_uint32_t) duk_to_uint32(ctx, 0);
		if (endswap) {
			tmp = DUK_BSWAP32(tmp);
		}
		du.ui[0] = tmp;
		/* sign doesn't matter when writing */
		DUK_MEMCPY((void *) (buf + offset), (const void *) du.uc, 4);
		break;
	}
	case DUK__FLD_FLOAT: {
		duk_uint32_t tmp;
		if (offset + 4U > check_length) {
			goto fail_bounds;
		}
		du.f[0] = (duk_float_t) duk_to_number(ctx, 0);
		if (endswap) {
			/* Swap the raw 32-bit image of the float before writing. */
			tmp = du.ui[0];
			tmp = DUK_BSWAP32(tmp);
			du.ui[0] = tmp;
		}
		/* sign doesn't matter when writing */
		DUK_MEMCPY((void *) (buf + offset), (const void *) du.uc, 4);
		break;
	}
	case DUK__FLD_DOUBLE: {
		if (offset + 8U > check_length) {
			goto fail_bounds;
		}
		du.d = (duk_double_t) duk_to_number(ctx, 0);
		if (endswap) {
			DUK_DBLUNION_BSWAP64(&du);
		}
		/* sign doesn't matter when writing */
		DUK_MEMCPY((void *) (buf + offset), (const void *) du.uc, 8);
		break;
	}
	case DUK__FLD_VARINT: {
		/* Node.js Buffer variable width integer field.  We don't really
		 * care about speed here, so aim for shortest algorithm.
		 */
		duk_int_t field_bytelen;
		duk_int_t i, i_step, i_end;
#if defined(DUK_USE_64BIT_OPS)
		duk_int64_t tmp;
#else
		duk_double_t tmp;
#endif
		duk_uint8_t *p;

		field_bytelen = (duk_int_t) nbytes;
		if (offset + (duk_uint_t) field_bytelen > check_length) {
			goto fail_bounds;
		}

		/* Slow writing of value using either 64-bit arithmetic
		 * or IEEE doubles if 64-bit types not available.  There's
		 * no special sign handling when writing varints.
		 */

		if (magic_bigendian) {
			/* Write in big endian */
			i = field_bytelen;  /* one i_step added at top of loop */
			i_step = -1;
			i_end = 0;
		} else {
			/* Write in little endian */
			i = -1;  /* one i_step added at top of loop */
			i_step = 1;
			i_end = field_bytelen - 1;
		}

		/* XXX: The duk_to_number() cast followed by integer coercion
		 * is platform specific so NaN, +/- Infinity, and out-of-bounds
		 * values result in platform specific output now.
		 * See: test-bi-nodejs-buffer-proto-varint-special.js
		 */

#if defined(DUK_USE_64BIT_OPS)
		tmp = (duk_int64_t) duk_to_number(ctx, 0);
		p = (duk_uint8_t *) (buf + offset);
		do {
			i += i_step;
			DUK_ASSERT(i >= 0 && i < field_bytelen);
			p[i] = (duk_uint8_t) (tmp & 0xff);
			tmp = tmp >> 8;  /* unnecessary shift for last byte */
		} while (i != i_end);
#else
		tmp = duk_to_number(ctx, 0);
		p = (duk_uint8_t *) (buf + offset);
		do {
			i += i_step;
			tmp = DUK_FLOOR(tmp);
			DUK_ASSERT(i >= 0 && i < field_bytelen);
			p[i] = (duk_uint8_t) (DUK_FMOD(tmp, 256.0));
			tmp = tmp / 256.0;  /* unnecessary div for last byte */
		} while (i != i_end);
#endif
		break;
	}
	default: {  /* should never happen but default here */
		goto fail_bounds;
	}
	}

	/* Node.js Buffer: return offset + #bytes written (i.e. next
	 * write offset).
	 */
	if (magic_typedarray) {
		/* For TypedArrays 'undefined' return value is specified
		 * by ES6 (matches V8).
		 */
		return 0;
	}
	duk_push_uint(ctx, offset + nbytes);
	return 1;

 fail_neutered:
 fail_field_length:
 fail_bounds:
	if (no_assert) {
		/* Node.js return value for failed writes is offset + #bytes
		 * that would have been written.
		 */
		/* XXX: for negative input offsets, 'offset' will be a large
		 * positive value so the result here is confusing.
		 */
		if (magic_typedarray) {
			return 0;
		}
		duk_push_uint(ctx, offset + nbytes);
		return 1;
	}
	return DUK_RET_RANGE_ERROR;
}
| #else /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
DUK_INTERNAL duk_ret_t duk_bi_buffer_writefield(duk_context *ctx) {
	/* Stub when bufferobject support is disabled: always errors out. */
	DUK_UNREF(ctx);
	return DUK_RET_UNSUPPORTED_ERROR;
}
| #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */ |
| |
| #undef DUK__FLD_8BIT |
| #undef DUK__FLD_16BIT |
| #undef DUK__FLD_32BIT |
| #undef DUK__FLD_FLOAT |
| #undef DUK__FLD_DOUBLE |
| #undef DUK__FLD_VARINT |
| #undef DUK__FLD_BIGENDIAN |
| #undef DUK__FLD_SIGNED |
| #undef DUK__FLD_TYPEDARRAY |