/*---------------------------------------------------------------------------
 * Closure compiler and functions; including the switch() code generation.
 *
 *---------------------------------------------------------------------------
 * Closures make it possible to treat code as data. This covers
 * both 'function pointers' (efun, simul-efun and lfun closures) as well
 * as functions compiled from data at runtime (lambda closures).
 *
 * The data for closures is stored in two places: in T_CLOSURE-svalues and
 * in additional lambda structures. The exact type of the closure is
 * stored in the secondary type of the svalue, the x.closure_type.
 * Depending on this closure type, the data part of the svalue (union u)
 * holds additional data or not.
 *
 * operator closure:   type = CLOSURE_OPERATOR (-0x1800) + instr
 *     These are the closures for the LPC operators (e.g. #'&& or #'?).
 *     The 'instr' is usually the machine instruction implementing
 *     the operator (see lex::symbol_operator() for the exact mapping).
 *     u.ob is the object this closure is bound to.
 *
 * efun closure:   type = CLOSURE_EFUN (-0x1000) + instr
 *     These are the closures for the LPC efuns (e.g. #'atan or
 *     #'call_other). The 'instr' is usually the machine instruction
 *     implementing the efun (see lex::symbol_efun() for the exact mapping).
 *     u.ob is the object this closure is bound to.
 *
 * Operator and efun closures could be implemented using the same number
 * range because the use of unique machine instructions guarantees no
 * value collision. This way, however, distinguishing the two cases is
 * easier.
 *
 * simul-efun closure:   type = CLOSURE_SIMUL_EFUN (-0x0800) + index
 *     These are the closures for the simul-efuns. The 'index' is the
 *     index of the simul-efun in the function table of the simul-efun
 *     object.
 *     u.ob is the object this closure is bound to.
 *
 * lfun closure:   type = CLOSURE_LFUN (0)
 *     Reference to a lfun in an object.
 *     u.lambda points to the lambda structure with the detailed data.
 *
 * identifier closure:   type = CLOSURE_IDENTIFIER (1)
 *     Reference to a variable in this object.
 *     u.lambda points to the lambda structure with the detailed data.
 *
 * preliminary closure:   type = CLOSURE_PRELIMINARY (2)
 *     TODO: ???
 *
 * bound lambda closure:   type = CLOSURE_BOUND_LAMBDA (3)
 *     This is an unbound lambda closure which was bound to an object.
 *     To allow binding the same unbound lambda to different objects
 *     at the same time, this construct uses a double indirection:
 *     u.lambda points to a lambda structure with the binding information,
 *     which then points to the actual unbound lambda structure.
 *
 * lambda closure:   type = CLOSURE_LAMBDA (4)
 *     Lambda closure bound to an object at compilation time.
 *     u.lambda points to the lambda structure with the compiled function.
 *
 * unbound lambda closure:   type = CLOSURE_UNBOUND_LAMBDA (5)
 *     Unbound lambda closure, which is not bound to any object at
 *     compile time.
 *     u.lambda points to the lambda structure with the compiled function.
 *
 *
 * The additional information for closures is stored in structures
 * of type lambda_s, which are refcounted. For lambda closures the lambda
 * structure is in fact embedded in the middle of a larger memory block:
 * it is prepended by an array of the svalues used as constants in
 * the function, and followed by the actual function code.
 *
 * struct lambda_s
 * {
 *     svalue_t values[]   (lambda closures only)
 *         For lambda closures, the constant values used by the function,
 *         which are indexed from the end ((svalue_t*)lambda_t).
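 *         (Illustrative sketch: given a lambda_t *l, constant #i is
 *          expected at ((svalue_t *)l)[-i-1]; compare the
 *          svp[-0x100].u.number access used for the extended value
 *          count in replace_program_lambda_adjust() below.)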
 *     p_int ref;
 *     object_t *ob;
 *         Object the closure is bound to (for bound UNBOUND_LAMBDAs just
 *         during the execution of the lambda).
 *     union --- Closure information ---
 *     {
 *         unsigned short var_index;
 *             _IDENTIFIER: index in the variable table
 *             Function indices are lower than CLOSURE_IDENTIFIER_OFFS
 *             (0xe800), variable indices are higher.
 *             The special value VANISHED_VARCLOSURE_INDEX (-1) is
 *             used to mark vanished variables.
 *
 *         struct -- CLOSURE_LFUN
 *         {
 *             object_t *ob;
 *                 Originating object
 *             unsigned short index
 *                 Index in the object's function table
 *         } lfun;
 *
 *         bytecode_t code[1];
 *             LAMBDA and UNBOUND_LAMBDA closures: the function code.
 *             The first bytes are:
 *                +0: uint8 num_values
 *                +1: uint8 num_args
 *                +2: uint8 num_vars
 *                +3...: the function code
 *             'num_values' is the number of constants stored before
 *             the lambda structure. If it is 0xff, the actual number
 *             is stored in .values[-0x100].u.number.
 *
 *         lambda_t *lambda;
 *             BOUND_LAMBDA: pointer to the UNBOUND_LAMBDA structure.
 *
 *     } function;
 *
 *     svalue_t context[.lfun.context_size];
 *         lfun-closure context variables, if any.
 *         Putting this array into the function.lfun somehow causes memory
 *         corruption because some lambda structures won't be allocated
 *         large enough.
 * }
 *
 *
 * If a lambda() is compiled while replace_program() is scheduled, the
 * construction information is stored in the protector and the lambda
 * is recompiled when the program replacement is put into place.
 *
 *
 * To handle lambda closures, two more svalue types are needed:
 *
 * Symbols (T_SYMBOL svalue)
 *     Symbols are names to be used as variable names.
 *     The name is stored as shared string in u.string, the number
 *     of quotes is stored in x.quotes.
 *     If the number of quotes is reduced to 0, the lambda compiler
 *     will find/create a local variable with this name.
 *
 * Quoted Arrays (T_QUOTED_ARRAY svalue)
 *     Quoted arrays are needed to put array literals into lambda
 *     closures which usually treat arrays as code.
 *     u.vec is the reference to the array, x.quotes the number
 *     of quotes.
 *---------------------------------------------------------------------------
 */

#include "driver.h"
#include "typedefs.h"

#include "my-alloca.h"
#include <stdarg.h>  /* va_list, used by lambda_error() */
#include <stdio.h>

#include "closure.h"
#include "array.h"
#include "backend.h"
#include "exec.h"
#include "instrs.h"
#include "interpret.h"
#include "lex.h"
#include "main.h"
#include "mstrings.h"
#include "object.h"
#include "prolang.h"
#include "simulate.h"
#include "simul_efun.h"
#include "stdstrings.h"
#ifdef USE_STRUCTS
#include "structs.h"
#endif /* USE_STRUCTS */
#include "svalue.h"
#ifdef USE_SWAP
#include "swap.h"
#endif
#include "switch.h"
#include "xalloc.h"

#ifdef USE_NEW_INLINES
#include "i-svalue_cmp.h"
#endif /* USE_NEW_INLINES */

/*-------------------------------------------------------------------------*/

#define MAX_LAMBDA_LEVELS 0x8000
  /* Maximum recursion depth for compile_value.
   */

#define SYMTAB_START_SIZE 16
  /* Initial number of entries in the work_area.symbols table.
   */

#define CODE_BUFFER_START_SIZE 1024
  /* Initial size of the work_area codebuffer.
   */

#define VALUE_START_MAX 0x20
  /* Initial number of value entries in the work_area.values table.
   */

/* Flags passed to, resp. returned by, compile_value().
 * They must fit into one bytecode_t (the #'?
 * needs that)
 */
#define ZERO_ACCEPTED     0x01  /* in: a return value of zero need not be coded */
#define VOID_ACCEPTED     0x02  /* in: any return value can be left out */
#define VOID_GIVEN        0x04  /* out: no return value given */
#define NEGATE_ACCEPTED   0x08  /* in: Caller accepts a reversed logic result */
#define NEGATE_GIVEN      0x10  /* out: Result is in reversed logic */
#define REF_REJECTED      0x20  /* in: lvalues not accepted */

#define VOID_WANTED (ZERO_ACCEPTED | VOID_ACCEPTED | NEGATE_ACCEPTED)
  /* all "don't care for the result" flags.
   */


/* Flags passed to compile_lvalue() */
#define USE_INDEX_LVALUE  0x1  /* Use INDEX_LVALUE instead of PUSH_INDEX_LVALUE */
#define PROTECT_LVALUE    0x2  /* Protect the generated lvalue */


#define UNIMPLEMENTED \
        lambda_error("Unimplemented - contact the maintainer\n");
  /* Guess :-) */

/*-------------------------------------------------------------------------*/
/* Types */

typedef struct symbol_s    symbol_t;
typedef struct work_area_s work_area_t;


/* --- struct lambda_replace_program_protector ---
 *
 * If closures are bound to objects for which a program replacement
 * has been scheduled, a list of these structures, one for each closure,
 * is kept in replace_ob_s.lambda_rpp to hold the necessary information
 * to adjust the closures after the program has been replaced.
 *
 * The list is created by calls to lambda_ref_replace_program() and evaluated
 * in lambda_replace_program_adjust().
 */

struct lambda_replace_program_protector
{
    struct lambda_replace_program_protector *next;  /* The list link */
    svalue_t  l;      /* The closure bound, counted as reference */
    p_int     size;   /* 0, or for lambda()s the number of parameters in .args */
    vector_t *args;   /* If .size != 0: the parameter array of a lambda() */
    svalue_t  block;  /* If .size != 0: the lambda() body */
};


/* --- struct symbol_s: Symbol entry ---
 *
 * All encountered local symbols (arguments and variables) are stored using
 * this structure in a hashed symbol table in the work_area.
 */

struct symbol_s
{
    string_t *name;        /* Tabled name of the symbol (not counted as ref) */
    symbol_t *next;        /* Next symbol structure in hash list */
    symbol_t *next_local;
    int index;             /* Index number of this symbol, -1 if unassigned */
};


/* --- struct work_area_s: Information for the lambda compilation ---
 *
 * This structure holds all the information for a lambda compilation.
 * In theory this allows nested compilations, but this feature hasn't been
 * implemented yet.
 */

struct work_area_s
{
    symbol_t   **symbols;      /* Dynamic hashtable of lists of symbols */
    mp_int       symbol_max;   /* Allocated size of .symbols in byte */
    mp_int       symbol_mask;  /* Mask hashvalue -> .symbols index */
    mp_int       symbols_left;
      /* Size(!) of symbols left to enter into .symbols before the table
       * has to be enlarged.
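       * (The enlargement itself, a doubling plus rehash of all
       *  entries, happens in make_symbol() below.)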
       */

    bytecode_p   code;
      /* Memory block for the generated code, filled from the beginning */
    bytecode_p   codep;          /* First free bytecode in .code */
    mp_int       code_max;       /* Size of .code in byte */
    mp_int       code_left;      /* Unused .code left in byte */

    svalue_t    *values;
      /* Memory block for the values, filled from the end */
    svalue_t    *valuep;         /* Last value assigned in .values */
    mp_int       value_max;      /* Size of *.values in entries */
    mp_int       values_left;    /* Number of unused values */

    mp_int       num_locals;     /* Number of local vars, including args */
    mp_int       levels_left;    /* Remaining recursion depth for compile_value() */

    object_t    *lambda_origin;  /* Object the lambda will be bound to */

    int          break_stack;      /* Current size of the break stack */
    int          max_break_stack;  /* Max size of the break stack */
};
/* TODO: Use a pooled allocator for the memory held by the work_area (or
 * TODO:: all workareas?)
 */

/*-------------------------------------------------------------------------*/
/* Variables for the switch() code generator */

static Bool switch_initialized;
  /* TRUE if the case_blocks/case_state variables are valid.
   * Set to FALSE whenever a new lambda is compiled.
   */

case_state_t case_state;
  /* State of the current switch generation.
   */

static case_list_entry_t *case_blocks = NULL;
static case_list_entry_t *case_blocks_last = NULL;
  /* List of allocated case_list_entry_t blocks, freed in free_symbols().
   * The blocks are arrays [CASE_BLOCKING_FACTOR] of case_list_entry_t's,
   * and the first entry is used to link the blocks.
   * The blocks are used from the beginning, case_state.free_block points
   * to the currently used block. There may be blocks following .free_block
   * which were generated during the compilation of nested switch()es.
   */

static case_list_entry_t *save_case_free_block;
static case_list_entry_t *save_case_next_free;
static case_list_entry_t *save_case_list0;
static case_list_entry_t *save_case_list1;
  /* Saved case_state entries .free_block, .next_free, .list0, .list1
   * from a LPC compilation. This happens when lambda() is called during
   * a compile, e.g. from an LPC error handler.
   * These values, too, are restored in free_symbols().
   */

/*-------------------------------------------------------------------------*/
/* Lambda compiler variables */

static work_area_t current = { 0, 0, 0, 0, 0, 0 };
  /* The current (and in this implementation only) work area.
   */

/*-------------------------------------------------------------------------*/
/* Forward declarations */

static void lambda_error VARPROT((const char *error_str, ...), printf, 1, 2) NORETURN;
static void free_symbols(void);
static Bool is_lvalue (svalue_t *argp, int index_lvalue);
static void compile_lvalue(svalue_t *, int);
static lambda_t * lambda (vector_t *args, svalue_t *block, object_t *origin);

/*-------------------------------------------------------------------------*/
static INLINE int
function_cmp (const string_t *name, const program_t *prog, int ix)

/* Compare <name> with the name of function number <ix> in <prog>.
 * Result:  0: <name> is equal to the indexed name
 *         >0: <name> is smaller
 *         <0: <name> is bigger
 *
 * Note that both names are directly tabled strings, so only the pointers are
 * compared.
 */

{
    funflag_t flags;

    /* Set ix to the memory offset for the (possibly inherited)
     * function <ix>.
     */
    ix = prog->function_names[ix];
    flags = prog->functions[ix];
    while (flags & NAME_INHERITED)
    {
        inherit_t *inheritp;

        inheritp = &prog->inherit[flags & INHERIT_MASK];
        prog = inheritp->prog;
        ix -= inheritp->function_index_offset;
        flags = prog->functions[ix];
    }

    /* Return the result of the comparison */
    /* Compare the two pointers.
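     * (Sketch of the idea: with two tabled names
     *      string_t *a, *b;
     *  the call memcmp(&a, &b, sizeof a) compares the pointer
     *  values themselves, not the characters they point to.)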
     * The comparison operation has to match the one in prolang.y:epilog().
     */
    return memcmp( &name
                 , FUNCTION_NAMEP(prog->program + (flags & FUNSTART_MASK))
                 , sizeof name
                 );
} /* function_cmp() */

/*-------------------------------------------------------------------------*/
long
find_function (const string_t *name, const program_t *prog)

/* Find the function <name> (a shared string) in the program <prog>.
 * Result is the index of the function in the functions[] table,
 * or -1 if the function hasn't been found.
 */

{
    int i, o, d;  /* Test index, partition size, comparison result */
    int size;     /* Number of functions */

    if ( !(size = prog->num_function_names) )
        return -1;

    /* A simple binary search */
    i = size >> 1;
    o = (i+2) >> 1;
    for (;;)
    {
        d = function_cmp(name, prog, i);
        if (d<0)
        {
            i -= o;
            if (i < 0)
            {
                i = 0;
            }
        }
        else if (d > 0)
        {
            i += o;
            if (i >= size)
            {
                i = size-1;
            }
        }
        else
        {
            return prog->function_names[i];
        }

        if (o <= 1)
        {
            if (function_cmp(name, prog, i))
                return -1;
            return prog->function_names[i];
        }
        o = (o+1) >> 1;
    }

    /* NOTREACHED */
    return -1;
} /* find_function() */

/*-------------------------------------------------------------------------*/
Bool
closure_eq (svalue_t * left, svalue_t * right)

/* Compare the two closure svalues <left> and <right> and return TRUE if
 * they refer to the same closure.
 */

{
    int i;

    /* Operator, Efun and Simul efun closures don't have a .u.lambda
     * part.
     */
    i = left->x.generic == right->x.generic;

    if (i && (   left->x.closure_type >= 0
              || right->x.closure_type >= 0)
       )
        i = left->u.lambda == right->u.lambda;

    /* Lfun- and identifier closures can be equal even if
     * their pointers differ.
     */
    if (!i
     && left->x.closure_type == right->x.closure_type
     && (   left->x.closure_type == CLOSURE_LFUN
         || left->x.closure_type == CLOSURE_IDENTIFIER
        )
     && left->u.lambda->ob == right->u.lambda->ob
       )
    {
        if (left->x.closure_type == CLOSURE_LFUN)
        {
            i =    (   left->u.lambda->function.lfun.ob
                    == right->u.lambda->function.lfun.ob)
                && (   left->u.lambda->function.lfun.index
                    == right->u.lambda->function.lfun.index)
                && (   left->u.lambda->function.lfun.inhProg
                    == right->u.lambda->function.lfun.inhProg)
                && (   left->u.lambda->function.lfun.context_size
                    == right->u.lambda->function.lfun.context_size)
                ;
#ifdef USE_NEW_INLINES
            if (i)
            {
                unsigned int context_size, ix;

                /* There might be a difference in the context svalues.
                 * To prevent recursion, hide them while comparing them.
                 */
                context_size = left->u.lambda->function.lfun.context_size;
                left->u.lambda->function.lfun.context_size = 0;
                right->u.lambda->function.lfun.context_size = 0;

                for (ix = 0; i && ix < context_size; ix++)
                {
                    i = svalue_eq( &(left->u.lambda->context[ix])
                                 , &(right->u.lambda->context[ix])
                                 );
                }

                /* Restore the context size. */
                left->u.lambda->function.lfun.context_size = context_size;
                right->u.lambda->function.lfun.context_size = context_size;
            }
#endif /* USE_NEW_INLINES */
        }
        else /* CLOSURE_IDENTIFIER */
        {
            i =    left->u.lambda->function.var_index
                == right->u.lambda->function.var_index;
        }
    }

    return (Bool)i;
} /* closure_eq() */

/*-------------------------------------------------------------------------*/
int
closure_cmp (svalue_t * left, svalue_t * right)

/* Compare the two closure svalues <left> and <right> and return a value
 * describing their relation:
 *  -1: <left> is 'smaller' than <right>
 *   0: the closures are equal
 *   1: <left> is 'greater' than <right>
 */

{
    if (closure_eq(left, right)) return 0;

    /* First comparison criterion is the closure_type */
    if (left->x.closure_type != right->x.closure_type)
    {
        return (left->x.closure_type < right->x.closure_type) ? -1 : 1;
    }

    /* The types are identical and determine the next comparison.
     * For lfun/identifier closures, we compare the actual closure data,
     * for other closures a comparison of the lambda pointer is sufficient.
     */
    if (left->x.closure_type == CLOSURE_IDENTIFIER
     || left->x.closure_type == CLOSURE_LFUN
       )
    {
        if (left->u.lambda->ob != right->u.lambda->ob)
        {
            return (left->u.lambda->ob < right->u.lambda->ob) ? -1 : 1;
        }

        if (left->x.closure_type == CLOSURE_LFUN)
        {
#ifdef USE_NEW_INLINES
            unsigned context_size, i;
            int d;
#endif /* USE_NEW_INLINES */

            if (   left->u.lambda->function.lfun.ob
                != right->u.lambda->function.lfun.ob)
            {
                return (   left->u.lambda->function.lfun.ob
                         < right->u.lambda->function.lfun.ob)
                       ? -1 : 1;
            }

            if (   left->u.lambda->function.lfun.index
                != right->u.lambda->function.lfun.index )
                return (   left->u.lambda->function.lfun.index
                         < right->u.lambda->function.lfun.index)
                       ? -1 : 1;

            if (   left->u.lambda->function.lfun.inhProg
                != right->u.lambda->function.lfun.inhProg )
                return (   left->u.lambda->function.lfun.inhProg
                         < right->u.lambda->function.lfun.inhProg)
                       ? -1 : 1;

#ifdef USE_NEW_INLINES
            /* The difference is in the context svalues.
             * To prevent recursion, hide them while comparing them.
             */
            if (   left->u.lambda->function.lfun.context_size
                != right->u.lambda->function.lfun.context_size )
                return (   left->u.lambda->function.lfun.context_size
                         < right->u.lambda->function.lfun.context_size)
                       ? -1 : 1;

            context_size = left->u.lambda->function.lfun.context_size;
            left->u.lambda->function.lfun.context_size = 0;
            right->u.lambda->function.lfun.context_size = 0;

            for (i = 0, d = 0; d == 0 && i < context_size; i++)
            {
                d = svalue_cmp( &(left->u.lambda->context[i])
                              , &(right->u.lambda->context[i])
                              );
            }

            /* Restore the context size, then return the comparison
             * result in d.
             */
            left->u.lambda->function.lfun.context_size = context_size;
            right->u.lambda->function.lfun.context_size = context_size;
            return d;
#else
            return 0; /* Shouldn't be reached */
#endif /* USE_NEW_INLINES */
        }
        else /* CLOSURE_IDENTIFIER */
        {
            /* This is the only field left, so it is guaranteed to differ */
            return (   left->u.lambda->function.var_index
                     < right->u.lambda->function.var_index)
                   ? -1 : 1;
        }
    }

    /* Normal closure: compare the lambda pointers */
    return (left->u.lambda < right->u.lambda) ? -1 : 1;
} /* closure_cmp() */

/*-------------------------------------------------------------------------*/
Bool
lambda_ref_replace_program( object_t * curobj, lambda_t *l, int type
                          , p_int size, vector_t *args, svalue_t *block)

/* The lambda <l> of type <type> is about to be bound to the object <curobj>
 * which might be scheduled for program replacement.
 * If that is the case, a(nother) protector is added to replace_ob_s.lambda_rpp
 * and the function returns TRUE. Otherwise the function just returns FALSE.
 *
 * If <size> is not zero, it is the size of <args>, a vector with parameter
 * descriptions for a lambda(), and <block> holds the body of the lambda().
 * If <size> is zero, both <args> and <block> are undetermined.
 */

{
    replace_ob_t *r_ob;

    /* Search for a program replacement scheduled for the current
     * object.
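     * (Typical call pattern, as used by the closure creators below:
     *
     *      if ( !(prog->flags & P_REPLACE_ACTIVE)
     *        || !lambda_ref_replace_program(obj, l, type, 0, NULL, NULL) )
     *          obj->flags |= O_LAMBDA_REFERENCED;
     *  )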
     */
    for (r_ob = obj_list_replace; r_ob; r_ob = r_ob->next)
    {
        if (r_ob->ob == curobj)
        {
            /* Replacement found: add the protector */

            struct lambda_replace_program_protector *lrpp;

            l->ref++;
            lrpp = xalloc(sizeof *lrpp);
            lrpp->l.u.lambda = l;
            lrpp->l.x.closure_type = (short)type;
            lrpp->next = r_ob->lambda_rpp;
            r_ob->lambda_rpp = lrpp;
            if (size)
            {
                lrpp->size = size;
                lrpp->args = ref_array(args);
                assign_svalue_no_free(&lrpp->block, block);
            }

            return MY_TRUE;
        }
    } /* for() */

    /* No replacement found: return false */
    return MY_FALSE;
} /* lambda_ref_replace_program() */

/*-------------------------------------------------------------------------*/
void
set_closure_user (svalue_t *svp, object_t *owner)

/* Set <owner> as the new user of the closure stored in <svp> if the closure
 * is an operator-, sefun- or efun-closure, or if the closure is under
 * construction ("preliminary"). Finished lambda closures can't be rebound.
 *
 * Side effect: for preliminary closures, the function also determines the
 * proper svp->x.closure_type and updates the closure's .function.index.
 */

{
    int type;  /* Type of the closure */

    if ( !CLOSURE_MALLOCED(type = svp->x.closure_type) )
    {
        /* Operator-, sefun-, efun-closure: just rebind */

        free_object(svp->u.ob, "set_closure_user");
        svp->u.ob = ref_object(owner, "set_closure_user");
    }
    else if (type == CLOSURE_PRELIMINARY)
    {
        /* lambda closure under construction: rebind, but take care
         * of possible program replacement
         */

        int        ix;
        lambda_t  *l;
        funflag_t  flags;
        program_t *prog;

        prog = owner->prog;
        l = svp->u.lambda;
        ix = l->function.lfun.index;

        /* If the program is scheduled for replacement (or has been replaced),
         * create the protector for the closure, otherwise mark the object
         * as referenced by a lambda.
         */
        if ( !(prog->flags & P_REPLACE_ACTIVE)
         ||  !lambda_ref_replace_program( owner, l
                                        , ix >= CLOSURE_IDENTIFIER_OFFS
                                          ? CLOSURE_IDENTIFIER
                                          : CLOSURE_LFUN
                                        , 0, NULL, NULL)
           )
        {
            owner->flags |= O_LAMBDA_REFERENCED;
        }

        /* Set the svp->x.closure_type to the type of the closure. */

        if (ix >= CLOSURE_IDENTIFIER_OFFS)
        {
            /* Identifier closure */
            ix -= CLOSURE_IDENTIFIER_OFFS;
            svp->x.closure_type = CLOSURE_IDENTIFIER;

            /* Update the closure index */
            l->function.var_index = (unsigned short)ix;
        }
        else
        {
            /* lfun closure. Be careful to handle cross-defined lfuns
             * correctly.
             */

            flags = prog->functions[ix];
            if (flags & NAME_CROSS_DEFINED)
            {
                ix += CROSSDEF_NAME_OFFSET(flags);
            }
            svp->x.closure_type = CLOSURE_LFUN;

            /* Update the closure index */
            l->function.lfun.ob = ref_object(owner, "closure");
            l->function.lfun.index = (unsigned short)ix;
#ifdef USE_NEW_INLINES
            l->function.lfun.context_size = 0;
#endif /* USE_NEW_INLINES */
        }

        /* (Re)Bind the closure */
        free_object(l->ob, "closure");
        l->ob = ref_object(owner, "set_closure_user");
    }
} /* set_closure_user() */

/*-------------------------------------------------------------------------*/
void
replace_program_lambda_adjust (replace_ob_t *r_ob)

/* This function is called as the last step during the replacement of an
 * object's program, but only if the object has been marked to hold
 * closure references.
 *
 * The function is called in the backend context and catches errors during
 * its execution.
 */

{
    static struct lambda_replace_program_protector *current_lrpp;
      /* Copy of lrpp, static to survive errors */

    struct lambda_replace_program_protector *lrpp;
      /* Current protector */

    struct lambda_replace_program_protector *next_lrpp;
      /* Next protector */

    struct error_recovery_info error_recovery_info;

    /* Loop through the list of lambda protectors, adjusting
     * the lfun closures.
     * Vanished lfun closures are replaced by
     * references to master::dangling_lfun_closure() if existing.
     * Vanished identifier closures are marked with a special value
     * and just vanish.
     * TODO: Store the name somehow for error messages/sprintf/to_string?
     *
     * This is done first because these are possible building blocks.
     */
    lrpp = r_ob->lambda_rpp;
    do {
        if ( !CLOSURE_HAS_CODE(lrpp->l.x.closure_type) )
        {
            /* Yup, it's an lfun or identifier */

            if (lrpp->l.x.closure_type == CLOSURE_LFUN)
            {
                lambda_t *l;
                int i;

                /* Adjust the index of the lfun.
                 * If the lfun closure is a reference to an inherited
                 * program we need to check if the inheritance relation
                 * changes.
                 */
                l = lrpp->l.u.lambda;

                if (!l->function.lfun.inhProg)
                    i = l->function.lfun.index -= r_ob->fun_offset;
                else if (l->function.lfun.inhProg == r_ob->new_prog)
                {
                    /* First possibility: the new program is the same
                     * one the closure is pointing to.
                     * In that case, convert the closure into a straight
                     * lfun closure.
                     */

                    i = l->function.lfun.index -= r_ob->fun_offset;

                    free_prog(l->function.lfun.inhProg, MY_TRUE);
                    l->function.lfun.inhProg = NULL;
                }
                else if (l->function.lfun.index >= r_ob->fun_offset
                      && l->function.lfun.index
                         < r_ob->fun_offset + r_ob->new_prog->num_functions)
                {
                    program_t *prog;

                    /* Second possibility: the new program still
                     * inherits the program the closure is referencing.
                     * In that case, just update the inhIndex.
                     */

                    i = l->function.lfun.index -= r_ob->fun_offset;

                    /* Check that inhProg is still in the inherit chain.
                     * If not, convert the closure into a straight
                     * lfun closure.
                     */

                    prog = r_ob->new_prog;
                    while (prog != l->function.lfun.inhProg)
                    {
                        inherit_t *inheritp;

                        if (!prog->num_inherited)
                        {
                            /* Didn't find it. */
                            l->function.lfun.inhProg = NULL;
                            break;
                        }

                        SEARCH_FUNCTION_INHERIT(inheritp, prog, i);
                        i -= inheritp->function_index_offset;
                        prog = inheritp->prog;

                        if (i >= prog->num_functions)
                        {
                            /* We didn't find inhProg. */
                            l->function.lfun.inhProg = NULL;
                            break;
                        }
                    }

                    i = l->function.lfun.index;
                }
                else
                    i = -1;

                /* If the function vanished, replace it with a default */
                if (i < 0 || i >= r_ob->new_prog->num_functions)
                {
                    assert_master_ob_loaded();
                    free_object( l->function.lfun.ob
                               , "replace_program_lambda_adjust");
                    if (l->function.lfun.inhProg)
                        free_prog(l->function.lfun.inhProg, MY_TRUE);

                    l->function.lfun.ob
                      = ref_object(master_ob, "replace_program_lambda_adjust");
                    i = find_function( STR_DANGLING_LFUN
                                     , master_ob->prog);
                    l->function.lfun.index = (unsigned short)(i < 0 ? 0 : i);
                    l->function.lfun.inhProg = NULL;
                }
            }
            else /* CLOSURE_IDENTIFIER */
            {
                lambda_t *l;
                int i;

                /* Adjust the index of the identifier */
                l = lrpp->l.u.lambda;
                i = l->function.var_index -= r_ob->var_offset;

                /* If it vanished, mark it as such */
                if (i >= r_ob->new_prog->num_variables)
                {
                    l->function.var_index = VANISHED_VARCLOSURE_INDEX;
                      /* TODO: This value should be properly publicized and
                       * TODO:: tested.
                       */
                }
            }
        } /* if (!CLOSURE_HAS_CODE()) */
    } while ( NULL != (lrpp = lrpp->next) );

    /* Second pass: now adjust the lambda closures.
     * This is done by recompilation of every closure and comparison
     * with the original one. If the two closures differ, the closure
     * references now-vanished entities and has to be abandoned.
     *
     * In such a case, an error is generated and also caught: the code
     * for the closure is replaced by the instruction "undef" so that
     * accidental executions are caught.
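     * (The patch itself is a single-byte overwrite, cf. the setjmp
     *  handler below:
     *      bytecode_p p = LAMBDA_CODE(l->function.code);
     *      p[0] = F_UNDEF;
     *  )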
     */

    error_recovery_info.rt.last = rt_context;
    error_recovery_info.rt.type = ERROR_RECOVERY_BACKEND;
    rt_context = (rt_context_t *)&error_recovery_info;

    if (setjmp(error_recovery_info.con.text))
    {
        bytecode_p p;

        lrpp = current_lrpp;

        /* Replace the function with "undef" */
        p = LAMBDA_CODE(lrpp->l.u.lambda->function.code);
        p[0] = F_UNDEF;

        /* Free the protector and all held values */
        free_array(lrpp->args);
        free_svalue(&lrpp->block);
        free_closure(&lrpp->l);

        next_lrpp = lrpp->next;
        xfree(lrpp);

        /* Restart the loop */
        lrpp = next_lrpp;
    }
    else
        /* Set lrpp to the first lambda to process.
         * (Doing it here makes gcc happy).
         */
        lrpp = r_ob->lambda_rpp;

    /* lrpp here is the next protector to handle, or NULL */
    if (lrpp) do
    {
        /* If it's a lambda, adjust it */
        if (lrpp->l.x.closure_type == CLOSURE_LAMBDA)
        {
            lambda_t *l, *l2;      /* Original and recompiled closure */
            svalue_t *svp, *svp2;  /* Pointers to the two closures' values */
            mp_int num_values, num_values2, code_size2;

            current_lrpp = lrpp;  /* in case an error occurs */

            /* Remember the original lambda, and also recompile it */
            l = lrpp->l.u.lambda;
            l2 = lambda(lrpp->args, &lrpp->block, l->ob);

            svp = (svalue_t *)l;
            if ( (num_values = LAMBDA_NUM_VALUES(l->function.code)) == 0xff)
                num_values = svp[-0x100].u.number;

            svp2 = (svalue_t *)l2;
            if ( (num_values2 = LAMBDA_NUM_VALUES(l2->function.code)) == 0xff)
                num_values2 = svp2[-0x100].u.number;

            code_size2 = current.code_max - current.code_left;

            /* If the recompiled lambda differs from the original one, we
             * lose it.
             */
            if (num_values != num_values2 || lrpp->size != code_size2)
            {
                free_svalue(&lrpp->block);

                /* lrpp->block will be freed after the error, so let's fake
                 * a closure svalue and put the just-compiled closure in
                 * there.
                 */
                lrpp->block.type = T_CLOSURE;
                lrpp->block.x.closure_type = CLOSURE_UNBOUND_LAMBDA;
                lrpp->block.u.lambda = l2;

                errorf("Cannot adjust lambda closure after replace_program(), "
                       "object %s\n", get_txt(r_ob->ob->name));
            }

            /* The recompiled lambda can be used (and has to: think changed
             * indices), so replace the original by the new one.
             * We have to keep the memory of the original one as other
             * code might already reference it.
             */
            while (--num_values >= 0)
                transfer_svalue(--svp, --svp2);
            memcpy(l->function.code, l2->function.code, (size_t)code_size2);

            /* Free the (now empty) memory */
            if (l2->ob)
                free_object(l2->ob, "replace_program_lambda_adjust");
            if (l2->prog_ob)
                free_object(l2->prog_ob, "replace_program_lambda_adjust");
            xfree(svp2);
            free_array(lrpp->args);
            free_svalue(&lrpp->block);
        }

        /* lambda or not, the protector is no longer needed */
        free_closure(&lrpp->l);
        next_lrpp = lrpp->next;
        xfree(lrpp);

    } while ( NULL != (lrpp = next_lrpp) );

    /* Restore the old error recovery info */
    rt_context = error_recovery_info.rt.last;

} /* replace_program_lambda_adjust() */

/*-------------------------------------------------------------------------*/
void
closure_init_lambda (lambda_t * l, object_t * obj)

/* Initialize the freshly created lambda <l> to be bound to object <obj>
 * (if given), and set the other generic fields (.ref, .prog_ob, .prog_pc).
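 *
 * (Callers pair this with the allocation, as closure_new_lambda()
 *  below does; roughly:
 *      lambda_t *l = xalloc(SIZEOF_LAMBDA(context_size));
 *      if (l) closure_init_lambda(l, obj);
 *  )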
 */

{
    l->ref = 1;
    if (current_prog)
    {
        l->prog_ob = ref_valid_object(current_prog->blueprint, "lambda creator");
        l->prog_pc = inter_pc - current_prog->program;
    }
    else
    {
        l->prog_ob = NULL;
        l->prog_pc = 0;
    }

    if (obj)
        l->ob = ref_object(obj, "lambda object");
    else
        l->ob = NULL;
} /* closure_init_lambda() */

/*-------------------------------------------------------------------------*/
#ifndef USE_NEW_INLINES
lambda_t *
closure_new_lambda (object_t * obj, Bool raise_error)
#else /* USE_NEW_INLINES */
lambda_t *
closure_new_lambda ( object_t * obj, unsigned short context_size
                   , Bool raise_error)
#endif /* USE_NEW_INLINES */

/* Create a basic lambda closure structure, suitable to hold <context_size>
 * context values, and bound to <obj>. The structure has the generic
 * fields (.ref, .ob, .prog_ob, .prog_pc) initialized.
 *
 * The function may raise an error on out of memory if <raise_error> is TRUE,
 * or just return NULL.
 */

{
    lambda_t *l;

    /* Allocate a new lambda structure */
#ifndef USE_NEW_INLINES
    l = xalloc(sizeof(*l));
#else /* USE_NEW_INLINES */
    l = xalloc(SIZEOF_LAMBDA(context_size));
#endif /* USE_NEW_INLINES */
    if (!l)
    {
        if (raise_error)
        {
#ifndef USE_NEW_INLINES
            outofmem(sizeof(*l), "closure literal");
#else /* USE_NEW_INLINES */
            outofmem(SIZEOF_LAMBDA(context_size), "closure literal");
#endif /* USE_NEW_INLINES */
            /* NOTREACHED */
        }
        return NULL;
    }

    closure_init_lambda(l, obj);

    return l;
} /* closure_new_lambda() */

/*-------------------------------------------------------------------------*/
void
closure_identifier (svalue_t *dest, object_t * obj, int ix, Bool raise_error)

/* Create a literal variable closure, bound to <obj> and with variable
 * index <ix>. The caller has to account for any variable offsets before
 * calling this function.
 *
 * The created closure is stored as new svalue into *<dest>.
 *
 * The function may raise an error on out of memory if <raise_error> is TRUE,
 * or set *<dest> to svalue 0 otherwise.
 */

{
    lambda_t *l;

    /* Allocate and initialise a new lambda structure */
#ifndef USE_NEW_INLINES
    l = closure_new_lambda(obj, raise_error);
#else /* USE_NEW_INLINES */
    l = closure_new_lambda(obj, 0, raise_error);
#endif /* USE_NEW_INLINES */
    if (!l)
    {
        put_number(dest, 0);
        return;
    }

    /* If the object's program will be replaced, store the closure
     * in the lambda protector, otherwise mark the object as referenced by
     * a closure.
     */
    if ( !(obj->prog->flags & P_REPLACE_ACTIVE)
     ||  !lambda_ref_replace_program( obj, l, CLOSURE_IDENTIFIER
                                    , 0, NULL, NULL)
       )
    {
        obj->flags |= O_LAMBDA_REFERENCED;
    }

    dest->x.closure_type = CLOSURE_IDENTIFIER;
    l->function.var_index = (unsigned short)ix;

    /* Fill in the rest of the lambda and of the result svalue */
    dest->type = T_CLOSURE;
    dest->u.lambda = l;
} /* closure_identifier() */

/*-------------------------------------------------------------------------*/
#ifndef USE_NEW_INLINES
void
closure_lfun ( svalue_t *dest, object_t *obj, program_t *prog, int ix
             , Bool raise_error)
#else /* USE_NEW_INLINES */
void
closure_lfun ( svalue_t *dest, object_t *obj, program_t *prog, int ix
             , unsigned short num
             , Bool raise_error)
#endif /* USE_NEW_INLINES */

/* Create a literal lfun closure, bound to the object <obj>. The resulting
 * svalue is stored in *<dest>.
 *
 * The closure is defined by the function index <ix>, for which the caller
 * has to make sure that all function offsets are applied before calling
 * this function. <ix> is relative to the object's program. <prog> is
 * the program used for the lookup of this function (but <ix> is nevertheless
 * the index into the object's function table, not necessarily into the
 * function table of <prog>).
 * <num> indicates the number of context variables
 * which are initialized to svalue-0.
 *
 * The function may raise an error on out of memory if <raise_error> is TRUE,
 * or set *<dest> to svalue 0 otherwise.
 */

{
    lambda_t *l;

    /* Allocate and initialise a new lambda structure */
#ifndef USE_NEW_INLINES
    l = closure_new_lambda(obj, raise_error);
#else /* USE_NEW_INLINES */
    l = closure_new_lambda(obj, num, raise_error);
#endif /* USE_NEW_INLINES */
    if (!l)
    {
        put_number(dest, 0);
        return;
    }

    /* If the object's program will be replaced, store the closure
     * in the lambda protector, otherwise mark the object as referenced by
     * a closure.
     */
    if ( !(obj->prog->flags & P_REPLACE_ACTIVE)
     ||  !lambda_ref_replace_program( obj, l, CLOSURE_LFUN
                                    , 0, NULL, NULL)
       )
    {
        obj->flags |= O_LAMBDA_REFERENCED;
    }

    dest->x.closure_type = CLOSURE_LFUN;

    l->function.lfun.ob = ref_object(obj, "closure");
    l->function.lfun.index = (unsigned short)ix;
    l->function.lfun.inhProg = prog;
    if (prog)
        reference_prog(prog, "closure_lfun");
#ifdef USE_NEW_INLINES
    l->function.lfun.context_size = num;

    /* Init the context variables */
    while (num > 0)
    {
        num--;
        put_number(&(l->context[num]), 0);
    }
#endif /* USE_NEW_INLINES */

    /* Fill in the rest of the lambda and of the result svalue */
    dest->type = T_CLOSURE;
    dest->u.lambda = l;
} /* closure_lfun() */

/*-------------------------------------------------------------------------*/
#ifndef USE_NEW_INLINES
void
closure_literal (svalue_t *dest, int ix, unsigned short inhIndex)
#else /* USE_NEW_INLINES */
void
closure_literal ( svalue_t *dest
                , int ix, unsigned short inhIndex, unsigned short num)
#endif /* USE_NEW_INLINES */

/* Create a literal closure (lfun or variable closure), bound to the
 * current object. The resulting svalue is stored in *<dest>. The function
 * implements the instruction F_CLOSURE.
 *
 * The closure is defined by the index <ix>/<inhIndex>, which is to be
 * interpreted in the context of the current, possibly inherited, program:
 * values < CLOSURE_IDENTIFIER_OFFS are lfun indices, values above are
 * variable indices. For closures referencing inherited lfuns <inhIndex>
 * is the index+1 in the inherit list of <current_prog>. For lfun closures,
 * <num> indicates the number of context variables which are initialized
 * to svalue-0.
 *
 * The function may raise an error on out of memory.
 */

{
    if (ix >= CLOSURE_IDENTIFIER_OFFS)
    {
        ix += - CLOSURE_IDENTIFIER_OFFS
              + (current_variables - current_object->variables);
          /* the added difference takes into account that the
           * index is specified relative to the program which might
           * have been inherited.
           */
        closure_identifier(dest, current_object, ix, MY_TRUE);
    }
    else /* lfun closure */
    {
        funflag_t flags;
        program_t *prog;

        if (inhIndex)
        {
            /* inherited lfun closure */
            inherit_t *inh, *vinh;

            inh = &current_prog->inherit[inhIndex-1];

            /* Normalize pointers to functions of virtual inherits.
             * This is just for comparability of the closures.
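             * (Two closures referring to the same virtually inherited
             *  lfun should compare equal in closure_cmp() regardless of
             *  the inherit path through which they were created.)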
             */
            vinh = adjust_variable_offsets(inh, current_prog, current_object);
            if (vinh)
                inh = vinh;

            prog = inh->prog;
            flags = prog->functions[ix];
            if (!vinh)
                ix += function_index_offset;
            ix += inh->function_index_offset;
        }
        else
        {
            ix += function_index_offset;
            flags = current_object->prog->functions[ix];
            prog = NULL;
        }

        if (flags & NAME_CROSS_DEFINED)
        {
            ix += CROSSDEF_NAME_OFFSET(flags);
        }

#ifndef USE_NEW_INLINES
        closure_lfun(dest, current_object, prog, ix, MY_TRUE);
#else
        closure_lfun(dest, current_object, prog, ix, num, MY_TRUE);
#endif /* USE_NEW_INLINES */
    }
} /* closure_literal() */

/*-------------------------------------------------------------------------*/
static void
realloc_values (void)

/* Double the size of the value block in the current workspace.
 * The function is called only when all values in the current block
 * have been assigned.
 *
 * Raise an error when out of memory.
 */

{
    mp_int new_max;
    svalue_t *new_values;

    new_max = current.value_max * 2;

    new_values = xalloc(new_max * sizeof(*new_values));
    if (!new_values)
        lambda_error("Out of memory (%lu bytes) for %ld new values\n"
                    , new_max * sizeof(*new_values), new_max);

    current.values_left += current.value_max;
    memcpy( (current.valuep = new_values + current.value_max)
          , current.values
          , current.value_max * sizeof(*new_values)
          );
    xfree(current.values);

    current.values = new_values;
    current.value_max = new_max;
} /* realloc_values() */

/*-------------------------------------------------------------------------*/
static void
realloc_code (void)

/* Double the size of the code block in the current workspace.
 *
 * Raise an error when out of memory.
 */

{
    mp_int new_max;
    bytecode_p new_code;
    ptrdiff_t curr_offset;

    curr_offset = current.codep - current.code;

    new_max = current.code_max * 2;
    new_code = rexalloc(current.code, (size_t)new_max);
    if (!new_code)
        lambda_error("Out of memory (%ld bytes) for new code\n", new_max);

    current.code_left += current.code_max;
    current.code_max = new_max;
    current.code = new_code;
    current.codep = current.code + curr_offset;
} /* realloc_code() */

/*-------------------------------------------------------------------------*/
static void
lambda_error (const char *error_str, ...)

/* Raise an errorf(error_str, ...) with 0 or 1 extra argument from within
 * the lambda compiler.
 *
 * The function takes care that all memory is deallocated.
 */

{
    va_list va;

    /* Deallocate all memory held in the work_areas */
    free_symbols();
    if (current.code)
        xfree(current.code);

    if (current.values)
    {
        mp_int num_values = current.value_max - current.values_left;
        svalue_t *svp;

        for (svp = current.valuep; --num_values >= 0; )
            free_svalue(svp++);
        xfree(current.values);
    }

    /* Now raise the error */
    va_start(va, error_str);
    errorf(error_str, va_arg(va, char *)); /* One arg or nothing :-) */
      /* TODO: a verror() would be handy here */
    va_end(va);
} /* lambda_error() */

/*-------------------------------------------------------------------------*/
static void
lambda_cerror (const char *s)

/* Callback for store_case_labels(): raise an errorf(s) from within the
 * lambda compiler.
 *
 * The function takes care that all memory is deallocated.
 */

{
    lambda_error("%s\n", s);
} /* lambda_cerror() */

/*-------------------------------------------------------------------------*/
static void
lambda_cerrorl ( const char *s1, const char *s2 UNUSED
               , int line1 UNUSED, int line2 UNUSED)

/* Callback for store_case_labels(): raise an errorf(s1) from within the
 * lambda compiler.
 * store_case_labels() also passes line numbers and filename, but
 * when compiling a lambda that information is not very useful.
 *
 * The function takes care that all memory is deallocated.
 */

{
#ifdef __MWERKS__
#    pragma unused(s2,line1,line2)
#endif
    lambda_error(s1, "\n");
} /* lambda_cerrorl() */

/*-------------------------------------------------------------------------*/
static bytecode_p
lambda_get_space (p_int size)

/* Callback for store_case_labels(): make space for <size> bytes in the
 * current code space and return the pointer to the first byte.
 *
 * Internally this function reallocates the code space when necessary.
 */

{
    while (current.code_left < size)
        realloc_code();
    current.code_left -= size;
    current.codep += size;
    return current.codep - size;
} /* lambda_get_space() */

/*-------------------------------------------------------------------------*/
static void
lambda_move_switch_instructions (int len, p_int blocklen)

/* Callback from store_case_labels(): move the last instructions
 * of <blocklen> bytes forward by <len> bytes.
 */

{
    while (current.code_left < len)
        realloc_code();
    current.code_left -= len;
    current.codep += len;
    move_memory( current.codep - blocklen
               , current.codep - blocklen - len
               , (size_t)blocklen
               );
} /* lambda_move_switch_instructions() */

/*-------------------------------------------------------------------------*/
static void
free_symbols (void)

/* Free the symbols in the current workarea, and also the memory allocated
 * for the case blocks.
 */

{
    p_int i;
    symbol_t **symp, *sym, *next;

    /* Free the symbols */
    i = current.symbol_max;
    symp = current.symbols;
    do {
        for (sym = *symp++; sym; sym = next)
        {
            next = sym->next;
            xfree(sym);
        }
    } while (i -= sizeof sym);

    xfree(current.symbols);

    /* Clean up the memory for the case blocks */
    if (switch_initialized)
    {
        if (current_loc.file)
        {
            case_state.free_block = save_case_free_block;
            case_state.next_free  = save_case_next_free;
            case_state.list0 = save_case_list0;
            case_state.list1 = save_case_list1;
        }
        else
        {
            free_case_blocks();
        }
    }
} /* free_symbols() */

/*-------------------------------------------------------------------------*/
static symbol_t *
make_symbol (string_t *name)

/* Look up the symbol <name> in the current symbol table and return the
 * pointer to the symbol_t structure. If <name> is not yet in the table,
 * a new structure is generated, linked in, and returned.
 *
 * If necessary, the symbol table is enlarged.
 */

{
    p_int h;
    symbol_t *sym, **symp;

    /* Hash the pointer and look it up in the table.
     * TODO: This assumes 32-Bit ints.
     */
    h = (p_int)name;
    h ^= h >> 16;
    h ^= h >> 8;
    h ^= h >> 4;
    h &= current.symbol_mask;
    symp = (symbol_t **)((char *)current.symbols + h);
    for (sym = *symp; sym; sym = sym->next)
    {
        if (sym->name == name)
            return sym;
    }

    /* Not found: generate a new symbol entry and link it in */
    sym = xalloc(sizeof *sym);
    if (!sym)
        lambda_error("Out of memory (%lu bytes) for symbol\n"
                    , (unsigned long) sizeof(*sym));
    sym->name = name;
    sym->index = -1;
    sym->next = *symp;
    *symp = sym;

    /* Does the table have to be enlarged now? */
    if ( !(current.symbols_left -= sizeof sym) )
    {
        /* Yup. Double the size of the hashtable and re-hash all
         * existing entries.
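         * (The rehash reuses the pointer-folding hash from above:
         *      h = (p_int)sym->name;
         *      h ^= h >> 16;  h ^= h >> 8;  h ^= h >> 4;
         *      h &= current.symbol_mask;
         *  )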
         */

        symbol_t **newtab, *sym2;
        p_int i;

        sym2 = sym; /* Save the new entry */

        /* Allocate the new table and initialize it */
        current.symbols_left = current.symbol_max;
        current.symbol_max *= 2;
        symp = newtab = xalloc((size_t)current.symbol_max);
        if (!symp)
        {
            current.symbol_max /= 2;
            xfree(sym);
            lambda_error("Out of memory (%ld bytes) for symbol table\n"
                        , current.symbol_max);
        }
        current.symbol_mask = i = current.symbol_max - (long)sizeof sym;
        do {
            *symp++ = NULL;
        } while ((i -= sizeof sym) >= 0);

        /* Loop over the old table and all entries and rehash them
         * into the new table.
         * TODO: Again the hash assumes 32-Bit-ints.
         */
        i = current.symbols_left - (long)sizeof sym;
        do {
            symbol_t *next;

            for ( sym = *(symbol_t **)((char *)current.symbols+i)
                ; sym; sym = next)
            {
                next = sym->next;
                h = (p_int)sym->name;
                h ^= h >> 16;
                h ^= h >> 8;
                h ^= h >> 4;
                h &= current.symbol_mask;
                symp = (symbol_t **)((char *)newtab + h);
                sym->next = *symp;
                *symp = sym;
            }
        } while ((i -= sizeof sym) >= 0);

        /* Put the new table in place of the old one */
        xfree(current.symbols);
        current.symbols = newtab;

        sym = sym2; /* Restore the pointer to the new entry */
    }

    /* Return the new entry */
    return sym;
} /* make_symbol() */

/*-------------------------------------------------------------------------*/
static void
insert_value_push (svalue_t *value)

/* Add the <value> to the value block of the closure, and insert
 * the appropriate F_LAMBDA_(C)CONSTANT instruction to the compiled
 * code.
 */

{
    mp_int offset;  /* Index of the value in the value block */

    if (current.code_left < 3)
        realloc_code();

    offset = current.value_max - current.values_left;
    if (offset < 0xff)
    {
        /* Less than 255 values: the short instruction */
        current.code_left -= 2;
        STORE_CODE(current.codep, F_LAMBDA_CCONSTANT);
        STORE_UINT8(current.codep, (unsigned char)offset);
    }
    else
    {
        /* More than 254 values: the long instruction */
        if (offset == 0xff)
        {
            /* Offset #0xff will be used to hold the actual
             * number of values.
             */
            current.values_left--;
            offset++;
            (--current.valuep)->type = T_INVALID;
        }
        current.code_left -= 3;
        STORE_CODE(current.codep, F_LAMBDA_CONSTANT);
        STORE_SHORT(current.codep, offset);
    }

    if (--current.values_left < 0)
        realloc_values();

    /* Don't forget to copy the value itself */
    assign_svalue_no_free(--current.valuep, value);
} /* insert_value_push() */

/*-------------------------------------------------------------------------*/
static int
compile_value (svalue_t *value, int opt_flags)

/* Compile the <value> into a closure in the context of the current
 * work_area. <opt_flags> gives additional instructions about what
 * to accept or reject. The generated code is appended to the code
 * buffer in the work_area; the function itself returns a flag indicating
 * whether the code leaves a result on the stack or not.
 *
 * The function calls itself recursively for nested code sequences.
 *
 * <value> can be of these types:
 *    array: a block of instructions in lisp-ish array notation.
 *    quoted array: inserted as is with one quote less
 *    symbol, 1 quote: resolved as local variable/argument
 *    symbol, > 1 quote: inserted as symbol with one quote less
 *    other: inserted as is
 */

{
    if (!--current.levels_left)
        lambda_error("Too deep recursion inside lambda()\n");

    switch(value->type)
    {
    case T_POINTER:                             /* ----- T_POINTER ----- */
      {
        vector_t *block;  /* The block of svalues to compile */
        svalue_t *argp;   /* Pointer to the current svalue */
        ph_int type;      /* Various types */

        block = value->u.vec;
        argp = block->item;

        /* The first value must be a closure */
        if (block == &null_vector || argp->type != T_CLOSURE)
        {
            lambda_error("Missing function\n");
        }

        if ( (type = argp->x.closure_type) < (ph_int)CLOSURE_SIMUL_EFUN)
        {
            /* Most common case: closure is an efun or an operator */

            if (type < (ph_int)CLOSURE_EFUN)
            {
                /* Closure is an operator */

                mp_int block_size;  /* Number of entries */

                block_size = (mp_int)VEC_SIZE(block);

                switch (type - CLOSURE_OPERATOR)
                {
                default:
                    lambda_error("Unimplemented operator %s for lambda()\n",
                                 instrs[type - CLOSURE_OPERATOR].name);

                /* ({ #'||, arg1, ..., argn })
                 * ({ #'&&, arg1, ..., argn })
                 */
                case F_LOR:
                case F_LAND:
                  {
                    /* For #'|| this is compiled into
                     *      <arg 1>
                     *      F_LAND end
                     *      <arg 2>
                     *      F_LAND end
                     *      ...
                     *      <arg n>
                     * end:
                     *
                     * If only the logical result is needed (VOID_ACCEPTED),
                     * the F_LANDs are replaced by F_BRANCH_ZERO. If the
                     * distance to end is too big, the F_LANDs are compiled as:
                     *
                     *      <arg>
                     *      DUP
                     *      LBRANCH_ZERO end
                     *      POP
                     *      <arg n+1>
                     *
                     * respectively for the logical result:
                     *
                     *      <arg>
                     *      LBRANCH_ZERO end
                     *      <arg n+1>
                     *
                     * Analog for F_LOR, here the branches are on _NON_ZERO.
                     */
                    mp_int *branchp;
                      /* Table storing the position of the branch/operator
                       * instruction after every compiled argument.
                       */
                    mp_int i;        /* most of the time: number of values left */
                    mp_int start;
                    mp_int end;      /* first free code byte */
                    Bool is_and;     /* TRUE if the operator is F_LAND */
                    int code;        /* Compiled instruction */
                    int void_given;

                    code = type - CLOSURE_OPERATOR;
                    is_and = (code == F_LAND);

                    /* If the caller doesn't need a return value,
                     * compile the operator as branches (much faster).
                     */
                    if (opt_flags & VOID_ACCEPTED)
                    {
                        code = is_and ? F_BRANCH_WHEN_ZERO
                                      : F_BRANCH_WHEN_NON_ZERO;
                        opt_flags |= VOID_GIVEN;
                    }

                    /* Generate the code for the arguments but the last one.
                     * After every compiled argument, insert <code> and
                     * an empty byte and store the position of the inserted
                     * byte in the branchp table.
                     */
                    i = block_size - 1;
                    branchp = alloca(i * sizeof *branchp);
                    while (--i > 0)
                    {
                        compile_value(++argp, REF_REJECTED);
                        if (current.code_left < 2)
                            realloc_code();
                        *branchp++ = current.code_max - current.code_left;
                        current.code_left -= 2;
                        PUT_CODE(current.codep, (bytecode_t)code);
                        current.codep += 2;
                    }

                    /* If i is != 0 here, no arguments were given.
                     * In that case, fake a result, otherwise compile the
                     * last argument.
                     */
                    if (i)
                        void_given = compile_value( is_and ? &const1 : &const0
                                                  , opt_flags & (VOID_ACCEPTED|REF_REJECTED)
                                                  );
                    else
                        void_given = compile_value( ++argp
                                                  , opt_flags & (VOID_ACCEPTED|REF_REJECTED)
                                                  );

                    /* If the caller accepts void, but we compiled a result,
                     * remove it from the stack.
                     */
                    if (opt_flags & VOID_ACCEPTED && !(void_given & VOID_GIVEN))
                    {
                        if (current.code_left < 1)
                            realloc_code();
                        current.code_left--;
                        STORE_CODE(current.codep, F_POP_VALUE);
                    }

                    /* Walk backwards through the generated code segments
                     * and store the correct offsets for the operator/branch
                     * instructions. If necessary, the short branches are
                     * converted into long ones.
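                     * (E.g. a short '<arg> F_LAND <offset8>' pair whose
                     *  target no longer fits is rewritten, when a result
                     *  is needed, to
                     *      <arg> DUP LBRANCH_ZERO <offset16> POP
                     *  which is where the growth of 3 bytes per branch
                     *  below comes from.)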
                     */
                    i = block_size - 1;
                    end = current.code_max - current.code_left;
                      /* The target to jump to */
                    while (--i > 0)
                    {
                        mp_int offset;

                        start = *--branchp;
                        offset = end - start - 2;
                        if (offset <= 0xff)
                        {
                            PUT_UINT8(current.code+start+1, (unsigned char)offset);
                            continue;
                        }
                        else
                        {
                            /* We exceeded the limit of the short offsets.
                             * Prepare the extension of the remaining offsets
                             * to long offsets.
                             */

                            mp_int growth;        /* Additional bytes needed */
                            int    growth_factor; /* Additional bytes per branch */
                            mp_int j;
                            bytecode_p p, q;      /* Src/Dest for code copying */

                            if (opt_flags & VOID_ACCEPTED)
                            {
                                /* We don't need a result: just change the
                                 * short into long branches.
                                 */
                                growth = i;
                                growth_factor = 1;
                                code = is_and ? F_LBRANCH_WHEN_ZERO
                                              : F_LBRANCH_WHEN_NON_ZERO;
                            }
                            else
                            {
                                /* We need a result: change the OP instructions
                                 * into OP/LBRANCH combinations.
                                 */
                                growth = i * 3;
                                growth_factor = 3;
                                code = is_and ? F_LBRANCH_WHEN_ZERO
                                              : F_LBRANCH_WHEN_NON_ZERO;
                            }

                            /* Prepare the copying of the code */
                            if (current.code_left < growth)
                                realloc_code();
                            current.code_left -= growth;
                            current.codep += growth;
                            p = current.code + end;
                            q = p + growth;
                            end += growth_factor - 1;
                              /* - 1 is precompensation for leading branch code */
                            if ( !(opt_flags & VOID_ACCEPTED) )
                                /* offset precompensation for leading F_DUP */
                                end--;

                            /* Restart the walk through the branchpoints */
                            branchp++;
                            do {
                                start = *--branchp;
                                offset = end - start;
                                end += growth_factor;
                                if (offset > 0x7fff)
                                    UNIMPLEMENTED

                                /* Move the code from here back to the branch
                                 * point.
                                 */
                                j = p - (bytecode_p)&current.code[start+2];
                                do {
                                    *--q = *--p;
                                } while (--j);

                                /* Generate the new branch instructions instead
                                 * of copying the old.
                                 */
                                p -= 2;
                                if (opt_flags & VOID_ACCEPTED)
                                {
                                    RSTORE_SHORT(q, offset);
                                    RSTORE_CODE(q, (bytecode_t)code);
                                }
                                else
                                {
                                    RSTORE_CODE(q, F_POP_VALUE);
                                    RSTORE_SHORT(q, offset);
                                    RSTORE_CODE(q, (bytecode_t)code);
                                    RSTORE_CODE(q, F_DUP);
                                }
                            } while (--i > 0);
                            break; /* outer while(), it's finished anyway */
                        }
                    } /* while(--i > 0); */
                    break;
                  }

                /* ({#'?,  expr, cond-part, ..., default-part })
                 * ({#'?!, expr, cond-part, ..., default-part })
                 */
                case F_BRANCH_WHEN_ZERO:
                case F_BRANCH_WHEN_NON_ZERO:
                  {
                    /* For #'? this is compiled into:
                     *
                     *   result required:         no result required:
                     *
                     *     <cond 1>                 <cond 1>
                     *     BRANCH_ZERO l1           BRANCH_ZERO l1
                     *     <cond-part 1>            <cond-part 1>
                     *     BRANCH vd/nvd            BRANCH vd/nvd
                     *   l1:                      l1:
                     *     <cond 2>                 <cond 2>
                     *     BRANCH_ZERO l2           BRANCH_ZERO l2
                     *     <cond-part 2>            <cond-part 2>
                     *     BRANCH vd/nvd            BRANCH vd/nvd
                     *   l2:                      l2:
                     *     ...                      ...
                     *   ln-1:                    ln-1:
                     *     <cond n>                 <cond n>
                     *     BRANCH_ZERO ln           BRANCH_ZERO ln
                     *     <cond-part n>            <cond-part n>
                     *     BRANCH vd/nvd            BRANCH vd/nvd
                     *   ln:                      ln:
                     *     <default-part>           <default-part>
                     *   nvd: BRANCH +1           nvd: POP
                     *   vd:  CONST0              vd:
                     *
                     * (vd: void_dest, nvd: non_void_dest)
                     *
                     * The branch conditions after every <cond> are reversed
                     * for #'?! and/or if the <cond> returns a result in
                     * reverse logic. And of course the F_BRANCHes are converted
                     * into F_LBRANCHes where necessary.
                     *
                     * If a <default-part> is required but not given, CONST0 is
                     * inserted in its place. In that case, the branches from
                     * <cond-part>s without a result are directed to that one
                     * CONST0 as well.
                     *
                     * There are a few other ways to compile the end of the
                     * sequence if no <default-part> is required and/or not
                     * given, or if no result is required - they are explained
                     * below.
                     */
                    mp_int *branchp;
                      /* Table storing two values for every argument pair: the
                       * position after the cond-part and the position after
                       * the cond. Yes, in reverse order.
                       */
                    mp_int i;
                    mp_int start;
                    mp_int end;
                    mp_int void_dest;     /* branch dest with no result */
                    mp_int non_void_dest; /* branch dest with a result */
                    Bool is_notif;        /* TRUE if this is #'?!
                                           */
                    int code;             /* The instruction to compile to */
                    int opt_used;         /* Current compile_value() result */
                    int all_void;         /* !0 if no cond-part returns a value */
                    mp_int last_branch;   /* Position of branch after cond */

                    non_void_dest = 0;
                    code = type - CLOSURE_OPERATOR;
                    is_notif = (code == F_BRANCH_WHEN_NON_ZERO);

                    /* If the default part exists, is the number 0 or at least
                     * has no side-effects, and if the caller accepts void/0
                     * for an answer, it is not compiled as it won't have
                     * any effect anyway.
                     */
                    if (!(block_size & 1)
                     && (opt_flags & (VOID_ACCEPTED|ZERO_ACCEPTED))
                     && ( opt_flags & VOID_ACCEPTED
                          ? argp[block_size-1].type != T_POINTER /* no side effect */
                          : argp[block_size-1].type == T_NUMBER
                            && !argp[block_size-1].u.number
                        )
                       )
                    {
                        /* Ignore the default-part by hiding it */
                        block_size--;
                    }

                    /* Generate the code for the (cond, cond-part) pairs,
                     * and add the necessary branch instructions.
                     * Also store the positions of the inserted code
                     * in the branchp table.
                     */
                    i = block_size;
                    branchp = alloca(i * sizeof *branchp);
                    all_void = VOID_GIVEN;
                    while ( (i -= 2) > 0)
                    {
                        mp_int offset;

                        /* Compile the condition and add the branch
                         * to skip the cond-part.
                         */
                        opt_used = compile_value(++argp, NEGATE_ACCEPTED);
                        if (current.code_left < 2)
                            realloc_code();
                        last_branch = current.code_max - current.code_left;
                        current.code_left -= 2;
                        if (opt_used & NEGATE_GIVEN)
                            STORE_CODE(current.codep
                                      , (bytecode_t)
                                        (is_notif ? F_BRANCH_WHEN_ZERO
                                                  : F_BRANCH_WHEN_NON_ZERO)
                                      );
                        else
                            STORE_CODE(current.codep, (bytecode_t)code);
                        STORE_UINT8(current.codep, 0);

                        /* Compile the cond-part */
                        ++argp;
                        opt_used = compile_value(argp,
                            (i == 1 && !all_void)
                            ? opt_flags & REF_REJECTED
                            : opt_flags & (VOID_ACCEPTED|ZERO_ACCEPTED|REF_REJECTED)
                          );
                        all_void &= opt_used;
                        if (current.code_left < 4)
                            realloc_code();

                        /* Now that we know the size of the cond-part, store
                         * the branch offset into the branch instruction
                         * before the cond-part.
                         */
                        offset = current.code_max - current.code_left - last_branch;

                        /* Make sure that the offset won't overflow
                         * when incremented later during backpatching.
                         */
                        if (offset > 0xfe)
                        {
                            /* The cond-part was too big, we have to change
                             * the 2-byte F_BRANCH_* into a 3-byte F_LBRANCH_*.
                             */
                            bytecode_p p;
                            mp_int j;

                            /* Move the cond-part one byte forward */
                            p = current.codep++;
                            j = offset - 2;
                            if (offset > 0x7ffd)
                                UNIMPLEMENTED
                            do {
                                p--;
                                p[1] = *p;
                            } while (--j);

                            current.code_left--;
                            if (current.code[last_branch] == F_BRANCH_WHEN_ZERO)
                                PUT_CODE(current.code+last_branch
                                        , F_LBRANCH_WHEN_ZERO);
                            else
                                PUT_CODE(current.code+last_branch
                                        , F_LBRANCH_WHEN_NON_ZERO);
                            PUT_SHORT(current.code+last_branch+1, offset+2);
                        }
                        else
                        {
                            /* The offset fits, just store it */
                            PUT_UINT8(current.code+last_branch+1
                                     , (unsigned char)offset);
                        }

                        /* Store the two branch positions */
                        *branchp++ = current.code_max - current.code_left;
                        *branchp++ = last_branch;

                        /* Add the unconditional branch. In place of the
                         * offset we store the opt_used flags so that the
                         * later backpatching run knows exactly what the
                         * cond-part left on the stack.
                         */
                        current.code_left -= 2;
                        STORE_CODE(current.codep, F_BRANCH);
                        STORE_CODE(current.codep, (bytecode_t)opt_used);
                    } /* while() */

                    /* If i is not zero now, then there is no default part */

                    /* Now compile the default part.
                     * There are a few conditions to distinguish...
                     */
                    if ( i /* no default */
                     && (   opt_flags & VOID_ACCEPTED
                         || (all_void && opt_flags & ZERO_ACCEPTED)
                        )
                       )
                    {
                        /* There is no default part, and the caller doesn't
                         * want a result or accepts a zero when we don't
                         * have one.
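                         * (The corrective offset computed below adjusts
                         *  the F_BRANCH already emitted after the last
                         *  cond-part: -2 drops it entirely when all
                         *  cond-parts were void, -1 shrinks it to a
                         *  one-byte F_POP_VALUE.)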
                         */

                        mp_int offset;  /* corrective offset for the branch
                                         * after the last cond
                                         */

                        opt_flags |= VOID_GIVEN;
                        if (all_void)
                        {
                            /* No cond-part returned a result, just remove
                             * the last F_BRANCH.
                             * The code sequence is therefore:
                             *
                             *       <cond 1>
                             *       BRANCH_ZERO l1
                             *       <cond-part 1>
                             *       BRANCH end
                             *   l1:
                             *       ...
                             *   ln-1:
                             *       <cond n>
                             *       BRANCH_ZERO ln
                             *       <cond-part n>
                             *   ln: end:
                             */
                            if (block_size < 2)
                            {
                                /* empty statement: all done */
                                break; /* switch() */
                            }
                            offset = -2;
                            void_dest = current.code_max - current.code_left - 2;
                        }
                        else
                        {
                            /* Some cond-parts returned a result: let them
                             * jump to a POP statement.
                             * The code sequence is therefore:
                             *
                             *       <cond 1>
                             *       BRANCH_ZERO l1
                             *       <cond-part 1>
                             *       BRANCH vd/nvd
                             *   l1:
                             *       ...
                             *   ln-1:
                             *       <cond n>
                             *       BRANCH_ZERO ln
                             *       <cond-part n>
                             *   nvd: POP
                             *   ln: vd:
                             *
                             * TODO: Uhm, what if <cond-part n> is void?
                             */
                            /* Terminating void after non-void is avoided */
                            current.codep[-2] = F_POP_VALUE;
                            offset = -1;
                            non_void_dest = current.code_max - current.code_left - 2;
                            void_dest = non_void_dest + 1;
                        }

                        /* Now rewrite the BRANCH_ZERO ln according to offset */
                        start = *--branchp;
                        code = GET_CODE(current.code+start);
                        if (code == F_LBRANCH_WHEN_ZERO
                         || code == F_LBRANCH_WHEN_NON_ZERO)
                        {
                            short old_offset;

                            GET_SHORT(old_offset, current.code+start+1);
                            PUT_SHORT(current.code+start+1, old_offset+offset);
                        }
                        else
                        {
                            PUT_INT8(current.code+start+1
                                    , GET_INT8(current.code+start+1) + offset);
                        }

                        /* Prepare for the backpatching run */
                        current.codep += offset;
                        current.code_left -= offset;
                        branchp--;
                        i = block_size - 2;
                    }
                    else
                    {
                        /* We may or may not have a default part, but
                         * the caller expects a result.
                         */

                        /* the following assignment is only valid if
                         *
                         *   ( !all_void && i "no default" &&
                         *     ( (opt_flags & (VOID_ACCEPTED|ZERO_ACCEPTED)) ==
                         *       ZERO_ACCEPTED) )
                         *
                         * is met, and it is only needed when there is at
                         * least one void expression, too.
                         * However, it's easier to do the assignment
                         * all the time, and it does no harm here.
                         * The effect is that the 'const0' default synthesized
                         * will be used as result from the cond-part, too.
                         */
                        void_dest = current.code_max - current.code_left;

                        /* Compile the default part */
                        opt_used = compile_value(
                          i ? &const0 : ++argp,
                          opt_flags &
                            ( all_void
                              ? (VOID_ACCEPTED|ZERO_ACCEPTED|REF_REJECTED)
                              : REF_REJECTED
                            )
                        );

                        /* <cond-part>s with a result of course want to branch
                         * after the <default-part>.
                         */
                        non_void_dest = current.code_max - current.code_left;
                        if (opt_used & VOID_GIVEN)
                        {
                            /* Whoops, <default-part> didn't return a result.
                             * Prepare to insert a default, and let the
                             * void-<cond-part>s branch here, too.
                             */
                            void_dest = non_void_dest;
                            opt_flags |= VOID_GIVEN;
                        }
                        else if (opt_flags & VOID_ACCEPTED)
                        {
                            /* We have a result, but the caller doesn't want
                             * it: add the code sequence
                             *
                             *   nvd: POP
                             *   vd:
                             */

                            opt_flags |= VOID_GIVEN;
                            if (current.code_left < 1)
                                realloc_code();
                            current.code_left--;
                            STORE_CODE(current.codep, F_POP_VALUE);
                            opt_used = VOID_GIVEN;
                            void_dest = non_void_dest + 1;
                        }
                        else if (all_void && block_size > 2)
                        {
                            /* The caller wants a result, <default-part> has
                             * one, but none of the <cond-part>s does (and
                             * they exist).
                             */
                            if (current.code_left < 3)
                                realloc_code();
                            if (block_size > 4
                             || branchp[-2] - branchp[-1] > 0xfd)
                            {
                                /* There is more than one <cond-part>, or the
                                 * one alone needs a long branch: add
                                 *
                                 *   nvd: BRANCH +1
                                 *   vd:  CONST0
                                 */
                                void_dest = non_void_dest + 2;
                                current.code_left -= 3;
                                STORE_CODE(current.codep, F_BRANCH);
                                STORE_UINT8(current.codep, 1);
                                STORE_CODE(current.codep, F_CONST0);
                            }
                            else
                            {
                                /* Just one <cond-part>: replace the
                                 * 'BRANCH end' by 'CONST0; BRANCH end'.
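                                 * (I.e. a one-byte gap is opened with
                                 *  move_memory() and the branch skipping
                                 *  this cond-part grows by one.)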
                        bytecode_p p;

                        /* Make space for the CONST0 */
                        current.code_left--;
                        start = branchp[-2];
                        move_memory(&current.code[start+1]
                                   , &current.code[start]
                                   , (size_t)(non_void_dest - start));
                        current.codep++;

                        /* Add the CONST0 instruction */
                        PUT_CODE(current.code+start, F_CONST0);

                        /* Set the saved opt_flags to 'not void' */
                        PUT_UINT8(current.code+start+2, 0);

                        /* Increment the branch offset for the branch
                         * skipping the cond-part
                         */
                        p = current.code+branchp[-1]+1;
                        PUT_UINT8(p, GET_UINT8(p)+1);

                        /* Update the stored position */
                        branchp[-2] = start+1;

                        non_void_dest++;
                        /* void_dest = start; */
                        /* all_void isn't used any more, else we'd
                         * need to zero it now.
                         */
                    }
                }
                else if (!i && !all_void && opt_flags & ZERO_ACCEPTED)
                {
                    /* We had a real <default> with a result, there are
                     * some cond-parts which return a result, and the
                     * caller accepts a zero for the void cond-parts: add
                     *
                     * nvd: BRANCH +1
                     *  vd: CONST0
                     *
                     * if there are void cond-parts.
                     */
                    mp_int *branchp2, j;

                    /* Check all cond-parts if there is a void one */
                    branchp2 = branchp;
                    for (j = block_size; (j -= 2) > 0; )
                    {
                        start = *(branchp2 -= 2);
                        if (current.code[start+1] & VOID_GIVEN)
                        {
                            /* Yup, we need the extra code. */
                            void_dest = non_void_dest + 2;
                            non_void_dest += 3;
                            if (current.code_left < 3)
                                realloc_code();
                            current.code_left -= 3;
                            STORE_CODE(current.codep, F_BRANCH);
                            STORE_UINT8(current.codep, 1);
                            STORE_CODE(current.codep, F_CONST0);
                            break;
                        }
                    }
                }

                /* Prepare the backpatching run */
                i = block_size;
            }

            /* Now walk backwards through all the branches, insert
             * the proper offset and rewrite them to long branches where
             * necessary.
             */
            end = current.code_max - current.code_left;
            while ( (i -= 2) > 0)
            {
                mp_int offset;

                /* Compute the distance to branch */
                start = *(branchp -= 2);
                offset = (GET_UINT8(current.code+start+1) & VOID_GIVEN)
                         ? void_dest - start - 2
                         : non_void_dest - start - 2;

                if (offset <= 0xff)
                {
                    /* A short branch is sufficient. */
                    PUT_UINT8(current.code+start+1, (bytecode_t)offset);
                    continue;
                }
                else
                {
                    /* We have to rewrite this and all previous
                     * branches to long branches.
                     */
                    mp_int growth;  /* Number of bytes to insert */
                    mp_int j;
                    bytecode_p p, q;

                    /* Determine how much more is needed and allocate
                     * the memory
                     */
                    growth = (i+1) >> 1;
                    if (current.code_left < growth)
                        realloc_code();
                    current.code_left -= growth;
                    current.codep += growth;

                    /* Now move the code, starting from the end,
                     * and rewriting the branches when we encounter
                     * them.
                     * The first move will move all the code up to
                     * the end, the next move just the code up to
                     * the following branch.
                     * The offset from the old position is given
                     * by (q-p).
                     */
                    p = current.code + end;
                    q = p + growth;

                    branchp += 2; /* have to reconsider this one */
                    do {
                        unsigned short dist;
                        bytecode_p pstart;

                        /* First, increment the distance of the
                         * branch skipping the previous cond-part (it
                         * might already be a long branch).
                         */
                        start = *--branchp;
                        pstart = current.code+start;
                        code = GET_CODE(pstart);
                        if (code == F_LBRANCH_WHEN_ZERO
                         || code == F_LBRANCH_WHEN_NON_ZERO)
                        {
                            GET_SHORT(dist, pstart+1);
                            PUT_SHORT(pstart+1, dist+1);
                        }
                        else
                        {
                            PUT_UINT8(pstart+1, GET_UINT8(pstart+1)+1);
                        }

                        /* Compute the distance for the branch */
                        start = *--branchp;
                        offset = (current.code[start+1] & VOID_GIVEN)
                                 ? void_dest - start - 1
                                 : non_void_dest - start - 1;

                        /* Count the extra byte we're going to insert */
                        end++;
                        void_dest++;
                        non_void_dest++;

                        if (offset > 0x7fff)
                            UNIMPLEMENTED

                        /* Compute the distance to store while q and p
                         * still give the proper offset.
                         */
                        dist = (unsigned short)(offset + (q-p));

                        /* Move the code after this branch. */
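                        /* Editor's note (illustrative): a branch that has
                         * to span e.g. 0x120 bytes no longer fits the
                         * 2-byte form. Every branch rewritten below grows
                         * the code by one byte, so the distance actually
                         * stored is dist = offset + (q-p), with (q-p)
                         * counting the bytes inserted between this branch
                         * and its target.
                         */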
                        j = (p - (current.code + start)) - 2;
                        do {
                            *--q = *--p;
                        } while (--j);

                        /* Store the new branch in place of the old one. */
                        RSTORE_SHORT(q, dist);

                        p -= 2;
                        code = GET_CODE(p);
                        if (code == F_BRANCH_WHEN_ZERO)
                            RSTORE_CODE(q, F_LBRANCH_WHEN_ZERO);
                        else if (code == F_BRANCH_WHEN_NON_ZERO)
                            RSTORE_CODE(q, F_LBRANCH_WHEN_NON_ZERO);
                        else if (code == F_BRANCH)
                            RSTORE_CODE(q, F_LBRANCH);
                        else
                            fatal("Can't rewrite %s (%02x) at %p\n"
                                 , get_f_name(code), code, p);
                    } while ( (i -= 2) > 0);

                    break; /* outer while() - it's finished anyway */
                }
            } /* while() backpatching */

            break;
          }

        /* ({#', <expr1>, <expr2>, ..., <exprn> }) */
        case F_POP_VALUE:
          {
            /* This is compiled as:
             *
             *   <expr1>
             *   POP
             *   <expr2>
             *   POP
             *   ...
             *   POP
             *   <exprn>
             *
             * If an expression doesn't return a value, the following
             * POP is omitted.
             *
             * If no expression is given, 'CONST0' is compiled.
             */
            mp_int i;
            int void_given;

            /* Compile the first n-1 expressions */
            for (i = block_size - 1; --i > 0; )
            {
                void_given = compile_value(++argp, VOID_WANTED);

                /* If we got a result, pop it */
                if ( !(void_given & VOID_GIVEN) )
                {
                    if (current.code_left < 1)
                        realloc_code();
                    current.code_left--;
                    STORE_CODE(current.codep, F_POP_VALUE);
                }
            }

            /* Compile the last expression.
             * If there is none (i != 0), use CONST0 instead.
             */
            opt_flags = compile_value(i ? &const0 : ++argp, opt_flags);
            break;
          }

        /* ({#'=, <lvalue1>, <value1>, ..., <lvaluen>, <valuen> }) */
        case F_ASSIGN:
          {
            /* This is compiled as:
             *
             *   <value1>
             *   <lvalue1>
             *   VOID_ASSIGN
             *   <value2>
             *   <lvalue2>
             *   VOID_ASSIGN
             *   ...
             *   <valuen>
             *   <lvaluen>
             *   ASSIGN
             *
             * If the caller doesn't require a result, the last
             * ASSIGN is compiled as VOID_ASSIGN.
             */
            mp_int i;

            /* There must be at least one assignment in order to get
             * a return value.
             */
            if ( !(i = block_size - 1) || (i & 1) )
                lambda_error("Missing value in assignment\n");

            argp++;
            for (; (i -= 2) >= 0; argp+=2)
            {
                compile_value(argp+1, REF_REJECTED);
                compile_lvalue(argp, USE_INDEX_LVALUE);
                if (!i)
                {
                    /* Last assignment: we might need to keep this value */
                    if (opt_flags & VOID_ACCEPTED)
                    {
                        opt_flags = VOID_GIVEN;
                        STORE_CODE(current.codep, F_VOID_ASSIGN);
                    }
                    else
                    {
                        STORE_CODE(current.codep, F_ASSIGN);
                    }
                }
                else
                {
                    /* Earlier assignments: forget the value */
                    STORE_CODE(current.codep, F_VOID_ASSIGN);
                }
                current.code_left--;
            }
            break;
          }

        /* ({#'+=, <lvalue>, <value> }) */
        case F_ADD_EQ:
            /* This is compiled as:
             *
             *   <value>
             *   <lvalue>
             *   (VOID_)ADD_EQ
             *
             * For the special case <value> == 1:
             *
             *   <lvalue>
             *   (PRE_)INC
             */
            if (block_size != 3)
                lambda_error(
                  "Bad number of arguments to #'%s\n",
                  instrs[type - CLOSURE_OPERATOR].name
                );

            if (argp[2].type == T_NUMBER && argp[2].u.number == 1)
            {
                compile_lvalue(argp+1, USE_INDEX_LVALUE);
                if (opt_flags & VOID_ACCEPTED)
                {
                    opt_flags = VOID_GIVEN;
                    STORE_CODE(current.codep, F_INC);
                }
                else
                {
                    STORE_CODE(current.codep, F_PRE_INC);
                }
                current.code_left--;
            }
            else
            {
                compile_value(argp+2, REF_REJECTED);
                compile_lvalue(argp+1, USE_INDEX_LVALUE);
                if (opt_flags & VOID_ACCEPTED)
                {
                    opt_flags = VOID_GIVEN;
                    STORE_CODE(current.codep, F_VOID_ADD_EQ);
                }
                else
                    STORE_CODE(current.codep, F_ADD_EQ);
                current.code_left--;
            }
            break;

        /* ({#'-=, <lvalue>, <value> }) */
        case F_SUB_EQ:
            /* This is compiled as:
             *
             *   <value>
             *   <lvalue>
             *   SUB_EQ
             *
             * For the special case <value> == 1:
             *
             *   <lvalue>
             *   (PRE_)DEC
             */
            if (block_size != 3)
                lambda_error(
                  "Bad number of arguments to #'%s\n",
                  instrs[type - CLOSURE_OPERATOR].name
                );

            if (argp[2].type == T_NUMBER && argp[2].u.number == 1)
            {
                compile_lvalue(argp+1, USE_INDEX_LVALUE);
                if (opt_flags & VOID_ACCEPTED)
                {
                    opt_flags = VOID_GIVEN;
                    STORE_CODE(current.codep, F_DEC);
                }
                else
                {
                    STORE_CODE(current.codep, F_PRE_DEC);
                }
                current.code_left--;
            }
            else
            {
                compile_value(argp+2, REF_REJECTED);
                compile_lvalue(argp+1, USE_INDEX_LVALUE);
                STORE_CODE(current.codep, F_SUB_EQ);
                current.code_left--;
            }
            break;
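        /* Editor's illustration (not part of the original source): the
         * special-casing above means, schematically,
         *
         *     ({#'+=, <lvalue>, 1 })   ->   <lvalue> (PRE_)INC
         *     ({#'+=, <lvalue>, 5 })   ->   5 <lvalue> (VOID_)ADD_EQ
         *
         * where the VOID_/plain variants depend on whether the caller
         * accepts a void result.
         */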
        /* ({#'op=, <lvalue>, <value> })
         * with op: *, &, |, ^, <<, >>, >>>, /, %
         */
        case F_MULT_EQ:
        case F_AND_EQ:
        case F_OR_EQ:
        case F_XOR_EQ:
        case F_LSH_EQ:
        case F_RSH_EQ:
        case F_RSHL_EQ:
        case F_DIV_EQ:
        case F_MOD_EQ:
            /* This is compiled as:
             *
             *   <value>
             *   <lvalue>
             *   <op>_EQ
             */
            if (block_size != 3)
            {
                lambda_error(
                  "Bad number of arguments to #'%s\n",
                  instrs[type - CLOSURE_OPERATOR].name
                );
            }
            compile_value(argp+2, REF_REJECTED);
            compile_lvalue(argp+1, USE_INDEX_LVALUE);
            STORE_CODE(current.codep, (bytecode_t)(type - CLOSURE_OPERATOR));
            current.code_left--;
            break;

        /* ({#'op=, <lvalue>, <value> })
         * with op: &&, ||
         */
        case F_LAND_EQ:
        case F_LOR_EQ:
          {
            /* This is compiled as:
             *
             *      <lvalue>
             *      LDUP
             *      <op> l
             *      <value>
             *   l: SWAP_VALUES
             *      ASSIGN
             *
             * respectively for long branches:
             *
             *      <lvalue>
             *      LDUP
             *      DUP
             *      LBRANCH l
             *      POP
             *      <value>
             *   l: SWAP_VALUES
             *      ASSIGN
             */
            mp_int branchp;  /* The position of the branch/operator
                              * instruction.
                              */
            int code;        /* Compiled instruction */
            Bool is_and;     /* TRUE if the operator is F_LAND_EQ */
            mp_int end;      /* The branch target */
            mp_int offset;   /* The branch offset */

            if (type - CLOSURE_OPERATOR == F_LAND_EQ)
            {
                code = F_LAND;
                is_and = MY_TRUE;
            }
            else
            {
                code = F_LOR;
                is_and = MY_FALSE;
            }

            if (block_size != 3)
            {
                lambda_error(
                  "Bad number of arguments to #'%s\n",
                  instrs[type - CLOSURE_OPERATOR].name
                );
            }

            compile_lvalue(argp+1, USE_INDEX_LVALUE);

            if (current.code_left < 3)
                realloc_code();

            current.code_left--;
            STORE_CODE(current.codep, (bytecode_t)F_LDUP);

            branchp = current.code_max - current.code_left;
            current.code_left -= 2;
            STORE_CODE(current.codep, (bytecode_t)code);
            STORE_CODE(current.codep, (bytecode_t)0);

            compile_value(argp+2, REF_REJECTED);

            /* Store the correct offsets for the operator/branch
             * instruction. If necessary, the short branch is
             * converted into a long one.
             */
            end = current.code_max - current.code_left;
              /* The target to jump to */
            offset = end - branchp - 2;
            if (offset <= 0xff)
            {
                PUT_UINT8(current.code+branchp+1, (unsigned char)offset);
            }
            else
            {
                /* We exceeded the limit of the short offsets.
                 * Extend the short branch into a long one.
                 */
                mp_int i;
                bytecode_p p;

                code =   is_and
                       ? F_LBRANCH_WHEN_ZERO
                       : F_LBRANCH_WHEN_NON_ZERO;

                /* Prepare the copying of the code */
                if (current.code_left < 3)
                    realloc_code();
                current.code_left -= 3;
                current.codep += 3;

                p = current.code + end + 2;
                for (i = offset; --i >= 0; --p )
                    *p = p[-3];
                p[-4] = F_DUP;
                p[-3] = code;
                offset += 3;
                PUT_SHORT((p-2), offset);
                if (offset > 0x7fff)
                    UNIMPLEMENTED;
                p[0] = F_POP_VALUE;
            }

            if (current.code_left < 2)
                realloc_code();
            current.code_left -= 2;
            STORE_CODE(current.codep, (bytecode_t)F_SWAP_VALUES);
            STORE_CODE(current.codep, (bytecode_t)F_ASSIGN);
            break;
          }

        /* ({#'++, <lvalue> })
         * ({#'--, <lvalue> })
         */
        case F_POST_INC:
        case F_POST_DEC:
            /* This is compiled as:
             *
             *   <lvalue>        resp.   <lvalue>
             *   (POST_)INC              (POST_)DEC
             */
            if (block_size != 2)
            {
                lambda_error(
                  "Bad number of arguments to #'%s\n",
                  instrs[type - CLOSURE_OPERATOR].name
                );
            }

            compile_lvalue(argp+1, USE_INDEX_LVALUE);

            if (opt_flags & VOID_ACCEPTED)
            {
                opt_flags = VOID_GIVEN;
                if (type-CLOSURE_OPERATOR == F_POST_INC)
                    STORE_CODE(current.codep, F_INC);
                else
                    STORE_CODE(current.codep, F_DEC);
            }
            else
                STORE_CODE(current.codep
                          , (bytecode_t)(type - CLOSURE_OPERATOR));

            current.code_left--;
            break;

        /* ({#'do, <body1>, ..., <bodyn>, <cond>, <result> }) */
        case F_BBRANCH_WHEN_NON_ZERO:
          {
            /* This is compiled as:
             *
             *   l: <body1>
             *      POP
             *      <body2>
             *      ...
             *      <bodyn>
             *      POP
             *      <cond>
             *      BBRANCH_NON_ZERO l
             *      <result>
             *
             * If a <body> doesn't return a value, the following POP
             * is omitted.
             *
             * As usual, if the jump distance is too big, the BBRANCH
             * is converted into a LBRANCH. Also, if the <cond>
             * returns a result in reversed logic, the branch condition
             * is reversed.
             */
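            /* Editor's sketch (illustrative, not original code): e.g.
             *
             *     ({#'do, ({#'+=, 'i, 1}), ({#'<, 'i, 10}), 'i })
             *
             * compiles roughly to
             *
             *   l: <i += 1>            (emitted as INC, void: no POP)
             *      <i < 10>
             *      BBRANCH_NON_ZERO l
             *      <push i>
             */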
            mp_int i;
            int void_given;
            mp_int offset;  /* Position of the first <body> */

            i = block_size - 3;
            if (i < 0)
                lambda_error("Missing argument(s) to #'do\n");

            offset = current.code_left - current.code_max;

            /* Compile all the bodies */
            if (i) do
            {
                void_given = compile_value(++argp, VOID_WANTED);
                if ( !(void_given & VOID_GIVEN) )
                {
                    /* POP the unwanted result */
                    if (current.code_left < 1)
                        realloc_code();
                    current.code_left--;
                    STORE_CODE(current.codep, F_POP_VALUE);
                }
            } while(--i);

            /* Compile the condition */
            void_given = compile_value(++argp, NEGATE_ACCEPTED);
            offset += current.code_max - current.code_left + 1;

            if (current.code_left < 3)
                realloc_code();
            if (offset > 0xff)
            {
                /* We need a long branch */
                if (offset > 0x8000)
                    UNIMPLEMENTED
                current.code_left -= 3;
                if (void_given & NEGATE_GIVEN)
                    STORE_CODE(current.codep, F_LBRANCH_WHEN_ZERO);
                else
                    STORE_CODE(current.codep, F_LBRANCH_WHEN_NON_ZERO);
                STORE_SHORT(current.codep, -offset);
            }
            else
            {
                current.code_left -= 2;
                if (void_given & NEGATE_GIVEN)
                    STORE_CODE(current.codep, F_BBRANCH_WHEN_ZERO);
                else
                    STORE_CODE(current.codep, F_BBRANCH_WHEN_NON_ZERO);
                STORE_UINT8(current.codep, offset);
            }

            /* Compile the result */
            opt_flags = compile_value(++argp, opt_flags);
            break;
          }

        /* ({#'while, <cond>, <result>, <body1>, ..., <bodyn> }) */
        case F_BBRANCH_WHEN_ZERO:
          {
            /* This is compiled as:
             *
             *       BRANCH l1
             *   l0: <body1>
             *       POP
             *       <body2>
             *       ...
             *       <bodyn>
             *       POP
             *   l1: <cond>
             *       BBRANCH_NON_ZERO l0
             *       <result>
             *
             * If a <body> doesn't return a value, the following POP
             * is omitted.
             *
             * As usual, if the jump distances are too big, the
             * (B)BRANCHes are converted into LBRANCHes. Also, if the
             * <cond> returns a result in reversed logic, the branch
             * condition is reversed.
             */
            mp_int i;
            int void_given;
            mp_int start_branch;
            mp_int offset;

            /* Store the initial branch, and remember its position
             * for the backpatching.
             */
            if (current.code_left < 2)
                realloc_code();
            current.code_left -= 2;
            start_branch = current.code_max - current.code_left;
            STORE_CODE(current.codep, F_BRANCH);
            STORE_UINT8(current.codep, 0);

            i = block_size - 3;
            if (i < 0)
                lambda_error("Missing argument(s) to #'while\n");

            /* Compile all bodies */
            offset = current.code_left - current.code_max;
            argp += 2;
            if (i) do
            {
                void_given = compile_value(++argp, VOID_WANTED);
                if ( !(void_given & VOID_GIVEN) )
                {
                    /* The body returned a result: POP it */
                    if (current.code_left < 2)
                        realloc_code();
                    current.code_left--;
                    STORE_CODE(current.codep, F_POP_VALUE);
                }
            } while(--i);

            /* Store the proper distance into the initial branch.
             * Rewrite it to a long branch if necessary.
             */
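            /* Editor's note (illustrative): if the bodies compiled to more
             * than 0xff bytes, the initial 2-byte 'BRANCH l1' cannot reach
             * l1. The code below then shifts the generated bodies one byte
             * up and rewrites the branch into a 3-byte 'LBRANCH l1' with a
             * 16-bit offset (offset+2, since the target moves along with
             * the shift).
             */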
            offset = current.code_max - current.code_left - start_branch;
            if (offset > 0xff)
            {
                bytecode_p p;

                if (offset > 0x7ffd)
                    UNIMPLEMENTED
                if (current.code_left < 1)
                    realloc_code();
                current.code_left--;

                /* Move the generated code */
                p = (bytecode_p)current.codep++;
                i = offset;
                do {
                    p--;
                    p[1] = *p;
                } while (--i);

                /* Generate the LBRANCH */
                p = current.code+start_branch-2;
                PUT_CODE(p, F_LBRANCH);
                PUT_SHORT(p+1, offset+2);
                start_branch++;
            }
            else
            {
                PUT_UINT8(current.code+start_branch-1, (unsigned char)offset);
            }

            /* Compile the condition and generate the branch */
            argp = block->item;
            void_given = compile_value(++argp, NEGATE_ACCEPTED);

            if (current.code_left < 3)
                realloc_code();

            offset = current.code_max - current.code_left - start_branch + 1;
            if (offset > 0xff)
            {
                if (offset > 0x8000)
                    UNIMPLEMENTED
                current.code_left -= 3;
                if (void_given & NEGATE_GIVEN)
                    STORE_CODE(current.codep, F_LBRANCH_WHEN_ZERO);
                else
                    STORE_CODE(current.codep, F_LBRANCH_WHEN_NON_ZERO);
                STORE_SHORT(current.codep, -offset);
            }
            else
            {
                current.code_left -= 2;
                if (void_given & NEGATE_GIVEN)
                    STORE_CODE(current.codep, F_BBRANCH_WHEN_ZERO);
                else
                    STORE_CODE(current.codep, F_BBRANCH_WHEN_NON_ZERO);
                STORE_UINT8(current.codep, (bytecode_t)offset);
            }

            /* Compile the result */
            opt_flags = compile_value(++argp, opt_flags);
            break;
          }

        /* ({#'foreach, <var>, <expr>, <body1>, ..., <bodyn> })
         * ({#'foreach, ({ <var1>, ..., <varn> }), <expr>, <body1>, ... })
         */
        case F_FOREACH:
          {
            /* This is compiled as:
             *
             *       PUSH_(LOCAL_)LVALUE <var1>
             *       ...
             *       PUSH_(LOCAL_)LVALUE <varn>
             *       <expr>
             *       FOREACH <numvars> c
             *   l:  <body1>
             *       POP
             *       <body2>
             *       ...
             *       <bodyn>
             *       POP
             *   c:  FOREACH_NEXT l
             *   e:  FOREACH_END
             *       CONST0
             *
             * or if no bodies are given:
             *
             *       <expr>
             *       POP_VALUE
             *       CONST0
             *
             * If a <body> doesn't return a value, the following POP
             * is omitted.
             * If the caller doesn't require a result, the final CONST0
             * is omitted.
             */
            mp_int i;
            int void_given;
            mp_int start;
            mp_int offset;
            int vars_given;
            int body_count;

            body_count = block_size - 3;
            if (body_count < 0)
                lambda_error("Missing argument(s) to #'foreach\n");

            if (!body_count)
            {
                /* Just create the code for the expression
                 * and pop the value
                 */
                compile_value(argp+2, 0);
                if (current.code_left < 2)
                    realloc_code();
                current.code_left--;
                STORE_CODE(current.codep, F_POP_VALUE);

                /* If a result is required, compile a 0 */
                if (opt_flags & VOID_ACCEPTED)
                    opt_flags = VOID_GIVEN;
                else
                {
                    current.code_left--;
                    STORE_CODE(current.codep, F_CONST0);
                }

                break;
            }

            /* Create the code to push the variable lvalues */
            if ((++argp)->type != T_POINTER)
            {
                vars_given = 1;
                if (!is_lvalue(argp, 0))
                    lambda_error("Missing variable lvalue to #'foreach\n");
                compile_lvalue(argp, 0);
            }
            else
            {
                svalue_t * svp;

                svp = argp->u.vec->item;
                vars_given = i = (int)VEC_SIZE(argp->u.vec);

                if (!vars_given)
                    lambda_error("Missing variable lvalue to #'foreach\n");
                if (vars_given > 0xFE)
                    lambda_error("Too many lvalues to #'foreach: %d\n"
                                , vars_given);
                for ( ; i > 0; i--, svp++)
                {
                    if (!is_lvalue(svp, 0))
                        lambda_error("Missing variable lvalue to #'foreach\n");
                    compile_lvalue(svp, 0);
                }
            }

            /* Create the code for the expression */
            compile_value(++argp, 0);

            /* Create the FOREACH instruction and remember the position */
            if (current.code_left < 4)
                realloc_code();
            current.code_left -= 4;
            STORE_CODE(current.codep, F_FOREACH);
            STORE_UINT8(current.codep, vars_given+1);
            STORE_SHORT(current.codep, 0);
            start = current.code_max - current.code_left;

            /* Compile all bodies. */
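            /* Editor's sketch (illustrative): for two loop variables the
             * bytes emitted above are
             *
             *     FOREACH 3 0x00 0x00
             *
             * i.e. vars_given+1 in the argument byte, followed by a 16-bit
             * placeholder offset which is backpatched once the bodies
             * below have been compiled.
             */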
            for (i = body_count; i > 0; i--)
            {
                void_given = compile_value(++argp, VOID_WANTED);
                if ( !(void_given & VOID_GIVEN) )
                {
                    /* The body returned a result: POP it */
                    if (current.code_left < 2)
                        realloc_code();
                    current.code_left--;
                    STORE_CODE(current.codep, F_POP_VALUE);
                }
            }

            /* Store the proper distance into the initial offset. */
            offset = current.code_max - current.code_left - start;
            PUT_SHORT(current.code+start-2, offset);

            /* Generate the FOREACH_NEXT, followed by F_FOREACH_END. */
            if (current.code_left < 5)
                realloc_code();
            current.code_left -= 4;
            STORE_CODE(current.codep, F_FOREACH_NEXT);
            STORE_SHORT(current.codep, offset+3);
            STORE_CODE(current.codep, F_FOREACH_END);

            /* If a result is required, compile a 0 */
            if (opt_flags & VOID_ACCEPTED)
                opt_flags = VOID_GIVEN;
            else
            {
                current.code_left--;
                STORE_CODE(current.codep, F_CONST0);
            }

            break;
          }

        /* ({#'catch, <expr> })
         * ({#'catch, <expr>, 'nolog })
         * ({#'catch, <expr>, 'publish })
         * ({#'catch, <expr>, 'nolog, 'publish })
         * ({#'catch, <expr>, 'nolog, 'publish, 'reserve, <reserve-expr> })
         */
        case F_CATCH:
          {
            /* This is compiled as:
             *
             *      CATCH l / CATCH_NO_LOG l
             *      <expr>
             *   l: END_CATCH
             */
            mp_int start, offset;
            int flags, i;
            int void_given;

            if (block_size < 2 || block_size > 6)
                lambda_error("Wrong number of arguments to #'catch\n");

            flags = 0;
            for (i = 3; i <= block_size; i++)
            {
                if (argp[i-1].type == T_SYMBOL
                 && mstreq(argp[i-1].u.str, STR_NOLOG))
                    flags |= CATCH_FLAG_NOLOG;
                else if (argp[i-1].type == T_SYMBOL
                      && mstreq(argp[i-1].u.str, STR_PUBLISH))
                    flags |= CATCH_FLAG_PUBLISH;
                else if (argp[i-1].type == T_SYMBOL
                      && mstreq(argp[i-1].u.str, STR_RESERVE))
                {
                    if (i >= block_size)
                        lambda_error("Missing expression for 'reserve "
                                     "catch-modifier.\n");
                    flags |= CATCH_FLAG_RESERVE;
                    if (compile_value(argp+i, 0) & VOID_GIVEN)
                        lambda_error("Expression for 'reserve "
                                     "doesn't return a value.\n");
                    i++;
                }
                else
                    lambda_error("Expected 'nolog, 'publish or "
                                 "'reserve as catch-modifier.\n");
            }

            if (current.code_left < 3)
                realloc_code();
            current.code_left -= 3;
            STORE_CODE(current.codep, F_CATCH);
            STORE_UINT8(current.codep, flags);
            STORE_UINT8(current.codep, 0);
            start = current.code_max - current.code_left;

            void_given = compile_value(++argp, 0);

            if (current.code_left < 1)
                realloc_code();
            current.code_left -= 1;
            STORE_CODE(current.codep, F_END_CATCH);

            offset = current.code_max - current.code_left - start;
            if (offset > 0xff)
            {
                UNIMPLEMENTED
            }
            PUT_UINT8(current.code+start-1, (bytecode_t)offset);
            break;
          }

        /* ({#'sscanf, <data>, <fmt>, <lvalue1>, ..., <lvaluen> }) */
        case F_SSCANF:
          {
            /* This is compiled as:
             *
             *   <data>
             *   <fmt>
             *   <lvalue1>
             *   ...
             *   <lvaluen>
             *   SSCANF n+2
             */
            int lvalues;

            if ( (lvalues = block_size - 3) < 0)
                lambda_error("Missing argument(s) to #'sscanf\n");

            if (lvalues > 0xff - 2)
                lambda_error("Too many arguments to #'sscanf\n");

            compile_value(++argp, 0);
            compile_value(++argp, 0);
            while (--lvalues >= 0)
            {
                compile_lvalue(++argp, PROTECT_LVALUE|USE_INDEX_LVALUE);
            }

            if (current.code_left < 2)
                realloc_code();
            current.code_left -= 2;
            STORE_CODE(current.codep, F_SSCANF);
            STORE_CODE(current.codep, (bytecode_t)(block_size - 1));
            break;
          }

#ifdef USE_PARSE_COMMAND
        /* ({#'parse_command, <data>, <context>, <fmt>
         *                  , <lvalue1>, ..., <lvaluen> })
         */
        case F_PARSE_COMMAND:
          {
            /* This is compiled as:
             *
             *   <data>
             *   <context>
             *   <fmt>
             *   <lvalue1>
             *   ...
             *   <lvaluen>
             *   PARSE_COMMAND n+3
             */
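            /* Editor's sketch (illustrative, hypothetical arguments): e.g.
             *
             *     ({#'parse_command, "cmd", 'env, "%o", 'obj })
             *
             * pushes the three values and one protected lvalue and ends in
             *
             *     PARSE_COMMAND 4
             *
             * since block_size-1 (here 4) is the number of stack arguments.
             */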
            int lvalues;

            if ( (lvalues = block_size - 4) < 0)
                lambda_error("Missing argument(s) to #'parse_command\n");

            if (lvalues > 0xff - 3)
                lambda_error("Too many arguments to #'parse_command\n");

            compile_value(++argp, 0);
            compile_value(++argp, 0);
            compile_value(++argp, 0);
            while (--lvalues >= 0)
            {
                compile_lvalue(++argp, PROTECT_LVALUE|USE_INDEX_LVALUE);
            }

            if (current.code_left < 2)
                realloc_code();
            current.code_left -= 2;
            STORE_CODE(current.codep, F_PARSE_COMMAND);
            STORE_CODE(current.codep, (bytecode_t)(block_size - 1));
            break;
          }
#endif

        /* ({#'({, <value1>, ..., <valuen> }) */
        case F_AGGREGATE:
          {
            /* This is compiled as:
             *
             *   <value1>
             *   ...
             *   <valuen>
             *   F_AGGREGATE n
             */
            int i, size;

            size = i = block_size - 1;
            while (--i >= 0)
            {
                compile_value(++argp, REF_REJECTED);
            }
            if (current.code_left < 3)
                realloc_code();
            current.code_left -= 3;
            STORE_CODE(current.codep, F_AGGREGATE);
            STORE_SHORT(current.codep, size);
            break;
          }

        /* ({#'([, <array1>, ..., <arrayn> }) */
        case F_M_CAGGREGATE:
          {
            /* This is compiled as:
             *
             *   <array1>[0]
             *   ...
             *   <array1>[m]
             *   <array2>[0]
             *   ...
             *   <arrayn>[m]
             *   M_(C)AGGREGATE n m
             */
            mp_int i, j;
            mp_int num_keys;    /* Number of keys to add */
            mp_int num_values;  /* Number of values per key */

            num_values = 1;
            i = block_size;
            num_keys = i - 1;

            /* Check and compile all mapping keys and values */
            for (i = block_size; --i;)
            {
                svalue_t *element;

                if ( (++argp)->type != T_POINTER )
                    lambda_error("Bad argument to #'([\n");

                element = argp->u.vec->item;

                /* The first array determines the width */
                j = (mp_int)VEC_SIZE(argp->u.vec);
                if (j != num_values)
                {
                    if (!j)
                        lambda_error("#'([ : Missing key.\n");
                    if (i != num_keys)
                        lambda_error("#'([ : Inconsistent value count.\n");
                    num_values = j;
                }

                while (--j >= 0)
                {
                    compile_value(element++, REF_REJECTED);
                }
            }

            if (current.code_left < 5)
                realloc_code();

            num_values--; /* one item of each subarray is the key */
            if ( (num_keys | num_values) & ~0xff)
            {
                /* More than 255 keys or values: long instruction */
                current.code_left -= 5;
                STORE_CODE(current.codep, F_M_AGGREGATE);
                STORE_SHORT(current.codep, num_keys);
                STORE_SHORT(current.codep, num_values);
            }
            else
            {
                /* Short instruction */
                current.code_left -= 3;
                STORE_CODE(current.codep, F_M_CAGGREGATE);
                STORE_UINT8(current.codep, (unsigned char)num_keys);
                STORE_UINT8(current.codep, (unsigned char)num_values);
            }
            break;
          }

#ifdef USE_STRUCTS
        /* ({#'(<,