
The Next Symta

Name: Anonymous 2014-03-12 19:27

An experimental Lisp-like language:
https://github.com/saniv/symta

Of course it is just a prototype, but it shows how the initial parentheses-ridden mess (the root/boot/*.hit files) bootstraps into a readable language (the root/lib/*.hit files).

Name: Anonymous 2014-03-12 21:07

...
[macroexpand &macroexpand]
[macroexpands? &macroexpands?]
@Env)
) No No No No No No No No No No No No No No No No No No No
No No No No No No No No No No No No No No No No No No No


So far, so good. I was expecting more exclamation marks, though.

Name: Anonymous 2014-03-13 1:30

I LOVE YOUR POSTS

Name: Anonymous 2014-03-13 21:36

>>2
Wow, you weren't joking. That's in /root/boot/stage0.hit:224

Name: Anonymous 2014-03-14 16:56

>>2 >>4
Because stage0 implements the macro system, the `let` macro cannot be used there: it would require the macro system itself. stage0 does, however, contain the definition of `let` (which is immediately used in stage1):


(add_macro let
(_fn (Bs @Xs)
[[_fn (Bs map (_fn (B) (_if ((tag_of B) is text) B (B head)))) @Xs]
[`@` [list @(Bs map (_fn (B) (_if ((tag_of B) is text) Void (B 1))))]]]))

Name: more of this 2014-03-17 10:43

I like how he's not afraid to use uncommon characters in symbols (see symta.lisp).

Name: more of this 2014-03-17 10:45

shit, I meant to bvmp~

Name: more of this 2014-03-17 10:58

god damn it, I'm just too polite

Name: Anonymous 2014-03-18 14:59

It appears that efficiently compiling closures requires a lot of code analysis.

For now I wrote an SSA translator, which looks very much like a CPS translator but is a little more convoluted, because SSA's continuation (the value a function pops to return to the previous context) is implicit and passed on the stack. Still, basic operators like "load" and "funcall" require explicitly specifying where you want to store the value. Here is how I expand lambdas into SSA:

(test-ssa '(|_fn| (a b) (|_fn| (x) (+ (* a x) b))))

((LABEL "f1302187") (LABEL "f1302189") (BASIC LOAD "r1302193" "env" 0)
(BASIC LOAD "r1302194" "r1302193" 1) (BASIC LOAD "r1302195" "env" 1)
(BASIC * "r1302192" "r1302194" "r1302195") (BASIC LOAD "r1302196" "env" 0)
(BASIC LOAD "r1302197" "r1302196" 2)
(BASIC + "r1302191" "r1302192" "r1302197") (RETURN "r1302191")
(BASIC CLOSURE "r1302190" "f1302189") (RETURN "r1302190")
(BASIC CLOSURE "r1302188" "f1302187"))

Name: Anonymous 2014-03-18 15:01

>>9

The "env" is basically a closure pointer, which acts like a stack frame or infamous "this" pointer from C#/Java.

Name: Anonymous 2014-03-29 21:45

still studying compiler design...

SYMTA> (test-ssa (produce-cps '("_fn" ("x") 123) '("_fn" ("+" "*" "x") ("*" ("+" "x" 1) 2))))
((GOTO "e1302905") (LABEL "f1302906") (MOVE "r" 123) (LABEL "e1302905")
(ALLOC "r" 2) (STORE "r" 0 "f1302906") (STORE "r" 1 "env") (LOAD "c" "r" 0)
(ALLOC "a" 2) (COPY "a" 0 "r" 1) (GOTO "e1302907") (LABEL "f1302908")
(GOTO "e1302909") (LABEL "f1302910") (LOAD "r" "env" 0) (LOAD "r" "r" 3)
(LOAD "c" "r" 0) (ALLOC "a" 4) (COPY "a" 0 "r" 1) (LOAD "r" "env" 0)
(LOAD "r" "r" 1) (STORE "b" 1 "r") (LOAD "r" "env" 1) (STORE "b" 2 "r")
(MOVE "r" 2) (STORE "b" 3 "r") (MOVE "env" "b") (GOTO "c") (LABEL "e1302909")
(ALLOC "r" 2) (STORE "r" 0 "f1302910") (STORE "r" 1 "env") (LOAD "c" "r" 0)
(ALLOC "a" 2) (COPY "a" 0 "r" 1) (LOAD "r" "env" 2) (LOAD "c" "r" 0)
(ALLOC "a" 3) (COPY "a" 0 "r" 1) (LOAD "r" "env" 4) (STORE "b" 1 "r")
(MOVE "r" 1) (STORE "b" 2 "r") (MOVE "env" "b") (GOTO "c") (STORE "b" 1 "r")
(MOVE "env" "b") (GOTO "c") (LABEL "e1302907") (ALLOC "r" 2)
(STORE "r" 0 "f1302908") (STORE "r" 1 "env") (STORE "b" 1 "r")
(MOVE "env" "b") (GOTO "c"))
SYMTA>

Name: Anonymous 2014-03-30 12:01

Finally the C/C++ code emitter...

SYMTA> (ssa-to-c (cps-to-ssa (produce-cps '("_fn" ("x") 123) '("_fn" ("+" "*" "x") ("*" ("+" "x" 1) 2)))))

"static void entry() {
e1303072();
}

static void f1303073() {
R = 123;
}

static void e1303072() {
R = alloc(2);
R[0] = f1303073;
R[1] = ENV;
C = R[0];
A = alloc(2);
A[0] = R[1];
e1303074();
}

static void f1303075() {
e1303076();
}

static void f1303077() {
R = ENV[0];
R = R[3];
C = R[0];
A = alloc(4);
A[0] = R[1];
R = ENV[0];
R = R[1];
B[1] = R;
R = ENV[1];
B[2] = R;
R = 2;
B[3] = R;
ENV = B;
C();
}

static void e1303076() {
R = alloc(2);
R[0] = f1303077;
R[1] = ENV;
C = R[0];
A = alloc(2);
A[0] = R[1];
R = ENV[2];
C = R[0];
A = alloc(3);
A[0] = R[1];
R = ENV[4];
B[1] = R;
R = 1;
B[2] = R;
ENV = B;
C();
B[1] = R;
ENV = B;
C();
}

static void e1303074() {
R = alloc(2);
R[0] = f1303075;
R[1] = ENV;
B[1] = R;
ENV = B;
C();
}
"

Name: Anonymous 2014-04-02 18:10

A somewhat optimized version of the Lisp-to-C compiler. The next optimization would be passing parameters in registers and reusing the parent closure; that way we can get away without allocating closures in most cases.

SYMTA> (ssa-compile '("*" ("+" 123 789) 456))
"#include \"common.h\"
static void f3164();
static void f3166();
static void f3168();
static void f3170();
static void f3172();
static void f3174();
static void e3173();
static void e3171();
static void e3169();
static void e3167();
static void e3165();
static void f3176();
static void f3178();
static void e3177();
static void e3175();
static void e3163();
static void entry() {
MOVE(R, run);
TAGCHECK(R, T_CLOSURE);
MOVE(C, R);
ALLOC(A, 1);
e3163();
}

static void f3164() {
printf(\"entering %s\\n\", \"f3164\");
e3165();
}

static void f3166() {
printf(\"entering %s\\n\", \"f3166\");
e3167();
}

static void f3168() {
printf(\"entering %s\\n\", \"f3168\");
LOAD(R, P, 0);
LOAD(R, R, 1);
TAGCHECK(R, T_CLOSURE);
MOVE(C, R);
ALLOC(A, 2);
e3169();
}

static void f3170() {
printf(\"entering %s\\n\", \"f3170\");
e3171();
}

static void f3172() {
printf(\"entering %s\\n\", \"f3172\");
LOAD(R, P, 0);
LOAD(R, R, 1);
TAGCHECK(R, T_CLOSURE);
MOVE(C, R);
ALLOC(A, 2);
e3173();
}

static void f3174() {
printf(\"entering %s\\n\", \"f3174\");
LOAD(R, P, 0);
LOAD(R, R, 0);
TAGCHECK(R, T_CLOSURE);
MOVE(C, R);
ALLOC(A, 3);
LOAD(R, P, 1);
LOAD(R, R, 0);
STORE(A, 0, R);
LOAD(R, P, 2);
LOAD(R, R, 0);
STORE(A, 1, R);
LOAD(R, E, 0);
STORE(A, 2, R);
MOVE(E, A);
CALL(C);
}

static void e3173() {
printf(\"entering %s\\n\", \"e3173\");
ALLOC(R, 3);
COPY(R, 0, P, 1);
COPY(R, 1, P, 0);
COPY(R, 2, P, 2);
CLOSURE(R, f3174, R);
STORE(A, 0, R);
LOAD(R, E, 0);
STORE(A, 1, R);
MOVE(E, A);
CALL(C);
}

static void e3171() {
printf(\"entering %s\\n\", \"e3171\");
ALLOC(R, 3);
COPY(R, 0, P, 0);
COPY(R, 1, P, 1);
STORE(R, 2, E);
CLOSURE(R, f3172, R);
MOVE(C, R);
ALLOC(A, 1);
STRING(R, \"+\");
STORE(A, 0, R);
MOVE(E, A);
CALL(C);
}

static void e3169() {
printf(\"entering %s\\n\", \"e3169\");
ALLOC(R, 2);
COPY(R, 0, P, 0);
COPY(R, 1, P, 1);
CLOSURE(R, f3170, R);
STORE(A, 0, R);
LOAD(R, E, 0);
STORE(A, 1, R);
MOVE(E, A);
CALL(C);
}

static void e3167() {
printf(\"entering %s\\n\", \"e3167\");
ALLOC(R, 2);
COPY(R, 0, P, 0);
STORE(R, 1, E);
CLOSURE(R, f3168, R);
MOVE(C, R);
ALLOC(A, 1);
STRING(R, \"*\");
STORE(A, 0, R);
MOVE(E, A);
CALL(C);
}

static void e3165() {
printf(\"entering %s\\n\", \"e3165\");
ALLOC(R, 1);
STORE(R, 0, E);
CLOSURE(R, f3166, R);
MOVE(C, R);
ALLOC(A, 1);
e3175();
}

static void f3176() {
printf(\"entering %s\\n\", \"f3176\");
LOAD(R, E, 2);
TAGCHECK(R, T_CLOSURE);
MOVE(C, R);
ALLOC(A, 3);
e3177();
}

static void f3178() {
printf(\"entering %s\\n\", \"f3178\");
LOAD(R, P, 0);
LOAD(R, R, 1);
TAGCHECK(R, T_CLOSURE);
MOVE(C, R);
ALLOC(A, 3);
LOAD(R, P, 0);
LOAD(R, R, 0);
STORE(A, 0, R);
LOAD(R, E, 0);
STORE(A, 1, R);
INTEGER(R, 456);
STORE(A, 2, R);
MOVE(E, A);
CALL(C);
}

static void e3177() {
printf(\"entering %s\\n\", \"e3177\");
ALLOC(R, 1);
STORE(R, 0, E);
CLOSURE(R, f3178, R);
STORE(A, 0, R);
INTEGER(R, 123);
STORE(A, 1, R);
INTEGER(R, 789);
STORE(A, 2, R);
MOVE(E, A);
CALL(C);
}

static void e3175() {
printf(\"entering %s\\n\", \"e3175\");
ALLOC(R, 0);
CLOSURE(R, f3176, R);
STORE(A, 0, R);
MOVE(E, A);
CALL(C);
}

static void e3163() {
printf(\"entering %s\\n\", \"e3163\");
ALLOC(R, 0);
CLOSURE(R, f3164, R);
STORE(A, 0, R);
MOVE(E, A);
CALL(C);
}

Name: Anonymous 2014-04-02 22:27

>>9
It appears that efficiently compiling anything requires a lot of code analysis.
FTFY

Name: Anonymous 2014-04-02 23:25

PLEASE
POST
MORE

Name: Anonymous 2014-04-03 4:57

Mooooooooar pls :D

Name: Anonymous 2014-04-03 7:55

Your piece of shit C code leaks memory. Also, it allocates memory with malloc for something as simple as multiplication and addition. I would guess even a normal Lisp interpreter would be faster than your shit.

Name: Anonymous 2014-04-03 8:28

Just use fucking LLVM already.

Name: Anonymous 2014-04-03 10:06

>>18
LLVM can't target 8-bit AVR controllers.

Name: Anonymous 2014-04-03 15:01

I once considered renaming Symta to "The Hitler Programming Language" (there is already The Stalin Scheme Compiler) or "The Obama Programming Language" (Iraq and Afghanistan were a piece of cake), but since Putin's invasion of Crimea, I will rename Symta to "The Putin Programming Language".

Name: Anonymous 2014-04-03 15:10

>>14

You can use lambda closures to represent anything.
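
For instance, pairs (and from them lists, booleans, numbers, ...) need nothing but closures; in plain Common Lisp this is just the standard Church-pair trick:

(defun pair (a b) (lambda (pick) (funcall pick a b)))
(defun fst (p) (funcall p (lambda (a b) (declare (ignore b)) a)))
(defun snd (p) (funcall p (lambda (a b) (declare (ignore a)) b)))

;; (fst (pair 1 2)) => 1,  (snd (pair 1 2)) => 2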

Name: Anonymous 2014-04-03 15:32

>>20
Just go with "Auschwitz".

Name: Anonymous 2014-04-03 15:47

>>18

http://en.wikipedia.org/wiki/LLVM
Written in C++
no thanks.

Name: Anonymous 2014-04-03 18:01

>>21
Still a weaker statement of analysis complexity than >>14-san's.

Name: Anonymous 2014-04-03 18:09

So basically a barebones Scheme-to-C or Scheme-to-ASM compiler takes about 200 lines of Common Lisp code. The same should be true for Haskell (the typechecker would be a set of macros, not part of the compiler). The most complex part should be the runtime, because support for bignums requires a lot of assembly hacking.

Name: Anonymous 2014-04-03 18:26

>>25

The cool point about the compiler is that, while it is part of the runtime, you can use it to compile itself, given that you take most of the macros from the host Lisp system. You would need at least the DEFUN, IF, LET and COND macros to write a Scheme compiler.
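
LET and COND in particular are just sugar over LAMBDA and IF, so the core compiler never has to see them. In the host Common Lisp the expansions are roughly (my-let and my-cond are toy names, and the edge cases of real LET/COND are ignored):

(defmacro my-let (bindings &body body)
  ;; bindings become the parameter list of an immediately applied function
  `((lambda ,(mapcar #'first bindings) ,@body)
    ,@(mapcar #'second bindings)))

(defmacro my-cond (&rest clauses)
  ;; a chain of clauses becomes nested IFs
  (if (null clauses)
      'nil
      (destructuring-bind ((test . forms) . rest) clauses
        `(if ,test (progn ,@forms) (my-cond ,@rest)))))

;; (my-let ((x 1) (y 2)) (+ x y))  expands to  ((lambda (x y) (+ x y)) 1 2)
;; (my-cond ((> x 0) 'pos) (t 'non-pos))  expands to nested IFs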

Name: Anonymous 2014-04-03 18:36

>>24
Do you know of any computation system which cannot be expressed in terms of the Lambda Calculus, and therefore cannot be efficiently compiled using the techniques applicable to the Lambda Calculus or to the Turing Machine, to which the Lambda Calculus corresponds?

Name: Anonymous 2014-04-03 18:56

>>27
PHP

Name: Anonymous 2014-04-03 19:00

>>27
That's not the point I was trying to make. ``Analyzing code is hard'' is more stringent than ``Analyzing lambda calculus is hard''. The latter statement admits the possibility of a computation system which is easy to analyze, but which, when transformed into lambda calculus (by any and every method of transformation), is hard to analyze. The former does not.

Name: Anonymous 2014-04-03 19:15

>>27
And after reading this thread again I completely misread what you were saying. Ignore >>29, I don't know what I'm talking about.

Name: Anonymous 2014-04-04 18:17

>>30

I'm saying that if you have an efficient compiler for lambda calculus, then efficiently compiling, for example, Java code is a matter of translating Java to lambda calculus and running the present compiler.

Name: Anonymous 2014-04-04 19:06

>>31
Still, doesn't that throw the work back onto the ``translating Java to lambda calculus'' phase?

Name: Anonymous 2014-04-05 6:30

>>32
Nope. Java's compiler already does this transformation, producing code linked with an environment, and the special cases of continuations are always there: although Java disallows TCO, it always has the program counter and its environment on the stack.

Name: Anonymous 2014-04-05 6:37

>>33
So you're saying that, internally, the Java compiler explicitly translates input source code to lambda calculus representation, then translates that lambda calculus to Java bytecode? Interesting, I didn't know that.

Name: Anonymous 2014-04-05 8:57

>>34
Nope. The Java compiler translates to a representation closely matching continuation-passing style.

BTW, what does ((λ (f) (f f)) (call/cc call/cc)) do?

Name: Anonymous 2014-04-05 10:51

this fucking slav nigger is crazier than tdavis. we should put him down.

Name: Anonymous 2014-04-05 14:39

>>35
I still don't think I'm following you. Which one is it? If the Java compiler performs its compilation by ``translating Java to lambda calculus and running the present compiler'', then why say ``Nope''? If the Java compiler doesn't perform its compilation by ``translating Java to lambda calculus and running the present compiler'', why say ``Java's compiler already does this transformation''?

Your example is also not illuminating. What does a modified form of the yin yang puzzle have to do with compiling Java?

Name: Anonymous 2014-04-05 15:27

>>37
"Nope" means you don't understand what Java compiler translates Java expressions into a form called SSA, which could be translated into continuation-passing-style (a simplified form of Lambda Calculus).

And wtf is the "yin yang puzzle"? And where is my "not illuminating" "example"?

Name: Anonymous 2014-04-05 16:48

>>38
It seems clear from your ``Nope'' that you aren't claiming that lambda calculus and SSA representation are the same thing, but that it is trivial to translate between them. I don't care enough to verify that, so that sounds reasonable. For your reference, what confused me was that, when you said ``Java's compiler already does this transformation'', you weren't referring to the full transformation of Java to lambda calculus that I was asking about from your ``efficiently compiling for example Java code is the matter of translating Java to lambda calculus and running the present compiler'' statement.

As to ``wtf is "yin yang puzzle"?'', I'll refer you to these search results for ``yin yang callcc'':

http://everything2.com/title/call%252Fcc+yin-yang+puzzle
http://stackoverflow.com/questions/2694679/how-does-the-yin-yang-puzzle-work

Name: Anonymous 2014-04-05 17:26

>>39

It seems clear from your ``Nope'' that you aren't claiming that lambda calculus and SSA representation are the same thing, but that it is trivial to translate between them.
Exactly! It takes fewer steps to translate between CPS and SSA than, for example, between x86 assembly code and C++, because translating machine code back into C++ also requires an intermediate form. De-compilers usually construct jmp/branch graphs, whose nodes correspond to lambdas, and the next stage is determining their environments (which values are live for which node).

For your reference, what confused me was that, when you said ``Java's compiler already does this transformation'', you weren't referring to the full transformation of Java to lambda calculus that I was asking about from your ``efficiently compiling for example Java code is the matter of translating Java to lambda calculus and running the present compiler'' statement.
Oh yes. These are the nuances of compiler implementation. Nothing stops a compiler designer from using CPS instead of other equivalent intermediate forms. These forms differ in the same way XML differs from SEXPs.

http://everything2.com/title/call%252Fcc+yin-yang+puzzle
Cool stuff! I never knew about it. I've seen (call/cc call/cc) in some Usenet discussion. It is basically a version of the Y-combinator.

Name: Anonymous 2014-04-06 19:04

Providing essential arithmetic functions requires more lines of C/C++ code than the whole compiler itself! And these are very basic - no bignums!

Name: Anonymous 2014-04-06 19:05

This retarded engine doesn't support the [m] tag

static void b_add() {
void *k, *a, *b;
CHECK_NARGS(3);
k = getArg(0);
a = getArg(1);
b = getArg(2);
CHECK_TAG(a, T_INTEGER);
CHECK_TAG(b, T_INTEGER);
ALLOC(E, 1);
STORE(E, 0, getVal(a) + getVal(b));
MOVE(N, 1);
CALL(k);
}

static void b_mul() {
void *k, *a, *b;
CHECK_NARGS(3);
k = getArg(0);
a = getArg(1);
b = getArg(2);
CHECK_TAG(a, T_INTEGER);
CHECK_TAG(b, T_INTEGER);
ALLOC(E, 1);
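/* presumably fixnums keep an all-zero low tag: the tagged sum in b_add is just
   getVal(a) + getVal(b), while a tagged product needs one operand shifted down first */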
STORE(E, 0, (getVal(a)>>TAG_BITS) * getVal(b));
MOVE(N, 1);
CALL(k);
}

static struct {
char *name;
void (*fun)();
} builtins[] = {
{"+", b_add},
{"*", b_mul},
{0, 0}
};

static void host_f() {
int i;
uintptr_t *k, *t_name;
CHECK_NARGS(2);
k = getArg(0);
t_name = getArg(1);
CHECK_TAG(t_name, T_STRING);

char *name = (char*)getVal(t_name);
for (i = 0; builtins[i].name; i++) {
if (!strcmp(builtins[i].name, name)) {
ALLOC(E, 1);
CLOSURE(R, builtins[i].fun);
STORE(E, 0, R);
MOVE(N, 1);
CALL(k);
}
}

printf("host doesn't provide \"%s\"\n", name);
abort();
}

Name: Anonymous 2014-04-06 19:09

>>42
It supports monospaced tags just fine. You were just doing it wrong.

Name: Anonymous 2014-04-07 16:33

>>43

[pre]does that work?[/pre]

Name: Anonymous 2014-04-07 17:33

>>44
There is a preview button.

Name: Anonymous 2015-05-09 17:47

>>40
Who are you quoting?

Name: Anonymous 2015-05-10 11:37

>>46
Whom are you quoting?
