module arsd.structures;
import std.stdio;

/*
	Allocators:

	Simple allocators:
		memory regions that can optionally grow

	static arrays are simple allocators
	slices are simple allocators

	Complex allocators take simple allocators

*/
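
// A rough sketch of the "simple allocator" idea above (the grab helper is
// hypothetical, not part of the design): a static array is a fixed memory
// region, and carving slices off it hands out sub-regions with no heap use.
unittest {
	ubyte[64] region;              // the simple allocator: a fixed region
	ubyte[] remaining = region[];

	// bump-style carve: take n bytes off the front of the region
	ubyte[] grab(size_t n) {
		auto r = remaining[0 .. n];
		remaining = remaining[n .. $];
		return r;
	}

	auto a = grab(16);
	auto b = grab(8);
	assert(a.length == 16 && b.length == 8);
	assert(a.ptr is region.ptr);   // both live inside the original region
	assert(b.ptr is region.ptr + 16);
}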

/*
class Foo {
	void bar() {}
}
*/

alias char[] Foo;

void main() {
	// heap-allocate the buffer so Unique's delete below is valid
	auto test = new char[100];
	test[0 .. 5] = "hello";
	auto a = Unique!(Foo)(test[0..5]);
	//a.bar();

	lol(a.loan);
	//int b = *a;
	writeln("about to enter u");
	u(a.release);
	writeln("u is gone");
}

void lol(Loaned!Foo foo) {
	write("payload is: ");
	void writer(char c) { write(c); }
	foo.visitEach(&writer);
	writeln();
	writeln(foo[2]);
}
void u(Unique!Foo foo) {
	writeln("in u");
	writeln("leaving u");
}

struct Unique(T) {
	@disable this();
	@disable this(this) {}

	private T payload;

	// forwards member access to the payload (opDot is deprecated in
	// modern D; alias this or opDispatch would take its place)
	T opDot() { return payload; }

	this(T t) {
		this.payload = t;
	}

	~this() {
		if(this.payload !is null) {
			// note: delete is deprecated in modern D
			delete this.payload;
			writefln("payload deleted");
		}
	}

	typeof(*payload) opUnary(string op : "*")() {
		return *payload;
	}

	// hand the payload off to a new Unique, leaving this one empty
	Unique!T release() {
		auto oldpayload = this.payload;
		this.payload = null;
		return Unique!T(oldpayload);
	}

	Loaned!T loan() {
		return this.payload.loan;
	}
}

/*
	Loans are not copyable, not default constructible, and not assignable.

	What this means is:

	Loaned!T l;
	{
		Unique!T u;
		l = u.loan;
	}
	// l now refers to a dead pointer

	Is impossible.
*/
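
// A small compile-time check of the property described above (a sketch
// mirroring the comment, not code from the original design):
unittest {
	// default construction is rejected, so a loan cannot be declared
	// somewhere that outlives its owner and filled in later
	static assert(!__traits(compiles, { Loaned!(char[]) l; }));

	// copying is rejected too
	static assert(!__traits(compiles, {
		char[5] buf;
		auto l = loan(buf[]);
		auto l2 = l;
	}));
}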

mixin template SliceFunctions(T) {
	auto opIndex(size_t idx) {
		return payload[idx];
	}

	T copyInto(T where, size_t idxStart = 0) {
		where[] = payload[idxStart .. idxStart + where.length];
		return where;
	}

	version(without_gc) {} else
	T dupGc() pure const {
		return payload.dup;
	}

	// visit each element without handing out a copyable reference
	void visitEach(scope void delegate(typeof(payload[0])) dg) {
		foreach(c; payload)
			dg(c);
	}

	int opApply(scope int delegate(typeof(payload[0])) dg) {
		foreach(c; payload)
			if(auto result = dg(c))
				return result;
		return 0;
	}

	Loaned!(typeof(payload.ptr)) ptr() {
		return Loaned!(typeof(payload.ptr))(payload.ptr);
	}

	Loaned!(T) opSlice(size_t s1, size_t s2) {
		return Loaned!(T)(payload[s1 .. s2]);
	}
}

mixin template PointerFunctions(T) {
	typeof(*payload) opUnary(string op : "*")() @safe {
		return *payload;
	}
}

struct Loaned(T) {
	@disable this();
	@disable this(this) {}
	private T payload;

	this(T t) @safe {
		this.payload = t;
	}

	// pick up the right helpers for the payload kind
	// (or isSliceable!T as the more general test ???)
	static if(is(T == E[], E))
		mixin SliceFunctions!T;
	else static if(is(T == E*, E))
		mixin PointerFunctions!T;

	/*
		We can:
			1) set a slice
			2) copy a slice (given a buffer or an allocator)
			3) visit a slice (non-copyable range?)

			But we cannot *get* a slice, since that could escape the reference. Perhaps this could be allowed in @system code though.

	*/

	Loaned!T loan() @safe {
		return Loaned!T(this.payload);
	}
}

Loaned!T loan(T)(T t) {
	return Loaned!T(t);
}


import core.stdc.stdlib;
alias realloc manual_realloc;
alias free manual_free;

void manual_free(Object o) {
	// look up the destructor on the object's own dynamic type,
	// not on Object itself
	auto dtor = cast(void function(Object o)) o.classinfo.destructor;
	if(dtor)
		dtor(o);
	free(cast(void*) o);
}

// Don't forget that if you save a slice and this goes out of scope, your
// slice is no good!
struct InPlaceArray(ElementType, size_t capacity) {
	ElementType[capacity] buffer;
	size_t length;
	inout(ElementType)[] slice() inout { return buffer[0 .. this.length]; }
	alias slice this;

	typeof(this) opOpAssign(string op : "~")(in ElementType[] rhs) {
		assert(this.length + rhs.length <= capacity);
		buffer[this.length .. this.length + rhs.length] = rhs[];
		this.length += rhs.length;
		return this;
	}

	typeof(this) opOpAssign(string op : "~")(in ElementType rhs) {
		assert(this.length < capacity);
		buffer[this.length] = rhs;
		this.length += 1;
		return this;
	}
}
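
// Quick usage sketch for InPlaceArray: fixed-capacity storage with slice
// access and no heap allocation.
unittest {
	InPlaceArray!(char, 8) a;
	a ~= "hi";
	a ~= '!';
	assert(a.length == 3);
	assert(a.slice == "hi!");
	assert(a[1] == 'i'); // alias slice this gives indexing for free
}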

struct LinkedList(T, alias allocator) {

	private struct LinkedListNode {
		T payload;
		LinkedListNode* next;
	}

	LinkedListNode* front;

	void prepend(T what) {
		LinkedListNode* node;
		static if(is(typeof(allocator) == typeof(null)))
			node = new LinkedListNode;
		else
			node = allocator.allocate();

		node.payload = what;
		node.next = front;

		front = node;
	}

	void removeFront() {
		auto old = front;
		front = front.next;

		static if(!is(typeof(allocator) == typeof(null)))
			allocator.free(old);
	}

	// a minimal forward range over the list's payloads
	auto range() {
		static struct Range {
			LinkedListNode* current;
			bool empty() { return current is null; }
			ref T front() { return current.payload; }
			void popFront() { current = current.next; }
		}
		return Range(front);
	}
}

void llTest() {
	//Pool!(LinkedList!int.LinkedListNode, 16) pool;
	LinkedList!(int, null) list;

	list.prepend(10);
	list.prepend(15);

	import std.stdio;
	auto front = list.front;
	while(front) {
		writeln(front.payload);
		front = front.next;
	}
}
version(none)
void main() { llTest(); }

/*
	Two types of containers:

	1) need to grow themselves
		e.g. arrays

		These need a backing with the append operation (which may or may not allocate)
		and perhaps discard and reserve functions

		Should they own the memory? I think no... but yes.

	2) need to create sub-containers
		e.g. linked lists

		These need an allocator
*/

struct Stack(ElementType, size_t capacity) {
	private InPlaceArray!(ElementType, capacity) buffer;

	void push(ElementType t) {
		buffer ~= t;
	}

	ElementType pop() {
		assert(buffer.length);
		return buffer.buffer[--buffer.length];
	}
}
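
// Quick usage sketch for Stack: LIFO on top of InPlaceArray.
unittest {
	Stack!(int, 4) s;
	s.push(1);
	s.push(2);
	assert(s.pop() == 2);
	assert(s.pop() == 1);
}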

struct InPlaceQueue(ElementType, size_t capacity) {
	ElementType[capacity] buffer;
	size_t length;

	// FIXME: operations not implemented yet
}

struct InPlaceLinkedList(ElementType, size_t capacity) {
	struct ListItem {
		ElementType content;
		ListItem* next;
	}

	Pool!(ListItem, capacity) pool;

	ListItem* front;

	ListItem* prepend(ElementType element) {
		auto item = pool.allocate();
		item.content = element;
		item.next = front;
		front = item;
		return item;
	}

	void removeNext(ListItem* ptr) {
		assert(ptr !is null);
		assert(ptr.next !is null);
		auto oldptr = ptr.next;
		ptr.next = ptr.next.next;
		pool.free(oldptr);
	}

	void popHead() {
		assert(front !is null);
		// save the old head; advancing first and then freeing `front`
		// would free the wrong node
		auto old = front;
		front = front.next;
		pool.free(old);
	}

	// since we have to find the previous item to keep the chain going, this is going
	// to be a little slower than the above two
	void findAndRemove(ListItem* ptr) {
		assert(ptr !is null);
		auto f = this.front;
		ListItem* prev = null;
		while(f) {
			if(f is ptr) {
				if(prev is null) {
					// it is the first one, so we can't remove next
					popHead();
				} else
					removeNext(prev);
				break;
			}
			prev = f;
			f = f.next;
		}
	}
}

size_t[max] arrayTo(size_t max)() {
	size_t[max] a;
	foreach(i; 0 .. max)
		a[i] = i;
	return a;
}
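
// arrayTo builds [0, 1, ..., max-1] as a static array; since it is
// CTFE-able it also works at compile time:
unittest {
	enum a = arrayTo!5;
	static assert(a[0] == 0 && a[4] == 4);

	auto b = arrayTo!3;
	foreach(i, v; b)
		assert(v == i);
}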


// thanks, Walter!
struct CircularBuffer(U: T[dim], T, size_t dim)
{
    void put(T e)
    {
        assert(length != dim);
        size_t i = first + length;
        if (i >= dim)
            i -= dim;
        buf[i] = e;
        length += 1;
    }

    @property T front()
    {
        assert(length);
        return buf[first];
    }

    void popFront()
    {
        assert(length);
        first += 1;
        if (first == dim)
            first = 0;
        --length;
    }

    @property bool full()
    {
        return length == dim;
    }

    @property bool empty()
    {
        return length == 0;
    }

    T opIndex(size_t i)
    {
        assert(i < length);
        i += first;
        if (i >= dim)
            i -= dim;
        return buf[i];
    }

    size_t length;

  private:
    size_t first;
    T[dim] buf = void;
}

unittest
{
    CircularBuffer!(ubyte[3]) buf;
    assert(buf.empty);
    assert(buf.length == 0);
    assert(!buf.full);

    buf.put(7);
    assert(!buf.empty);
    assert(buf.length == 1);
    assert(!buf.full);
    assert(buf[0] == 7);

    buf.put(8);
    buf.put(9);
    assert(!buf.empty);
    assert(buf.length == 3);
    assert(buf.full);

    assert(buf.front == 7);
    buf.popFront();
    buf.put(10);
    assert(!buf.empty);
    assert(buf.length == 3);
    assert(buf.full);
    assert(buf[0] == 8);
    assert(buf[1] == 9);
    assert(buf[2] == 10);
}

struct Pool(ElementType, size_t capacity) {
	ElementType[capacity] buffer;
	bool[capacity] bufferSlotInUse;

	CircularBuffer!(size_t[capacity]) freeList;
	bool initialized;

	// it is your responsibility to initialize the object!
	ElementType* allocate() {
		if(!initialized) {
			foreach(i; 0 .. capacity)
				freeList.put(i);
			initialized = true;
		}

		auto firstAvailableSlot = freeList.front;
		freeList.popFront;

		bufferSlotInUse[firstAvailableSlot] = true;
		auto slot = &(buffer[firstAvailableSlot]);

		return slot;
	}

	// it is your responsibility to run the destructor!
	void free(ref ElementType* item) {
		// pointer arithmetic on ElementType* is already scaled by the
		// element size, so bounds and index are computed in element units
		assert(item >= buffer.ptr);
		assert(item < buffer.ptr + capacity);

		size_t idx = item - buffer.ptr;
		assert(idx < capacity);
		bufferSlotInUse[idx] = false;

		freeList.put(idx);

		item = null;
	}
}
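
// Quick usage sketch for Pool: fixed-slot allocation with slot reuse and
// the caller's pointer nulled on free.
unittest {
	Pool!(int, 2) pool;
	auto a = pool.allocate();
	auto b = pool.allocate();
	*a = 1;
	*b = 2;

	auto saved = a;
	pool.free(a);
	assert(a is null);      // free nulls the caller's pointer

	auto c = pool.allocate();
	assert(c is saved);     // the freed slot is handed out again
}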


final class HeapArrayBacking(T) {
	T* backing;
	size_t capacity;
	size_t length;
	int refcount;

	T[] slice() {
		return backing[0 .. length];
	}

	void setCapacity(size_t capacity) {
		backing = cast(T*) manual_realloc(backing, capacity * T.sizeof);
		this.capacity = capacity;
		if(length > capacity)
			length = capacity;
	}

	void append(T rhs) {
		if(length == capacity) {
			throw new Exception("out of space");
			// FIXME: realloc?
		}
		backing[this.length] = rhs;
		this.length ++;
	}

	void append(in T[] rhs) {
		// check that the whole slice fits, not just that one slot is left
		if(length + rhs.length > capacity) {
			throw new Exception("out of space");
			// FIXME: realloc?
		}
		backing[this.length .. this.length + rhs.length] = rhs[];
		this.length += rhs.length;
	}

	T at(size_t idx, string file = __FILE__, size_t line = __LINE__) {
		if(idx >= length)
			throw new Exception("range error", file, line);
		return backing[idx];
	}

	this() {

	}

	~this() {
		assert(this.refcount == 0);
		if(backing !is null) {
			manual_free(backing);
		}
	}
}

struct HeapArray(T) {
	HeapArrayBacking!T backing;
	@disable this();

	this(size_t capacity) {
		//backing = new HeapArrayBacking!T;
		// emplace the class instance into malloc'd memory so the whole
		// thing stays off the GC heap (classinfo.init is spelled
		// initializer in modern druntime)
		void* buffer = malloc(__traits(classInstanceSize, HeapArrayBacking!T));
		buffer[0 .. __traits(classInstanceSize, HeapArrayBacking!T)] = HeapArrayBacking!T.classinfo.init[];

		backing = cast(HeapArrayBacking!T) buffer;

		backing.setCapacity(capacity);
		backing.refcount++;
	}

	this(HeapArray!T reference) {
		backing = reference.backing;
		backing.refcount++;
	}

	this(this) {
		backing.refcount++;
	}

	~this() {
		backing.refcount--;
		if(backing.refcount == 0)
			manual_free(backing);
	}

	@disable void opAssign(typeof(this) rhs) {}

	typeof(this) copy() {
		auto cp = HeapArray!T(this.backing.capacity);
		cp ~= this.slice();
		return cp;
	}

	typeof(this) opOpAssign(string op : "~")(in T[] rhs) {
		backing.append(rhs);
		return this;
	}

	typeof(this) opOpAssign(string op : "~")(in T rhs) {
		backing.append(rhs);
		return this;
	}

	T opIndex(size_t idx, string file = __FILE__, size_t line = __LINE__) {
		return backing.at(idx, file, line);
	}

	T[] slice() { return backing.slice; }
	alias slice this;
}
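
// Usage sketch for HeapArray: a malloc-backed, refcounted array whose
// backing is freed when the last copy goes out of scope.
unittest {
	auto arr = HeapArray!char(16);
	arr ~= "hi";
	arr ~= '!';
	assert(arr.slice == "hi!");

	auto second = arr;  // postblit bumps the shared refcount
	assert(arr.backing.refcount == 2);
	// both copies dying at end of scope drops the count to 0 and frees
}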

void poolTest() {
	Pool!(int, 6) pool;
	foreach(i; 0 .. 10000) {
		auto a = pool.allocate();
		*a = 10;
		pool.free(a);
	}
}

void gcTest() {
	foreach(i; 0 .. 10000) {
		auto a = new int;
		*a = 10;
	}
}


version(none)
void main() {
	import std.datetime;
	import std.stdio;

	auto r = benchmark!(poolTest, gcTest)(10_000);
	writeln(r);
	writeln(cast(real) r[1].length / r[0].length);

/*
	InPlaceLinkedList!(int, 3) list;

	list.prepend(10);
	auto t = list.prepend(30);
	list.prepend(40);
	list.findAndRemove(t);
	list.prepend(50);

	import std.stdio;
	auto front = list.front;
	while(front) {
		writeln(front.content);
		front = front.next;
	}
*/
}
/**
	Memory management tips

	1) use InPlaceArrays (stack allocated) whenever you can
	2) HeapArrays are the second choice; they are refcounted automatically
	3) OwnedArrays are like heap arrays, but non-copyable
*/


// non-refcounted, non-copyable
struct OwnedArray(T) {
	@disable this();
	@disable this(this);
	this(size_t capacity) {
		// FIXME: not implemented yet
	}

	~this() {

	}
}

// FIXME: finish this and decide if it is even a good idea
struct RefCountedSlice(T) {
	@disable this();
	HeapArrayBacking!T backing;
	this(HeapArray!T ha, size_t start, size_t end) {
		this.backing = ha.backing;
		this.start = start;
		this.end = end;
		backing.refcount++;
	}

	this(this) {
		backing.refcount++;
	}

	~this() {
		backing.refcount--;
		if(backing.refcount == 0)
			manual_free(backing);
	}

	// copying materializes an independent HeapArray rather than another slice
	HeapArray!T copy() {
		auto cp = HeapArray!T(this.backing.capacity);
		cp ~= this.slice();
		return cp;
	}

	size_t start;
	size_t end;
	T opIndex(size_t idx, string file = __FILE__, size_t line = __LINE__) {
		return backing.at(idx + start, file, line);
	}

	T[] slice() { return backing.slice[start .. end]; }
	alias slice this;

}

// introduces double indirection but it is easy
mixin template SimpleRefCounting(T, string freeCode) {
	final class RefCount {
		T payload;
		int refcount;
		this(T t) { payload = t; }
		~this() {
			assert(refcount == 0);
			mixin(freeCode);
		}
	}

	private RefCount payload;
	@property T getPayload() { return payload.payload; }
	alias getPayload this;
	@disable this();
	this(T t) {
		// the RefCount block is malloc'd (like HeapArray's backing above)
		// since it is released with manual_free, not by the GC
		enum size = __traits(classInstanceSize, RefCount);
		void* buffer = malloc(size);
		buffer[0 .. size] = RefCount.classinfo.init[];
		payload = cast(RefCount) buffer;
		payload.payload = t;
		payload.refcount = 1;
	}

	this(typeof(this) reference) {
		payload = reference.payload;
		payload.refcount++;
	}

	this(this) {
		payload.refcount++;
	}

	~this() {
		payload.refcount--;
		if(payload.refcount == 0)
			manual_free(payload);
	}
}
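
// Usage sketch for SimpleRefCounting (ManualBuffer is a hypothetical
// wrapper, not part of the original): refcounting a malloc'd buffer, with
// the freeCode string run when the last copy dies.
struct ManualBuffer {
	mixin SimpleRefCounting!(ubyte*, q{
		free(payload); // payload here is the wrapped ubyte*
	});
}

unittest {
	auto b = ManualBuffer(cast(ubyte*) malloc(32));
	auto c = b; // postblit bumps the shared refcount
	// when both b and c go out of scope, the buffer is freed exactly once
}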

struct HeapClosure(T) if(is(T == delegate)) {
	mixin SimpleRefCounting!(T, q{
		char[16] buffer;
		// note: intToString is assumed to come from elsewhere; it is not
		// defined in this module
		write("\nfreeing closure ", intToString(cast(size_t) payload.ptr, buffer),"\n");
		manual_free(payload.ptr);
	});
}

HeapClosure!T makeHeapClosure(T)(T t) { // if(__traits(isNested, T)) {
	return HeapClosure!T(t);
}
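
// Usage sketch (left un-compiled: intToString is not defined in this
// module, and the freeCode above manual_free's the delegate's context
// pointer, which is only sound if that context was itself allocated
// manually rather than being a normal GC closure):
version(none) unittest {
	int x = 5;
	auto hc = makeHeapClosure(delegate int() { return x; });
	assert(hc() == 5); // alias getPayload this forwards the call
}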