-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path_tensor.js
1549 lines (1068 loc) · 50.4 KB
/
_tensor.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/// need to know the variety of dimensions
// will work out the index positions based on the sizing and coordinates.
// will do quite a large multiplication.
// Could have image processing systems that return a tensor containing multiple images.
// Could express multiple convolutions in a tensor
// Uint8Tensor
// Tensor (TaType, dimensions)
// Could get the dimensions as a Typed Array.
// Shape
// Itself a TA
// Likely UInt32Array
// Rank - number of dimensions
// Maybe worth making Position data type.
// Tensor type
// let t = new Tensor(shape), v;
// t.set_i(idx, value);
// v = t.get_i(idx);
// t.set(ta_pos, value);
// v = t.get(ta_pos);
// --- Maybe:
// t.set_t(ta_pos, tensor) - writes the tensor to that position.
// t.get_t(ta_pos, shape) - gets that position.
// t.slice(pos, shape)
// Animations and frames would be a good way to think through and test a few tensors.
// Relatively low order, but an image could be a 3d tensor: x, y, pixel_component.
// Then this would be nice to convert to C++ and other optimization methods such as WASM.
// Tensor can't change shape
/* Possible much faster loop-action method
loop dimension 1
loop dimension 2
loop dimension 3
write them all, updating the offsets
Could be a lot faster if it means avoiding having to (re)calculate indexes.
However, calculating indexes could likely be done quickly, and its a reliable method.
*/
// Will do this in a very OO way.
// Objects will be used to represent all sorts of things.
// Slices, planes, variety of shapes.
// Numbers that are used for different kinds of mapping.
// Mapping between different shapes and coords systems.
// Want majority of operations to be really simple when carrying out various tensor ops.
// Will be interesting to make and use classes that represent 3D shapes, and then have algorithm that determines if any 1x1x1 cube within 3D space:
// 1) Has no intersection
// 2) Has partial intersection ?????
// 3) Has full coverage ?????
// 4) Has any intersection / overlap with the shape.
// 5) A single point is within the defined shape (easier to check).
// High resolution mapping / calculation?
// High resolution calculations available through (more) advanced maths?
// Generally, work out necessary calculations to do the operation(s).
// Then it will be fast to do the specific operations, given that we have the preceding data. Data that gets precalculated in order to enable operations to work quickly - data locality, and sequential processing of data.
// If possible, stages necessary to break down task so it can be parallelised.
const Tensor_Shape = require('./Tensor_Shape/_tensor-shape');
// Add to self?
// Subtract from?
// Copies every element of `source` into `target` in place and returns `target`.
// No bounds checking: assumes target.length >= source.length.
const ta_copy_to = (source, target) => {
    let i = source.length;
    while (i--) {
        target[i] = source[i];
    }
    return target;
}
// In-place element-wise addition: ta1[i] += ta2[i].
// Missing entries of ta2 (when it is shorter than ta1) count as 0.
// Returns ta1.
const self_add_ta = (ta1, ta2) => {
    const n = ta1.length;
    for (let i = 0; i < n; i++) {
        const addend = ta2[i] || 0;
        ta1[i] += addend;
    }
    return ta1;
}
// In-place element-wise subtraction: ta1[i] -= ta2[i].
// Missing entries of ta2 (when it is shorter than ta1) count as 0.
// Returns ta1.
const self_subtract_ta = (ta1, ta2) => {
    const n = ta1.length;
    for (let i = 0; i < n; i++) {
        const subtrahend = ta2[i] || 0;
        ta1[i] -= subtrahend;
    }
    return ta1;
}
// Adds the two, new result. Do we want that?
// Element-wise sum into a NEW typed array (same constructor as ta1),
// sized to the longer of the two inputs; missing entries count as 0.
const add_ta = (ta1, ta2) => {
    const n = Math.max(ta1.length, ta2.length);
    const out = new ta1.constructor(n);
    let i = 0;
    while (i < n) {
        const a = ta1[i] || 0;
        const b = ta2[i] || 0;
        out[i] = a + b;
        i++;
    }
    return out;
}
// Element-wise difference (ta1 - ta2) into a NEW typed array (same
// constructor as ta1), sized to the longer input; missing entries count as 0.
const subtract_ta = (ta1, ta2) => {
    const n = Math.max(ta1.length, ta2.length);
    const out = new ta1.constructor(n);
    let i = 0;
    while (i < n) {
        const a = ta1[i] || 0;
        const b = ta2[i] || 0;
        out[i] = a - b;
        i++;
    }
    return out;
}
// copy a ta?
// could be faster than slice
// Defines a read-only accessor `prop_name` on `obj` whose value is produced
// by `fn_get` on every read. Property descriptor defaults apply, so the
// property is non-enumerable and non-configurable, and writes are rejected.
const read_only = (obj, prop_name, fn_get) => {
    Object.defineProperty(obj, prop_name, {
        // Plain call (no `this`) to match how the getters here are written.
        get: () => fn_get()
    });
}
// Registry of the TypedArray constructors a Tensor may be backed by.
// Used both to pick a backing-store constructor and to recognise TypedArray
// values at runtime (via `.has(value.constructor)`).
// NOTE(review): BigInt64Array / BigUint64Array are absent — their elements
// are BigInts, which the numeric arithmetic here does not support; confirm
// before adding them.
// FIX: `const` instead of `var` — the binding is never reassigned.
const m_ta_constructors = new Map([
    [Uint8ClampedArray, true],
    [Uint8Array, true],
    [Uint16Array, true],
    [Uint32Array, true],
    [Int8Array, true],
    [Int16Array, true],
    [Int32Array, true],
    [Float32Array, true],
    [Float64Array, true]
]);
/*
Position Deltas
---------------
Need to be able to use tensors to represent differences in index values, as well as differences in positions.
Index value diffs can be stored in a list, while position differences are in a matrix
Or position differences can be expressed as a shape, where the central pixel / position is right in the middle.
Then convert a position difference shape to the position deltas list
// And can / should use / support tensors for both.
Scalar tensors could even be useful in ensuring the types of numbers.
// tensor.shape_to_position_deltas
// only think we really need the TypedArray here
*/
// Could possibly have a Shape or Tensor_Shape class
// It would be useful in that it can be combined with another tensor's shape to work out the position offsets as one shape gets used as offsets while moving through that
// tensor.
// Tensor_Shape does seem like it would be useful.
// Can use that rather than full tensors for working out the shape.
// Possibly Tensor itself can hold Tensor_Shape which can do a few more calculations.
class Tensor {
// or if we give an instance of a typed array instead of a constructor?
// could check for the .constructor property
// Can also have a simpler positions iterator
// Carries out an increment operation on a ta of position vectors.
// That increment loop will itself also be a useful pattern.
// (Byte_Tensor?)
constructor(shape, TAConstructor = Uint8Array) {
const rank = shape.length;
// but not in this variable
// rank may sometimes be 0 indexed....
// calculate the size
let tsize = 1;
/*
for (let c = 0; c < rank; c++) {
size = size * shape[c];
}
*/
//const size =
//
// coords being used usually
// indexes may be used instead for performance reasons.
// index change vectors corresponding to coordinate change vectors.
// compute rank multiplication tables
// dimension multipliers
// create the array of dimension multipliers
// d1, d1 * d2, d1 * d2 * d3;
// can have quick algorithm to compute them.
let dimension_factors = new Uint32Array(rank + 1);
dimension_factors[0] = 1;
// Need to create the dimension factors in the opposite direction.
// The right-most part has got the smallest result in the dimensions.
// reversed shape?
const rshape = shape.slice().reverse();
for (let c = 0; c < rank; c++) {
//console.log('rshape[c]', rshape[c]);
tsize = tsize * rshape[c];
dimension_factors[c + 1] = tsize;
}
dimension_factors.reverse();
const size = tsize;
//if (m_ta_constructors.has)
//dimension_factors.reverse();
dimension_factors = dimension_factors.slice(1);
//console.log('dimension_factors', dimension_factors);
const r_dimension_factors = dimension_factors.slice().reverse();
//throw 'stop';
if (!(shape instanceof Uint32Array)) {
shape = new Uint32Array(shape);
}
const ta = m_ta_constructors.has(TAConstructor) ? new TAConstructor(size) : TAConstructor;
if (ta.length !== size) {
throw 'Typed Array Size Mismatch';
}
read_only(this, 'rank', () => rank);
read_only(this, 'size', () => size);
read_only(this, 'shape', () => shape.slice());
read_only(this, 'dimension_factors', () => dimension_factors.slice());
// index offsets.
// Stub: intended to turn a shape into a list of flat-index offsets
// (e.g. for a floating convolution window centred within each dimension),
// but not yet implemented. Currently only logs its input and returns
// undefined.
const shape_to_index_offsets = target_shape => {
    console.log('shape', target_shape);
    console.log('shape.length', target_shape.length);
    // Planned approach: enumerate every position within the shape, find the
    // centre of each dimension, recentre the positions, then convert each
    // recentred position to an index delta using this tensor's
    // dimension_factors. Returning a plain ta (not a full tensor) should be
    // enough for iteration.
}
// dimension_factors
// Why enable access to the underlying ta? is that less secure?
// To enable faster operations with direct access, that don't require function calls.
// Want other objects, including other tensors, to be able to read the ta.
// In some cases it wouldn't be as secure. Can't rely on this data being either hidden or immutable.
read_only(this, 'ta', () => ta);
((ta, rank, dimension_factors, r_dimension_factors, size, shape) => {
const ta_limits = new Uint32Array(rank);
for (let c = 0; c < rank; c++) {
ta_limits[c] = shape[c] - 1;
}
// Do more within the constructor optimization, using local variables rather than this?
//console.log('TAConstructor.constructor ' + TAConstructor.constructor);
// Can check the constructor against all the known ta constructors,
// or have a js map object of the constructors.
// that could be fastest and idiomatic...?
// For some optimized usages, we want to construct a tensor using a typed array already given
// check it's the right size too.
//console.log('TAConstructor.prototype ' + TAConstructor.prototype);
// number of dimensions?
// "order", "degree", or "ndims." - TensorFlow
// scale? The components of a tensor with respect to a basis is an indexed array. The order of a tensor is the number of indices needed. Some texts may refer to the tensor order using the term degree or rank.
// order, degree, rank
// tf.rank
// Returns a 0-D int32 Tensor representing the rank of input
// tf.size
// Returns a 0-D Tensor representing the number of elements in input of type out_type. Defaults to tf.int32.
// Our rank function will return a number
// write_ta(idx)
// could be useful for copying over tensors
// can divide into different tas to write.
// write_tensor(idx)
// write will be a simple command from the index
// Copies the whole of `p_ta` into the backing store, starting at flat index
// `idx`. No bounds checking. Returns `this` for chaining.
const write_ta = (idx, p_ta) => {
    const n = p_ta.length;
    for (let i = 0; i < n; i++) {
        ta[idx + i] = p_ta[i];
    }
    return this;
}
this.write_ta = write_ta;
// And we could have a read function.
// Could read it to an existing ta
// Some loops can use a single ta for values that get checked logically rather than used as results.
// Copies `p_ta.length` values out of the backing store, starting at flat
// index `idx`, into the caller-supplied array `p_ta`. Returns `this`;
// the values land in `p_ta`, so one scratch buffer can be reused in loops.
const read_ta = (idx, p_ta) => {
    const n = p_ta.length;
    for (let i = 0; i < n; i++) {
        p_ta[i] = ta[idx + i];
    }
    return this;
}
this.read_ta = read_ta;
// Flattens an "offsets tensor" into a list of scalar flat-index offsets.
// `ot` is a Tensor whose last dimension holds a per-axis position-delta
// vector for each window cell; each vector is dotted with this tensor's
// dimension_factors to produce one signed index offset.
// FIX: the body referenced `ot` while the parameter was named
// `ta_conv_shape`, so any call threw a ReferenceError. The parameter is now
// `ot`, matching the (commented-out) call sites which pass an offsets
// tensor, and the derived values are read from the tensor itself. The
// result length is ot.size divided by the delta-vector length (previously a
// hard-coded `/ 2`, i.e. 2-D only).
const offsets_tensor_to_index_offsets = ot => {
    // The last dimension of `ot` holds the delta vectors.
    const last_i = ot.rank - 1;
    // One result per delta vector.
    const res = new Int32Array(ot.size / ot.shape[last_i]);
    let idx = 0;
    let i, c, l;
    ot.each_pos_ta_at_or_below_rank(last_i, (pos, p_ta) => {
        // delta · dimension_factors -> flat-index offset into this tensor.
        i = 0;
        l = p_ta.length;
        for (c = 0; c < l; c++) {
            i += p_ta[c] * dimension_factors[c];
        }
        res[idx++] = i;
    })
    return res;
}
// Builds the flat-index offsets for a convolution window of shape
// `ta_conv_shape`, centred on a position within this tensor.
// Returns an Int32Array with one signed index offset per cell of the window.
// Side effect: also writes the centred per-axis deltas into a scratch tensor.
const conv_index_offsets = (ta_conv_shape) => {
// Scratch tensor: the window shape plus one trailing dimension that holds
// the per-axis delta vector for each window cell.
let new_shape = new Uint32Array(ta_conv_shape.length + 1);
for (let c = 0; c < ta_conv_shape.length; c++) {
new_shape[c] = ta_conv_shape[c]
}
new_shape[ta_conv_shape.length] = ta_conv_shape.length;
// Int8Array: deltas are small signed numbers (assumes window extents < 128).
let sshape = new Tensor(new_shape, Int8Array);
// Centre of the window along each axis; Math.floor biases even sizes low.
let ta_shape_centre_offsets = new Int32Array(ta_conv_shape.length);
for (var c = 0, l = ta_conv_shape.length; c < l; c++) {
ta_shape_centre_offsets[c] = Math.floor(ta_conv_shape[c] / 2);
}
// Total number of cells in the window.
let conv_length = 1;
for (var c = 0; c < ta_conv_shape.length; c++) {
conv_length *= ta_conv_shape[c];
}
// Reused scratch position vector (avoids an allocation per cell).
const tpos = new Int32Array(ta_conv_shape.length);
const res = new Int32Array(conv_length);
let rw = 0;
sshape.each_pos_index_at_or_below_rank(ta_conv_shape.length, (pos, idx) => {
// Recentre: delta = pos - centre. (ta_copy_to + self_subtract_ta keep it
// in the scratch vector rather than allocating.)
ta_copy_to(pos, tpos);
self_subtract_ta(tpos, ta_shape_centre_offsets);
// Record the centred delta vector in the scratch tensor.
sshape.write_ta(idx, tpos);
// Collapse the delta vector to a single flat-index offset.
// NOTE(review): `shape[c + 1]` looks suspicious — converting a position
// delta to a flat-index delta normally uses dimension_factors, and when
// the window has the same rank as this tensor, shape[c + 1] reads past
// the end of `shape` (yielding undefined/NaN). Presumably written for a
// rank-(n+1) tensor with an n-dimensional window — TODO confirm against
// each_pos_index_at_or_below_rank before relying on these offsets.
let v = 0;
for (let c = 0; c < ta_conv_shape.length; c++) {
v += shape[c + 1] * tpos[c];
}
res[rw++] = v;
})
// The caller can apply these offsets directly to flat indexes when running
// a convolution.
return res;
}
this.conv_index_offsets = conv_index_offsets;
// go with rank
// like TensorFlow
//uint MaxValue = 4294967295;
// uint MaxValue = 4,294,967,295; 4GB
// For the moment, size is limited to 4GB.
// size
// It's the smaller part on the right.
// So the dimension factors should be created in the opposite direction.
// Converts a position vector to a flat index into the backing store:
// the dot product of the position with dimension_factors. The largest
// factor is leftmost, matching row-major layout, so a partial (shorter)
// position yields the index of the start of that sub-block.
const pos_to_i = pos => {
    let idx = 0;
    const n = pos.length;
    for (let d = 0; d < n; d++) {
        idx += dimension_factors[d] * pos[d];
    }
    return idx;
}
this.pos_to_i = pos_to_i;
// then i to pos
// a bit harder to calculate
// may work with remainders.
// subtraction of dimension_factors, looking at what is left over
// Converts a flat index back into a full-rank position vector.
// Successive floor-division by each dimension factor, largest factor first;
// the remainder carries down to the next (smaller) factor.
const i_to_pos = i => {
    let remaining = i;
    const pos = new Uint32Array(rank);
    for (let d = 0; d < rank; d++) {
        const f = dimension_factors[d];
        const q = Math.floor(remaining / f);
        remaining -= q * f;
        pos[d] = q;
    }
    return pos;
}
this.i_to_pos = i_to_pos;
// Human-readable rendering of the tensor.
//   rank 2: the shape header plus one JSON row per line.
//   rank 3: the shape header plus one row per line, items grouped by the
//           innermost dimension (outer [[ ]] pairs collapsed, as before).
//   other ranks: just the shape summary 'Tensor[shape]'.
const toString = () => {
    if (rank === 2) {
        // FIX: this branch previously called an undefined helper
        // (each_idx_at_or_below_rank) and returned undefined; it now
        // renders a grid of rows like the rank-3 branch does.
        const arr_all = [];
        let arr_row = [];
        for (let c = 0, l = ta.length; c < l; c++) {
            arr_row.push(ta[c]);
            if (arr_row.length === shape[1]) {
                arr_all.push(arr_row);
                arr_row = [];
            }
        }
        return 'Tensor[' + shape + ']\n' + arr_all.map(x => JSON.stringify(x)).join('\n');
    } else {
        if (rank === 3) {
            const arr_all = [];
            let arr_row = [];
            let arr_item = [];
            for (let c = 0, l = ta.length; c < l; c++) {
                arr_item.push(ta[c]);
                // FIX: group sizes now come from the shape rather than the
                // hard-coded literals 2 and 3 (which only rendered shapes
                // ending in [..., 3, 2] correctly).
                if (arr_item.length === shape[2]) {
                    arr_row.push(arr_item);
                    arr_item = [];
                }
                if (arr_row.length === shape[1]) {
                    arr_all.push(arr_row);
                    arr_row = [];
                }
            }
            return 'Tensor[' + shape + ']\n' + arr_all.map(x => JSON.stringify(x)).join('\n').split('[[').join('[').split(']]').join(']');
        } else {
            return 'Tensor[' + shape + ']';
        }
    }
}
this.toString = toString;
// Also various position iteration functions.
// Be able to get slices as Tensor objects.
/*
const set = (pos, value) => {
const i = pos_to_i(pos);
ta[i] = value;
return this;
}
*/
// Seems much like slice
// Not sure
// Looks up a sub-tensor from a partial position (fewer coordinates than the
// tensor's rank). Because layout is row-major, a partial position selects a
// contiguous chunk of the backing store; that chunk is copied (ta.slice) and
// wrapped in a new Tensor with the remaining dimensions.
const get_from_lower_rank = pos => {
    console.log('get_from_lower_rank', pos);
    const given = pos.length;
    // The dimensions not fixed by `pos` become the result's shape.
    const remaining_shape = shape.slice(given);
    // Element count of the selected chunk.
    let chunk_size = 1;
    for (let d = 0, n = remaining_shape.length; d < n; d++) {
        chunk_size *= remaining_shape[d];
    }
    const start = pos_to_i(pos);
    return new Tensor(remaining_shape, ta.slice(start, start + chunk_size));
}
// Need to be able to write sub-tensors in place.
// The trick will be to write smaller tensors.
// Need to extend functions from writing a 5x5 box into a 100x100 picture.
// Can't direct copy, could direct copy rows.
// Really need to work out the corresponding index of each pixel/cell in each, and copy them across.
// Can have fast index finding, and fast copying of data.
// So presumably this tensor maths system will be relatively fast.
// But would be considerably faster in a compiled language.
// Say we have multiple frames, and we want to set individual frames.
// Or drawing a box within a single frame.
// That could be writing a 2d tensor.
// set to lower rank looks a little more difficult.
// need to account for different shapes.
// I think we need to convert to and from coordinate systems.
// set(pos, value) / set(value)
// Writes into this tensor:
//   - (pos, number): writes one scalar cell at `pos`.
//   - (value) single-argument form: whole-tensor assignment from an Array or
//     TypedArray of exactly ta.length elements.
//   - (pos, Tensor): copies the value tensor in, cell by cell, using `pos`
//     as the origin offset.
// Throws the string 'NYI' for unimplemented combinations and
// 'Unsupported value type ...' for unknown value types.
// Declared with `function` (not arrow) so `arguments` is available for
// arity detection.
const set = function (pos, value) {
const a = arguments,
l = a.length;
if (l === 1) {
// Single-argument form: the argument is the value; there is no position.
value = a[0];
pos = null;
}
// Normalise plain-array positions to the typed form pos_to_i works on.
if (Array.isArray(pos)) {
pos = new Uint32Array(pos);
}
if (typeof value === 'number') {
// Scalar write requires an explicit position.
if (pos !== null) {
ta[pos_to_i(pos)] = value;
} else {
throw 'NYI';
}
} else {
if (Array.isArray(value)) {
// Whole-tensor fill from a plain array.
// NOTE(review): a length mismatch (or a non-null pos) is silently
// ignored here, unlike the TypedArray branch below which throws 'NYI'
// on mismatch — presumably unintentional; confirm.
if (pos === null) {
if (value.length === ta.length) {
const l = value.length;
for (let c = 0; c < l; c++) {
ta[c] = value[c];
}
}
}
} else {
if (m_ta_constructors.has(value.constructor)) {
// Whole-tensor fill from a TypedArray.
if (pos === null) {
if (value.length === ta.length) {
const l = value.length;
for (let c = 0; c < l; c++) {
ta[c] = value[c];
}
} else {
throw 'NYI';
}
}
// NOTE(review): a TypedArray with a non-null pos falls through and does
// nothing — confirm whether positioned TypedArray writes are intended.
} else {
//
if (value instanceof Tensor) {
const vta = value.ta;
// Cell-by-cell copy: for each flat index of the value tensor, convert
// it to a position, offset it by `pos`, and map back to a flat index
// here. No clipping is performed for positions outside this tensor.
// Could be optimised with a precomputed index-offset list.
let iv = 0,
lv = value.size;
for (iv = 0; iv < lv; iv++) {
ta[pos_to_i(add_ta(pos, value.i_to_pos(iv)))] = vta[iv];
}
} else {
console.trace();
throw 'Unsupported value type ' + value;
}
}
}
}
}
// Full-rank position -> scalar lookup; shorter position -> sub-tensor copy.
const get = pos => {
    if (pos.length === rank) {
        return ta[pos_to_i(pos)];
    }
    return get_from_lower_rank(pos);
}
this.set = set;
this.get = get;
// a fast each_pos function
// Then we would like each pos at a specific dimension beginning.
// Though that maybe could be calculated with other addition and multiplication in a loop.
// Not requiring full iteration.
// Find the first item, then know the multiplication factors.
// Work through some simpler examples to find really fast performance.
// each pixel would wrap this or a similar function.
// Also want some kind of tensor extraction or inner iteration if possible.
// Each pos at dimension
// Won't iterate positions at higher (right) dimensions.
// like each pos, but have a lower maximum rank.
// each pos from rank
// only cb if the dimension being incremented is at or below the rank_limit / dimension_limit
// dimension is more JS-compatible terminology
// each_pos_at_or_below_rank
// each_pos_at_or_above_rank
// etc
// may as well have quite a lot of functions and use them as appropriate.
// Could definitely make a more optimized versions.
// Could we read tensors that are dataviews of this?
// Likely not, in many cases.
// Will need to copy when reshaping usually.
// And a padded version of this too...
// padded_each_pos