Why isn't this working?!

Image: screenshot of the scene

Author: CPJchute
Group: Default
Filesize: 50.77 kB
Date added: 2025-09-05
Rating: 5
Downloads: 180
Views: 110
Comments: 4
Ratings: 1
Times favored: 0
Made with: Algodoo v2.2.4
Tags: Scene tag

Details in scene.
Similar scenes
Title: Working Computer V0.1Beta
Rating: 5
Filesize: 426.91 kB
Downloads: 1321
Comments: 0
Ratings: 1
Date added: 2011/09/20 22:52:23
Made with: Algodoo v1.9.9b
Title: Hydraulic cylinders (working) and working on texture
Rating: 5
Filesize: 40.99 kB
Downloads: 909
Comments: 2
Ratings: 1
Date added: 2009/07/07 11:58:51
Made with: Phun
Title: Red car 2.0 with working chaingun
Rating: 5
Filesize: 81.1 kB
Downloads: 444
Comments: 2
Ratings: 1
Date added: 2009/12/30 01:48:01
Made with: Phun
Title: working functional elev
Rating: 5
Filesize: 0.5 MB
Downloads: 507
Comments: 3
Ratings: 1
Date added: 2018/02/20 20:24:00
Made with: Algodoo v2.1.0
Title: An *almost* Fully Working (Semi-Auto) Gun Inside Algodoo[Improved]
Rating: 5
Filesize: 27.48 kB
Downloads: 578
Comments: 0
Ratings: 1
Date added: 2023/08/10 07:50:06
Made with: Algodoo v2.1.0
Title: help!
Rating: 5
Filesize: 18.37 kB
Downloads: 491
Comments: 0
Ratings: 1
Date added: 2008/08/11 23:34:52
Made with: Phun
MS Edge Copilot AI prompt:
I'm building a neural network in Algodoo using Thyme scripting (see 284446_.html). My _forwardPass function is throwing List index out of bounds errors during weight access and array slicing. Please rewrite _forwardPass with full Thyme safety and structure, following Thyme rules (no // comments, use full ternary syntax, no +=, use _xFor, Thyme statements end in ";", and check all array bounds). Also include troubleshooting tips for common indexing mistakes in Thyme.
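
For reference, `_xFor` and `_xSetArr` in the prompt above are not built-in Thyme functions; they are user-defined helpers the scene is assumed to already contain. Since Thyme has no native for loop, such helpers are usually written recursively. A minimal sketch of plausible definitions (these exact bodies are an assumption, not the poster's actual code, and they assume the names are stored somewhere resolvable at call time, such as scene.my):

_xFor := (first, last, code) => {
    first <= last ? {
        code(first);
        _xFor(first + 1, last, code)
    } : {}
};

_xSetArr := (arr, idx, val) => {
    out := [];
    _xFor(0, string.length(arr) - 1, (i) => {
        out = out ++ [(i == idx ? val : arr(i))]
    });
    out
};

Note that `_xSetArr` rebuilds the whole array to replace one element, which matches how the code below treats Thyme arrays as immutable values.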

MS Edge Copilot AI partial response below:

_forwardPass := {
    console.print("(DEBUG) forward pass started");

    layerCount := string.length(_layerSizes);
    layerCount > 1 ? {
        _xFor(0, layerCount - 2, (i) => {
            i >= 0 && i < layerCount - 1 ? {
                newLayer := [];
                j := i + 1;

                targetSize := _layerSizes(j);
                sourceSize := _layerSizes(i);

                targetSize > 0 && sourceSize > 0 ? {
                    _xFor(0, targetSize - 1, (k) => {
                        neuron := 0;

                        _xFor(0, sourceSize - 1, (l) => {
                            (i >= 0 && i < string.length(_network)) &&
                            (l >= 0 && l < string.length(_network(i))) &&
                            (i >= 0 && i < string.length(_weights)) &&
                            (k * sourceSize + l >= 0 && k * sourceSize + l < string.length(_weights(i))) ? {
                                neuron = neuron + (_network(i)(l) * _weights(i)(k * sourceSize + l));
                            } : {
                                console.print("(WARN) Skipped invalid index at layer " + i + ", neuron " + l + ", weight index " + (k * sourceSize + l));
                            };
                        });

                        (i >= 0 && i < string.length(_biases)) &&
                        (k >= 0 && k < string.length(_biases(i))) ? {
                            neuron = neuron + _biases(i)(k);
                        } : {
                            console.print("(WARN) Skipped bias index " + k + " at layer " + i);
                        };

                        newLayer = newLayer ++ [neuron];
                    });

                    _network = _xSetArr(_network, j, newLayer);
                } : {
                    console.print("(WARN) Invalid layer sizes at index " + i);
                };
            } : {
                console.print("(WARN) Invalid layer index " + i);
            };
        });
    } : {
        console.print("(WARN) Not enough layers to perform forward pass");
    };

    _network;
};
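
To try the function above, `_layerSizes`, `_network`, `_weights`, and `_biases` must already exist with matching shapes. A hypothetical setup for a tiny 2-3-1 network, with shapes inferred from how `_forwardPass` indexes the arrays (the values are made up for illustration):

_layerSizes := [2, 3, 1];
_network := [[0.5, -0.25], [0, 0, 0], [0]];
_weights := [[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [0.7, 0.8, 0.9]];
_biases := [[0.01, 0.02, 0.03], [0.04]];
_forwardPass

Here `_weights(i)` is a flat array of `_layerSizes(i + 1) * _layerSizes(i)` values indexed as `k * sourceSize + l`, and `_biases(i)` has one entry per neuron in layer `i + 1`; a mismatch in any of these lengths is exactly what produces the "List index out of bounds" error the prompt describes.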
If you want, I can also give you a **minimal debug mode toggle** so you can turn logging on/off without editing every `console.print` line. That way you can run it silently once it’s stable. Would you like me to add that?

Yes.

Got it — here’s the **no‑comment, Thyme‑safe `_forwardPass`** with:

- **Auto‑repair** for `_network`, `_weights`, `_biases`
- **Detailed debug logging**
- **Debug mode toggle** so you can silence or enable logs without editing every `console.print`

You just set `debugMode := 1;` to see logs, or `debugMode := 0;` to run quietly.

_forwardPass := {
    debugMode := 1;

    debugMode == 1 ? { console.print("(DEBUG) forward pass started") } : {};

    layerCount := string.length(_layerSizes);

    string.length(_network) != layerCount ? {
        debugMode == 1 ? { console.print("(FIX) Adjusting _network to match _layerSizes length") } : {};
        fixedNet := [];
        _xFor(0, layerCount - 1, (li) => {
            expectedSize := _layerSizes(li);
            (li < string.length(_network)) ? {
                layerArr := _network(li);
                string.length(layerArr) != expectedSize ? {
                    debugMode == 1 ? { console.print("(FIX) Resizing _network(" + li + ") from " + string.length(layerArr) + " to " + expectedSize) } : {};
                    adj := [];
                    _xFor(0, expectedSize - 1, (ni) => {
                        adj = adj ++ [(ni < string.length(layerArr) ? layerArr(ni) : 0)];
                    });
                    fixedNet = fixedNet ++ [adj];
                } : {
                    fixedNet = fixedNet ++ [layerArr];
                };
            } : {
                debugMode == 1 ? { console.print("(FIX) Adding missing layer " + li + " size " + expectedSize) } : {};
                newArr := [];
                _xFor(0, expectedSize - 1, (ni) => { newArr = newArr ++ [0] });
                fixedNet = fixedNet ++ [newArr];
            };
        });
        _network = fixedNet;
    } : {};

    string.length(_weights) != layerCount - 1 ? {
        debugMode == 1 ? { console.print("(FIX) Adjusting _weights count") } : {};
        fixedW := [];
        _xFor(0, layerCount - 2, (wi) => {
            expectedLen := _layerSizes(wi) * _layerSizes(wi + 1);
            (wi < string.length(_weights)) ? {
                wArr := _weights(wi);
                string.length(wArr) != expectedLen ? {
                    debugMode == 1 ? { console.print("(FIX) Resizing _weights(" + wi + ") from " + string.length(wArr) + " to " + expectedLen) } : {};
                    adj := [];
                    _xFor(0, expectedLen - 1, (wi2) => {
                        adj = adj ++ [(wi2 < string.length(wArr) ? wArr(wi2) : 0)];
                    });
                    fixedW = fixedW ++ [adj];
                } : {
                    fixedW = fixedW ++ [wArr];
                };
            } : {
                debugMode == 1 ? { console.print("(FIX) Adding missing weights for layer " + wi) } : {};
                newW := [];
                _xFor(0, expectedLen - 1, (wi2) => { newW = newW ++ [0] });
                fixedW = fixedW ++ [newW];
            };
        });
        _weights = fixedW;
    } : {};

    string.length(_biases) != layerCount - 1 ? {
        debugMode == 1 ? { console.print("(FIX) Adjusting _biases count") } : {};
        fixedB := [];
        _xFor(0, layerCount - 2, (bi) => {
            expectedLen := _layerSizes(bi + 1);
            (bi < string.length(_biases)) ? {
                bArr := _biases(bi);
                string.length(bArr) != expectedLen ? {
                    debugMode == 1 ? { console.print("(FIX) Resizing _biases(" + bi + ") from " + string.length(bArr) + " to " + expectedLen) } : {};
                    adj := [];
                    _xFor(0, expectedLen - 1, (bi2) => {
                        adj = adj ++ [(bi2 < string.length(bArr) ? bArr(bi2) : 0)];
                    });
                    fixedB = fixedB ++ [adj];
                } : {
                    fixedB = fixedB ++ [bArr];
                };
            } : {
                debugMode == 1 ? { console.print("(FIX) Adding missing biases for layer " + bi) } : {};
                newB := [];
                _xFor(0, expectedLen - 1, (bi2) => { newB = newB ++ [0] });
                fixedB = fixedB ++ [newB];
            };
        });
        _biases = fixedB;
    } : {};

    layerCount > 1 ? {
        _xFor(0, layerCount - 2, (i) => {
            debugMode == 1 ? { console.print("(DEBUG) Processing layer " + i) } : {};
            newLayer := [];
            j := i + 1;
            targetSize := _layerSizes(j);
            sourceSize := _layerSizes(i);
            debugMode == 1 ? { console.print("(DEBUG) sourceSize=" + sourceSize + ", targetSize=" + targetSize) } : {};
            (targetSize > 0 && sourceSize > 0) ? {
                _xFor(0, targetSize - 1, (k) => {
                    neuron := 0;
                    _xFor(0, sourceSize - 1, (l) => {
                        weightIndex := k * sourceSize + l;
                        debugMode == 1 ? { console.print("(DEBUG) Accessing: layer=" + i +
                            ", neuronIndex=" + l +
                            ", weightIndex=" + weightIndex +
                            ", netLayerLen=" + string.length(_network(i)) +
                            ", weightsLen=" + string.length(_weights(i))) } : {};
                        (i >= 0 && i < string.length(_network)) &&
                        (l >= 0 && l < string.length(_network(i))) &&
                        (i >= 0 && i < string.length(_weights)) &&
                        (weightIndex >= 0 && weightIndex < string.length(_weights(i))) ? {
                            neuron = neuron + (_network(i)(l) * _weights(i)(weightIndex));
                        } : {
                            debugMode == 1 ? { console.print("(WARN) Skipped invalid index at layer " + i +
                                ", neuron " + l +
                                ", weightIndex " + weightIndex) } : {};
                        };
                    });
                    debugMode == 1 ? { console.print("(DEBUG) Adding bias for neuron " + k) } : {};
                    (i >= 0 && i < string.length(_biases)) &&
                    (k >= 0 && k < string.length(_biases(i))) ? {
                        neuron = neuron + _biases(i)(k);
                    } : {
                        debugMode == 1 ? { console.print("(WARN) Skipped bias index " + k + " at layer " + i) } : {};
                    };
                    newLayer = newLayer ++ [neuron];
                });
                _network = _xSetArr(_network, j, newLayer);
            } : {
                debugMode == 1 ? { console.print("(WARN) Invalid layer sizes at index " + i) } : {};
            };
        });
    } : {
        debugMode == 1 ? { console.print("(WARN) Not enough layers to perform forward pass") } : {};
    };

    _network;
};
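
One way to cut the repetition in the version above: since every log line repeats the same `debugMode == 1 ? { console.print(...) } : {}` pattern, a single logging helper could be defined once and reused. A sketch (the `_dbg` name is made up, and it assumes `debugMode` is stored where both can see it, e.g. scene.my.debugMode):

debugMode := 1;
_dbg := (msg) => {
    debugMode == 1 ? { console.print(msg) } : {}
};

Each guarded print then collapses to a call like `_dbg("(DEBUG) forward pass started");`.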
It seems the AI made things overcomplicated. I know this because when I made the forward pass work layer by layer, it worked.
The AI code is overcomplicated because it has extra code to reveal or handle the source of the indexing error. My recollection is that the AI code ran without error. Once you locate the source of the error, you can modify your original code. What happened when you ran the AI code?
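
For comparison, the layer-by-layer version described above, with the guards stripped back out once the bad index is found, would look roughly like this (a sketch under the same assumptions about `_xFor`, `_xSetArr`, and the array shapes, not anyone's actual posted code):

_forwardPass := {
    _xFor(0, string.length(_layerSizes) - 2, (i) => {
        sourceSize := _layerSizes(i);
        newLayer := [];
        _xFor(0, _layerSizes(i + 1) - 1, (k) => {
            neuron := 0;
            _xFor(0, sourceSize - 1, (l) => {
                neuron = neuron + _network(i)(l) * _weights(i)(k * sourceSize + l)
            });
            newLayer = newLayer ++ [neuron + _biases(i)(k)]
        });
        _network = _xSetArr(_network, i + 1, newLayer)
    });
    _network
};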