TorchBridge LineWorld Learner (Cuda Torch) [Re: TipmyPip] #489145
01/31/26 17:11
TipmyPip (OP) - Member - Joined: Sep 2017 - Posts: 200
This source file is a training demonstration that runs inside a Zorro plug-in. Picture a workshop where a learner explores a hallway and learns which direction leads to a goal.

The opening section hardens the include environment so Windows macros and Zorro naming do not clash with the LibTorch headers. It includes LibTorch first, then includes the Zorro header with a temporary rename so the identifier 'at' does not collide with the tensor namespace.

Next comes LineWorld, a tiny environment. It stores a current position, a step counter, and a limit on how long an episode may run. reset() returns a fresh state. state() builds a one-hot tensor so only the current position is marked as active. step() applies an action, clamps the position within bounds, updates the counter, and returns the next state along with a reward and a done flag.

Experience is recorded in a replay buffer, a circular notebook with fixed capacity. Each entry holds a transition: state, action, reward, next state, and termination flag. Sampling pulls random memories so training is less correlated and more stable.

The learning brain is a neural network module with three linear layers and ReLU activations. An Agent owns a live network and a target network. The live network is optimized, while the target network is refreshed from time to time. Acting uses an epsilon-greedy rule: sometimes it explores randomly, otherwise it chooses the action with the best predicted value. Training stacks a batch of tensors, gathers the value for each chosen action, computes a bootstrapped target from the target network, measures the error with a mean squared loss, and updates the weights using Adam.

Finally, the exported main() function is what Zorro calls. It sets up logging, seeds randomness, runs training loops for two agents, prints progress, and wraps everything in exception handling so the host process remains safe.

A small note about DLL loading: if compile64.bat does not add the LibTorch lib directory to the PATH seen by the Zorro runtime, Windows cannot find the torch and c10 DLLs, and the plug-in may fail to load.
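Written out, the bootstrapped update that train_step() below performs is the standard one-step DQN target (R, D and gamma are the batch reward, done flag and discount factor from the code):

$$ y = R + \gamma\,(1 - D)\,\max_{a'} Q_{\text{target}}(s', a'), \qquad \text{loss} = \operatorname{MSE}\big(Q(s, a),\, y\big) $$

// ============================================================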
// Zorro DLL: LibTorch demo using ONLY main() (no run())
// ============================================================
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
// #include <windows.h> // <-- REMOVED (not needed anymore if you don't call WinAPI here)
// --- 1) Include LibTorch FIRST ---
#include <torch/torch.h>
#include <vector>
#include <random>
#include <tuple>
#include <algorithm>
#include <cstdio>
#include <exception>
#include <cstdlib>
// --- 2) Include Zorro AFTER torch, rename Zorro's 'at' to avoid conflict ---
#define at zorro_at
#ifdef LOG
#undef LOG
#endif
#include <zorro.h>
#undef at
// --- 3) Cleanup common macro landmines ---
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
#ifdef ref
#undef ref
#endif
#ifdef swap
#undef swap
#endif
#ifdef abs
#undef abs
#endif
// ---------- Tiny 1D environment ----------
struct LineWorld {
int n;
int pos = 0;
int maxSteps;
int steps = 0;
LineWorld(int n_, int maxSteps_) : n(n_), maxSteps(maxSteps_) {}
torch::Tensor reset() {
pos = 0;
steps = 0;
return state();
}
torch::Tensor state() const {
auto s = torch::zeros({n}, torch::kFloat32);
s.index_put_({pos}, 1.0f);
return s;
}
struct StepResult {
torch::Tensor next_state;
float reward;
bool done;
};
StepResult step(int action) {
if (action == 0) pos = std::max(0, pos - 1);
else pos = std::min(n - 1, pos + 1);
steps++;
bool reached = (pos == n - 1);
bool timeout = (steps >= maxSteps);
bool done = reached || timeout;
float reward = reached ? 1.0f : -0.01f;
return { state(), reward, done };
}
};
// ---------- Simple replay buffer ----------
struct Transition {
torch::Tensor s;
int a;
float r;
torch::Tensor ns;
bool done;
};
struct ReplayBuffer {
std::vector<Transition> data;
size_t capacity;
size_t idx = 0;
bool filled = false;
ReplayBuffer(size_t cap) : capacity(cap) {
data.resize(capacity);
}
void push(const Transition& t) {
data[idx] = t;
idx = (idx + 1) % capacity;
if (idx == 0) filled = true;
}
size_t size() const {
return filled ? capacity : idx;
}
bool can_sample(size_t batch) const {
return size() >= batch;
}
std::vector<Transition> sample(size_t batch, std::mt19937& rng) const {
std::uniform_int_distribution<size_t> dist(0, size() - 1);
std::vector<Transition> out;
out.reserve(batch);
for (size_t i = 0; i < batch; i++)
out.push_back(data[dist(rng)]);
return out;
}
};
// ---------- Q Network ----------
struct QNetImpl : torch::nn::Module {
torch::nn::Linear l1{nullptr}, l2{nullptr}, l3{nullptr};
QNetImpl(int stateDim, int actionDim) {
l1 = register_module("l1", torch::nn::Linear(stateDim, 64));
l2 = register_module("l2", torch::nn::Linear(64, 64));
l3 = register_module("l3", torch::nn::Linear(64, actionDim));
}
torch::Tensor forward(torch::Tensor x) {
x = torch::relu(l1(x));
x = torch::relu(l2(x));
x = l3(x);
return x;
}
};
TORCH_MODULE(QNet);
static void hard_update(QNet& dst, QNet& src)
{
torch::NoGradGuard ng;
auto sp = src->parameters();
auto dp = dst->parameters();
for (size_t i = 0; i < sp.size(); i++)
dp[i].copy_(sp[i]);
}
struct Agent {
QNet q;
QNet target;
torch::optim::Adam opt;
ReplayBuffer rb;
float gamma = 0.99f;
float eps = 1.0f;
float epsMin = 0.05f;
float epsDecay = 0.995f;
Agent(int stateDim, int actionDim, float lr, size_t replayCap)
: q(QNet(stateDim, actionDim)),
target(QNet(stateDim, actionDim)),
opt(q->parameters(), torch::optim::AdamOptions(lr)),
rb(replayCap)
{
hard_update(target, q);
}
int act(const torch::Tensor& s, std::mt19937& rng) {
std::uniform_real_distribution<float> u(0.0f, 1.0f);
if (u(rng) < eps) {
std::uniform_int_distribution<int> aDist(0, 1);
return aDist(rng);
}
torch::NoGradGuard ng;
auto qvals = q->forward(s);
return qvals.argmax().item<int>();
}
void decay_epsilon() {
eps *= epsDecay;
if (eps < epsMin) eps = epsMin;
}
void train_step(size_t batchSize, std::mt19937& rng) {
if (!rb.can_sample(batchSize)) return;
auto batch = rb.sample(batchSize, rng);
std::vector<torch::Tensor> ss, nss;
std::vector<int64_t> aa;
std::vector<float> rr, dd;
for (const auto& t : batch) {
ss.push_back(t.s);
nss.push_back(t.ns);
aa.push_back((int64_t)t.a);
rr.push_back(t.r);
dd.push_back(t.done ? 1.0f : 0.0f);
}
auto S = torch::stack(ss);
auto NS = torch::stack(nss);
auto A = torch::tensor(aa, torch::kInt64);
auto R = torch::tensor(rr, torch::kFloat32);
auto D = torch::tensor(dd, torch::kFloat32);
auto qvals = q->forward(S);
auto q_sa = qvals.gather(1, A.unsqueeze(1)).squeeze(1);
torch::Tensor next_q;
{
torch::NoGradGuard ng;
next_q = std::get<0>(target->forward(NS).max(1));
}
auto y = R + gamma * next_q * (1.0f - D);
auto loss = torch::mse_loss(q_sa, y);
opt.zero_grad();
loss.backward();
opt.step();
}
void update_target() { hard_update(target, q); }
};
// ============================================================
// Zorro calls this exported main() once (no run() used)
// ============================================================
extern "C" DLLFUNC int main()
{
// (Optional: keep thread limits even without DLL path management)
// _putenv_s("OMP_NUM_THREADS", "1");
// _putenv_s("MKL_NUM_THREADS", "1");
// _putenv_s("KMP_DUPLICATE_LIB_OK", "TRUE");
// torch::set_num_threads(1);
// torch::set_num_interop_threads(1);
// Make printing work in Zorro-hosted DLLs
setvbuf(stdout, nullptr, _IONBF, 0);
FILE* f = fopen("Log\\mt6409_torch.txt", "a");
if (f) { fprintf(f, "Started MT6409\n"); fflush(f); }
try {
printf("MT6409 Torch demo starting (main)...\n");
torch::manual_seed(0);
const int stateDim = 9;
const int actionDim = 2;
LineWorld env(stateDim, 30);
std::mt19937 rng(123);
Agent agent1(stateDim, actionDim, 1e-3f, 5000);
Agent agent2(stateDim, actionDim, 1e-3f, 5000);
const int episodes = 200;
const int targetUpdateEvery = 20;
const size_t batchSize = 64;
float avg1 = 0.0f, avg2 = 0.0f;
for (int ep = 1; ep <= episodes; ep++) {
// Agent 1
{
auto s = env.reset();
bool done = false;
float total = 0.0f;
while (!done) {
int a = agent1.act(s, rng);
auto res = env.step(a);
agent1.rb.push({s, a, res.reward, res.next_state, res.done});
agent1.train_step(batchSize, rng);
total += res.reward;
s = res.next_state;
done = res.done;
}
agent1.decay_epsilon();
avg1 = 0.95f * avg1 + 0.05f * total;
}
// Agent 2
{
auto s = env.reset();
bool done = false;
float total = 0.0f;
while (!done) {
int a = agent2.act(s, rng);
auto res = env.step(a);
agent2.rb.push({s, a, res.reward, res.next_state, res.done});
agent2.train_step(batchSize, rng);
total += res.reward;
s = res.next_state;
done = res.done;
}
agent2.decay_epsilon();
avg2 = 0.95f * avg2 + 0.05f * total;
}
if (ep % targetUpdateEvery == 0) {
agent1.update_target();
agent2.update_target();
}
if (ep % 25 == 0) {
printf("Episode %d | Agent1 avgR=%.4f eps=%.4f | Agent2 avgR=%.4f eps=%.4f\n",
ep, avg1, agent1.eps, avg2, agent2.eps);
if (f) {
fprintf(f, "Episode %d | Agent1 avgR=%.4f eps=%.4f | Agent2 avgR=%.4f eps=%.4f\n",
ep, avg1, agent1.eps, avg2, agent2.eps);
fflush(f);
}
}
}
printf("Done.\n");
if (f) { fprintf(f, "Done MT6409\n"); fclose(f); }
return 0;
}
catch (const c10::Error& e) {
printf("TORCH c10::Error: %s\n", e.what());
}
catch (const std::exception& e) {
printf("std::exception: %s\n", e.what());
}
catch (...) {
printf("Unknown exception in main().\n");
}
if (f) fclose(f);
return 1;
}
Last edited by TipmyPip; 01/31/26 17:13.
The Candle Oracle Lattice [Re: TipmyPip] #489155
Yesterday at 10:19
TipmyPip (OP) - Member - Joined: Sep 2017 - Posts: 200
Picture a trading workshop that wakes up once for every new bar. A simple gatekeeper called run() stands outside. It does not reason, it only checks the session stage and forwards control. When the session begins it creates a single master object, and when the session ends it destroys it. All meaningful memory lives inside that master, so the workshop stays tidy.

The master strategy acts like a conductor leading several sections. Each section is a component with a clear symbolic job. There is a pool of reusable nodes, a living tree that generates structured rhythms, three Markov memory books that learn candle transitions, a network of internal agents that evolve together, a logger that writes an audit trail, and a runtime steward that protects memory limits.

The node pool is a warehouse of parts. It stores nodes in crates and also keeps a free list of returned nodes. When the tree needs a node, the pool first tries to reuse a returned one. If none are available, it opens a new crate and hands out the next unused node. When a node is no longer needed, it is returned to the free list instead of being discarded. This reduces allocation churn and gives the tree a steady supply of building blocks.

The decision tree is a living sculpture, not a rule splitter. It grows down to a fixed depth. Each node carries a tone value and a pace value and may have children. On every evaluation, a node gathers the influences of its children and forms a local sum. It then forms a rhythmic phase driven by the current bar count and its pace. The node slowly moves its stored tone toward that phase. A depth weight table controls how strongly the update pulls at each depth, so shallow layers drift differently than deep layers. The tree keeps an index list so it can answer which node corresponds to which agent. It estimates node predictability from child dispersion, depth, and pace suitability. Predictability is cached per bar so repeated queries are fast. The tree assigns each agent a position on an imaginary ring derived from its mapped node. The strategy follows that ring to track which internal agent currently leads attention, producing a slowly drifting cycle phase that represents rotating focus.

The Markov memory books form a trio. One book watches the current timeframe, another watches a higher timeframe sampled only on higher timeframe closes, and the third book records only when both timeframes agree on the same pattern and direction. Each book stores transition counts and row totals. On each update it converts the candle situation into a single state label. It computes many candle pattern indicators, selects the strongest absolute one, and accepts it only if it clears an acceptance threshold. If nothing is strong enough it records a none state. From the current row it estimates a bullish next chance and an uncertainty score, using smoothing so early data does not swing wildly.

The network state is a council of internal agents. There are many agents, each with a current value, a previous value, and a squared copy used by the projection system. Each agent has a small neighbor list, and those links can be rewired over time. Every agent holds knobs that decide how its next value is formed. Knobs weight self influence, two neighbor influences, two global influences, a momentum term, a tree ensemble term, and an advisor term. Additional parameters shape two nonlinear transforms that process neighbor arguments into curved outputs, and a mode flag selects which transforms are used for the first and second neighbor on that bar.

The projection system is a lantern that compresses the council into a smaller set of features. It uses a random sign table to mix squared agent values into projected channels. From the projected channels the strategy derives compact summaries that act as context drivers and feed the later update equations.

The logger is the scribe. It writes a header once, then appends sampled lines that describe internal life. A line contains bar and context identifiers, neighbor references, tree location, node depth, predictability, advisor output, attention share, mode, and Markov mood summaries. When optional expression logging is enabled, the scribe can also store a readable recipe of how an agent updates.

The runtime steward watches memory pressure and takes protective actions. It estimates fixed memory based on the known arrays, and it estimates tree memory by walking the tree. When memory approaches the budget, it first turns off plots and heavy logging. If pressure continues, it prunes the tree. Pruning is selective: at the frontier depth it keeps only the most important children and removes the rest, where importance blends predictability, signal amplitude, and depth. After pruning, the strategy reindexes the tree, rebuilds mappings, and refreshes its memory estimates so later decisions reflect the new structure.

With the parts introduced, the bar routine follows a clear rhythm. When the session starts, initialization sets bar period and lookback, selects the asset, allocates network arrays and Markov tables, builds the tree, indexes it, maps each agent to a tree node, assigns ring positions, randomizes the projection table, computes the first projection, and performs a full initial rewire. Rewiring creates neighbor links and synthesizes knobs and parameters for every agent. A seed cache is cleared so advisory queries within a bar can be reused safely.

During warmup bars, the strategy updates the Markov books and keeps the projection ready, but it avoids heavy learning steps and avoids order placement.

During normal bars, the first act is to update the Markov trio. The current timeframe book updates every bar. On bars aligned with the higher timeframe close, the higher timeframe and relation books update as well. These updates refresh bullish chance and uncertainty, which later become gates and confidence modifiers. The acceptance threshold adapts slowly based on recent advisor hit quality and Markov uncertainty.

Next, the strategy computes projection summaries and derives a context driver often called lambda. It also derives another driver from direct council aggregates, often called gamma. The two drivers are blended into a single internal signal, and the blend weight adapts based on higher timeframe uncertainty.

Then come the chunked maintenance tasks. Rewiring is done on a slice of agents, not all at once. For each agent and each neighbor slot, candidate neighbors are sampled and scored using tree depth similarity, pace similarity, and node predictability. Clashes are avoided so neighbor slots do not duplicate. Links are sanitized to avoid invalid indices and self-links. After links are set, the agent knobs and parameters are synthesized from a seed derived from an advisor output. Agent attention weights are also adjusted so agents with better hit histories gain a larger share, which also affects tree cycle leadership and ensemble weighting.

State updates are also chunked. For each agent, a tree ensemble term is computed by comparing the agent to all others and weighting them by depth distance and pace distance, then boosting by predictability and attention shares. This yields a blended council influence and identifies the strongest partner. The strategy obtains an advisor output only when rotation allows it and memory headroom exists, and it may skip weak advisors when their hit record is poor. Neighbor inputs are transformed according to the agent mode, global terms are added, momentum is applied, tree term and advisor term are mixed in, and the new state is clamped to a safe band before being stored.

After updating, hit rates are refreshed. The strategy compares the sign of the recent one-bar return with the sign of stored advisor outputs and updates a smoothed hit estimate per agent. Hit estimates feed back into advisory gating and into the reliability boost used during synthesis. The tree cycle tracker is also advanced based on which agents currently hold the most attention.

Finally, a trade signal is produced. The internal blended signal is gated by the higher timeframe bullish chance. Long intent is allowed only when bullish chance is high enough, and short intent is allowed only when it is low enough. Relation uncertainty reduces confidence. The resulting signed strength becomes a position size, opposing positions are exited, and a new position is entered only when allowed.

In symbolic terms, this system is a council guided by candle language. The Markov trio provides memory and mood, the tree provides structure and cycles, the network provides evolving voices, the pool provides material, the logger provides accountability, and the steward provides survival. The result is an adaptive engine that learns, rewires, and trades with guardrails while staying within its resource envelope.

// Zorro64 C++ Strategy DLL - Alpha12 (FULL OOP Refactor)
// Compile as x64 DLL, include zorro.h
// ======================================================================
//
// Refactor goals achieved:
// - ALL prior “globals” moved into Alpha12Strategy (and its components).
// - Components: NodePool, DTree, MarkovChain, NetState, Logger, RuntimeManager.
// - No more static state inside functions (seedBar/haveSeed/seedVal moved into members).
// - run() remains a thin C bridge.
//
// Notes:
// - Logic is preserved: this is primarily an encapsulation/refactor.
// - Memory management remains malloc/free like your original (safe incremental step).
// ======================================================================
#define _CRT_SECURE_NO_WARNINGS
#include <zorro.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
// ======================================================================
// ================= USER CONFIG =================
#define ASSET_SYMBOL "EUR/USD"
#define BAR_PERIOD 5
#define TF_H1 12
#define MC_ACT 0.30 // initial threshold on |CDL| in [-1..1] to accept a pattern
#define PBULL_LONG_TH 0.60 // Markov gate for long
#define PBULL_SHORT_TH 0.40 // Markov gate for short
// Debug toggles
#define ENABLE_PLOTS 0
#define ENABLE_WATCH 0
// ================= ENGINE PARAMETERS =================
#define MAX_BRANCHES 3
#define MAX_DEPTH 4
#define NWIN 256
#define NET_EQNS 100
#define DEGREE 4
#define KPROJ 16
#define REWIRE_EVERY 127
#define CAND_NEIGH 8
// ===== LOGGING CONTROLS =====
#define LOG_EQ_TO_ONE_FILE 1
#define LOG_EXPR_TEXT 0
#define META_EVERY 4
#define LOG_EQ_SAMPLE NET_EQNS
#define EXPR_MAXLEN 512
#define LOG_EVERY 16
#define MC_EVERY 1
// ---- DTREE feature sizes ----
#define ADV_EQ_NF 19
#define ADV_PAIR_NF 12
// ================= Candles -> 122-state Markov =================
#define MC_NPAT 61
#define MC_STATES 123
#define MC_NONE 0
// ================= Runtime Memory / Accuracy Manager =================
#define MEM_BUDGET_MB 50
#define MEM_HEADROOM_MB 5
#define DEPTH_STEP_BARS 16
#define KEEP_CHILDREN_HI 2
#define KEEP_CHILDREN_LO 1
#define RUNTIME_MIN_DEPTH 2
// ===== Chunked rewire settings =====
#define REWIRE_BATCH_EQ_5M 24
#define REWIRE_BATCH_EQ_H1 64
#define REWIRE_MIN_BATCH 8
#define REWIRE_NORM_EVERY 1
#define REWIRE_MEM_SOFT (MEM_BUDGET_MB - 4)
#define REWIRE_MEM_HARD (MEM_BUDGET_MB - 1)
// ===== Chunked update settings =====
#define UPDATE_BATCH_EQ_5M 32
#define UPDATE_BATCH_EQ_H1 96
#define UPDATE_MIN_BATCH 8
#define UPDATE_MEM_SOFT (MEM_BUDGET_MB - 4)
#define UPDATE_MEM_HARD (MEM_BUDGET_MB - 1)
// ======================= Tight-memory switches =======================
#define TIGHT_MEM 1
#ifdef TIGHT_MEM
typedef float fvar;
typedef short i16;
typedef char i8;
#else
typedef var fvar;
typedef int i16;
typedef int i8;
#endif
// ======================================================================
// Forward declarations
class Alpha12Strategy;
static Alpha12Strategy* gAlpha12 = nullptr;
// ======================================================================
// ========================= NodePool (component) ========================
struct Node {
var v;
var r;
void* c;
int n;
int d;
};
struct NodeChunk {
NodeChunk* next;
int used;
int _pad;
Node nodes[256];
};
class NodePool {
NodeChunk* head = 0;
Node* freeList = 0;
public:
~NodePool() { freeAll(); }
Node* allocNode() {
if(freeList) {
Node* n = freeList;
freeList = (Node*)n->c;
n->c = 0; n->n = 0; n->d = 0; n->v = 0; n->r = 0;
return n;
}
if(!head || head->used >= 256) {
NodeChunk* ch = (NodeChunk*)malloc(sizeof(NodeChunk));
if(!ch) quit("Alpha12: OOM allocating NodeChunk");
memset(ch, 0, sizeof(NodeChunk));
ch->next = head;
head = ch;
}
if(head->used < 0 || head->used >= 256) quit("Alpha12: Corrupt node pool state");
return &head->nodes[head->used++];
}
void freeNode(Node* u) {
if(!u) return;
u->c = (void*)freeList;
freeList = u;
}
void freeAll() {
NodeChunk* ch = head;
while(ch) {
NodeChunk* nx = ch->next;
free(ch);
ch = nx;
}
head = 0;
freeList = 0;
}
};
// ======================================================================
// ========================= MarkovChain (component) =====================
class MarkovChain {
public:
int* Count = 0; // [MC_STATES*MC_STATES]
int* RowSum = 0; // [MC_STATES]
int Prev = -1;
int Cur = 0;
var PBullNext = 0.5;
var Entropy = 1.0;
var Alpha = 1.0; // Laplace smoothing
public:
void alloc() {
int NN = MC_STATES*MC_STATES;
int bytesMat = NN*sizeof(int);
int bytesRow = MC_STATES*sizeof(int);
Count = (int*)malloc(bytesMat);
RowSum = (int*)malloc(bytesRow);
if(!Count || !RowSum) quit("Alpha12: OOM in MarkovChain::alloc");
memset(Count,0,bytesMat);
memset(RowSum,0,bytesRow);
Prev = -1; Cur = 0; PBullNext = 0.5; Entropy = 1.0;
}
void freeMem() {
if(Count) free(Count);
if(RowSum) free(RowSum);
Count = RowSum = 0;
Prev = -1; Cur = 0; PBullNext = 0.5; Entropy = 1.0;
}
static int isBull(int s){
if(s<=0) return 0;
return ((s-1)%2)==1;
}
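// Map the 61 candle-pattern outputs to a single Markov state:
// pick the pattern with the largest |value|; if it does not clear the
// acceptance threshold 'thr', return MC_NONE (0). Otherwise encode
// state = 1 + 2*patternIndex + bullFlag, giving states 1..122 where the
// low bit of (state-1) stores the bullish direction (see isBull above).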
static int stateFromCDL(var* cdl /*len=61*/, var thr) {
int i, best=-1;
var besta=0;
for(i=0;i<MC_NPAT;i++){
var a = abs(cdl[i]);
if(a>besta){ besta=a; best=i; }
}
if(best<0) return MC_NONE;
if(besta < thr) return MC_NONE;
int bull = (cdl[best] > 0);
return 1 + 2*best + bull; // 1..122
}
int idx(int fr,int to) const { return fr*MC_STATES + to; }
void update(int sPrev,int sCur){
if(sPrev<0) return;
Count[idx(sPrev,sCur)]++;
RowSum[sPrev]++;
}
var prob(int s,int t) const {
var num = (var)Count[idx(s,t)] + Alpha;
var den = (var)RowSum[s] + Alpha*MC_STATES;
if(den<=0) return 1.0/MC_STATES;
return num/den;
}
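// prob() above is the Laplace-smoothed transition estimate:
// p(t|s) = (Count[s][t] + Alpha) / (RowSum[s] + Alpha*MC_STATES).
// rowStats() below renormalizes it over the non-'none' targets
// t = 1..MC_STATES-1 and reports the bullish mass and the row
// entropy normalized by log(MC_STATES-1).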
// robust row stats
void rowStats(int s, var* outPBull, var* outEntropy) {
if(outPBull) *outPBull=0.5;
if(outEntropy) *outEntropy=1.0;
if(!Count || !RowSum) return;
if(!(Alpha > 0)) Alpha = 1.0;
if(s <= 0 || s >= MC_STATES) return;
if(RowSum[s] <= 0) return;
var den = (var)RowSum[s] + Alpha*(var)MC_STATES;
if(!(den > 0)) return;
var Z=0, pBull=0;
int t;
for(t=1;t<MC_STATES;t++){
var p = ((var)Count[idx(s,t)] + Alpha) / den;
Z += p;
if(isBull(t)) pBull += p;
}
if(!(Z>0)) return;
var H=0;
var Hmax = log((var)(MC_STATES-1));
if(!(Hmax > 0)) Hmax = 1.0;
for(t=1;t<MC_STATES;t++){
var p = (((var)Count[idx(s,t)] + Alpha) / den) / Z;
if(p>0) H += -p*log(p);
}
if(outPBull) *outPBull = pBull / Z;
if(outEntropy) *outEntropy = H / Hmax;
}
};
// ======================================================================
// =========================== Logger (component) ========================
class Alpha12Logger {
int wroteHeader = 0;
public:
void writeEqHeaderOnce(){
if(wroteHeader) return;
wroteHeader = 1;
file_append("Log\\Alpha12_eq_all.csv",
"Bar,Epoch,Ctx,EqCount,i,n1,n2,TreeId,Depth,Rate,Pred,Adv,Prop,Mode,WAdv,WTree,PBull,Entropy,MCState,ExprLen,ExprHash,tanhN,sinN,cosN\n");
}
static void strlcat_safe(string dst, string src, int cap) {
if(!dst || !src || cap <= 0) return;
int dl = (int)strlen(dst);
int sl = (int)strlen(src);
int room = cap - 1 - dl;
if(room <= 0){ if(cap > 0) dst[cap-1] = 0; return; }
int i;
for(i = 0; i < room && i < sl; i++) dst[dl + i] = src[i];
dst[dl + i] = 0;
}
static int countSubStr(string s, string sub){
if(!s || !sub) return 0;
int n=0; string p=s; int sublen = (int)strlen(sub); if(sublen<=0) return 0;
while((p=strstr(p,sub))){ n++; p += sublen; }
return n;
}
static int djb2_hash(string s){
int h = 5381, c, i = 0;
if(!s) return h;
while((c = s[i++])) h = ((h<<5)+h) ^ c;
return h & 0x7fffffff;
}
void appendEqMetaLine(
int bar, int epoch, int ctx,
int i, int n1, int n2, int tid, int depth, var rate, var pred, var adv, var prop, int mode,
var wadv, var wtree, var pbull, var ent, int mcstate, string expr)
{
if(i >= LOG_EQ_SAMPLE) return;
int eLen = 0, eHash = 0, cT = 0, cS = 0, cC = 0;
if(expr){
eLen = (int)strlen(expr);
eHash = (int)djb2_hash(expr);
cT = countSubStr(expr,"tanh(");
cS = countSubStr(expr,"sin(");
cC = countSubStr(expr,"cos(");
} else {
eHash = (int)djb2_hash("");
}
file_append("Log\\Alpha12_eq_all.csv",
strf("%i,%i,%i,%i,%i,%i,%i,%i,%i,%.4f,%.4f,%.4f,%.4f,%i,%.3f,%.3f,%.4f,%.4f,%i,%i,%i,%i,%i,%i\n",
bar, epoch, ctx, NET_EQNS, i, n1, n2, tid, depth,
rate, pred, adv, prop, mode, wadv, wtree, pbull, ent,
mcstate, eLen, eHash, cT, cS, cC));
}
};
// ======================================================================
// =========================== NetState (component) ======================
class NetState {
public:
int N = NET_EQNS;
int D = DEGREE;
int K = KPROJ;
// core
var* State = 0;
var* Prev = 0;
var* StateSq = 0;
// adjacency & projection
i16* Adj = 0;
fvar* RP = 0;
fvar* Z = 0;
i8* Mode= 0;
// weights & params
fvar* WSelf=0; fvar* WN1=0; fvar* WN2=0; fvar* WGlob1=0; fvar* WGlob2=0; fvar* WMom=0; fvar* WTree=0; fvar* WAdv=0;
fvar *A1x=0,*A1lam=0,*A1mean=0,*A1E=0,*A1P=0,*A1i=0,*A1c=0;
fvar *A2x=0,*A2lam=0,*A2mean=0,*A2E=0,*A2P=0,*A2i=0,*A2c=0;
fvar *G1mean=0,*G1E=0,*G2P=0,*G2lam=0;
fvar* TreeTerm = 0;
i16* TopEq = 0;
fvar* TopW = 0;
i16* EqTreeId = 0;
fvar* TAlpha=0;
fvar* TBeta =0;
fvar* PropRaw=0;
fvar* Prop =0;
// expression buffers (optional)
string* Sym = 0;
int SymFreed = 0;
// Hit-rate
fvar* HitEW = 0;
int* HitN = 0;
fvar* AdvPrev = 0;
var Ret1 = 0;
// Projection cache guards
int ProjBar = -1;
int ProjK = -1;
// effective projection dim
int Keff = KPROJ;
public:
void allocate() {
int n=N, d=D, k=K;
State = (var*)malloc(n*sizeof(var));
Prev = (var*)malloc(n*sizeof(var));
StateSq = (var*)malloc(n*sizeof(var));
Adj = (i16*)malloc(n*d*sizeof(i16));
RP = (fvar*)malloc(k*n*sizeof(fvar));
Z = (fvar*)malloc(k*sizeof(fvar));
Mode = (i8*)malloc(n*sizeof(i8));
WSelf=(fvar*)malloc(n*sizeof(fvar));
WN1=(fvar*)malloc(n*sizeof(fvar));
WN2=(fvar*)malloc(n*sizeof(fvar));
WGlob1=(fvar*)malloc(n*sizeof(fvar));
WGlob2=(fvar*)malloc(n*sizeof(fvar));
WMom=(fvar*)malloc(n*sizeof(fvar));
WTree=(fvar*)malloc(n*sizeof(fvar));
WAdv=(fvar*)malloc(n*sizeof(fvar));
A1x=(fvar*)malloc(n*sizeof(fvar)); A1lam=(fvar*)malloc(n*sizeof(fvar)); A1mean=(fvar*)malloc(n*sizeof(fvar));
A1E=(fvar*)malloc(n*sizeof(fvar)); A1P=(fvar*)malloc(n*sizeof(fvar)); A1i=(fvar*)malloc(n*sizeof(fvar)); A1c=(fvar*)malloc(n*sizeof(fvar));
A2x=(fvar*)malloc(n*sizeof(fvar)); A2lam=(fvar*)malloc(n*sizeof(fvar)); A2mean=(fvar*)malloc(n*sizeof(fvar));
A2E=(fvar*)malloc(n*sizeof(fvar)); A2P=(fvar*)malloc(n*sizeof(fvar)); A2i=(fvar*)malloc(n*sizeof(fvar)); A2c=(fvar*)malloc(n*sizeof(fvar));
G1mean=(fvar*)malloc(n*sizeof(fvar)); G1E=(fvar*)malloc(n*sizeof(fvar));
G2P=(fvar*)malloc(n*sizeof(fvar)); G2lam=(fvar*)malloc(n*sizeof(fvar));
TAlpha=(fvar*)malloc(n*sizeof(fvar));
TBeta =(fvar*)malloc(n*sizeof(fvar));
TreeTerm=(fvar*)malloc(n*sizeof(fvar));
TopEq=(i16*)malloc(n*sizeof(i16));
TopW =(fvar*)malloc(n*sizeof(fvar));
PropRaw=(fvar*)malloc(n*sizeof(fvar));
Prop =(fvar*)malloc(n*sizeof(fvar));
EqTreeId=(i16*)malloc(n*sizeof(i16));
if(LOG_EXPR_TEXT) Sym = (string*)malloc(n*sizeof(char*)); else Sym = 0;
// init adjacency
{ int t; for(t=0;t<n*d;t++) Adj[t] = -1; }
// init core arrays
{
int i;
for(i=0;i<n;i++){
State[i]=random(); Prev[i]=State[i]; StateSq[i]=State[i]*State[i];
Mode[i]=0;
WSelf[i]=0.5f; WN1[i]=0.2f; WN2[i]=0.2f;
WGlob1[i]=0.1f; WGlob2[i]=0.1f; WMom[i]=0.05f;
WTree[i]=0.15f; WAdv[i]=0.15f;
A1x[i]=1; A1lam[i]=0.1f; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
A2x[i]=1; A2lam[i]=0.1f; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;
G1mean[i]=1.0f; G1E[i]=0.001f;
G2P[i]=0.6f; G2lam[i]=0.3f;
TAlpha[i]=0.8f; TBeta[i]=25.0f;
TreeTerm[i]=0;
TopEq[i]=-1; TopW[i]=0;
PropRaw[i]=1; Prop[i]=(fvar)(1.0/n);
if(LOG_EXPR_TEXT){
Sym[i] = (char*)malloc(EXPR_MAXLEN);
if(Sym[i]) strcpy(Sym[i],"");
}
}
}
// hit-rate arrays
HitEW = (fvar*)malloc(n*sizeof(fvar));
HitN = (int*)malloc(n*sizeof(int));
AdvPrev = (fvar*)malloc(n*sizeof(fvar));
{ int i; for(i=0;i<n;i++){ HitEW[i]=0.5f; HitN[i]=0; AdvPrev[i]=0; } }
// projection guards
ProjBar = -1; ProjK = -1;
}
void freeAll() {
int i;
if(State)free(State);
if(Prev)free(Prev);
if(StateSq)free(StateSq);
if(Adj)free(Adj);
if(RP)free(RP);
if(Z)free(Z);
if(Mode)free(Mode);
if(WSelf)free(WSelf);
if(WN1)free(WN1);
if(WN2)free(WN2);
if(WGlob1)free(WGlob1);
if(WGlob2)free(WGlob2);
if(WMom)free(WMom);
if(WTree)free(WTree);
if(WAdv)free(WAdv);
if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean);
if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);
if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean);
if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);
if(G1mean)free(G1mean); if(G1E)free(G1E);
if(G2P)free(G2P); if(G2lam)free(G2lam);
if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);
if(TreeTerm)free(TreeTerm);
if(TopEq)free(TopEq);
if(TopW)free(TopW);
if(EqTreeId)free(EqTreeId);
if(PropRaw)free(PropRaw);
if(Prop)free(Prop);
if(Sym){
for(i=0;i<N;i++) if(Sym[i]) free(Sym[i]);
free(Sym);
}
Sym = 0;
if(HitEW) free(HitEW);
if(HitN) free(HitN);
if(AdvPrev) free(AdvPrev);
// null everything
State=Prev=StateSq=0; Adj=0; RP=0; Z=0; Mode=0;
WSelf=WN1=WN2=WGlob1=WGlob2=WMom=WTree=WAdv=0;
A1x=A1lam=A1mean=A1E=A1P=A1i=A1c=0;
A2x=A2lam=A2mean=A2E=A2P=A2i=A2c=0;
G1mean=G1E=G2P=G2lam=0;
TAlpha=TBeta=0;
TreeTerm=0; TopEq=0; TopW=0; EqTreeId=0;
PropRaw=0; Prop=0;
HitEW=0; HitN=0; AdvPrev=0;
}
void randomizeRP(){
int k=K, n=N;
int kk,j;
for(kk=0;kk<k;kk++)
for(j=0;j<n;j++)
RP[kk*n+j] = ifelse(random(1) < 0.5, -1.0, 1.0);
}
int keffClamped() const {
int kk = Keff;
if(kk < 0) kk = 0;
if(kk > K) kk = K;
return kk;
}
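// Random sign projection: Z[k] = sum_j RP[k][j] * State[j]^2 with
// RP entries in {-1,+1}. Only the first Keff channels are computed,
// and the result is cached per (Bar, Keff) to avoid recomputation.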
void computeProjection(){
if(!RP || !Z || !StateSq) return;
int kk = keffClamped();
if(ProjBar == Bar && ProjK == kk) return;
int k, j;
for(k=0;k<kk;k++){
var acc=0;
for(j=0;j<N;j++) acc += (var)RP[k*N + j] * StateSq[j];
Z[k] = (fvar)acc;
}
ProjBar = Bar;
ProjK = kk;
}
void sanitizeAdjacency(){
if(!Adj) return;
int i,d;
for(i=0;i<N;i++){
for(d=0;d<D;d++){
i16* p = &Adj[i*D + d];
if(*p < 0 || *p >= N || *p == i){
int r = (int)random(N);
if(r==i) r = (r+1)%N;
*p = (i16)r;
}
}
if(D >= 2 && Adj[i*D+0] == Adj[i*D+1]){
int r2 = (Adj[i*D+1] + 1) % N;
if(r2 == i) r2 = (r2+1)%N;
Adj[i*D+1] = (i16)r2;
}
}
}
int adjSafe(int i, int d) const {
if(!Adj || N<=1 || D<=0) return 0;
if(d<0) d=0;
if(d>=D) d = d % D;
int v = Adj[i*D + d];
if(v<0 || v>=N || v==i) v = (i+1)%N;
return v;
}
void normalizeProportions(){
int i;
var s=0;
for(i=0;i<N;i++) s += PropRaw[i];
if(s<=0){
for(i=0;i<N;i++) Prop[i] = (fvar)(1.0/N);
return;
}
for(i=0;i<N;i++) Prop[i] = (fvar)(PropRaw[i]/s);
}
};
// ======================================================================
// ============================= DTree (component) =======================
class DTree {
public:
NodePool* pool = 0;
Node* Root = 0;
Node** TreeIdx = 0;
int TreeN = 0;
int TreeCap = 0;
var DTreeExp = 0;
var* DepthW = 0;
var DepthExpLast = -1.0;
Node DummyNode;
// Predictability cache
var* PredNode = 0;
int PredLen = 0;
int PredCap = 0;
int PredCacheBar = -1;
// equation-cycle angles
var* EqTheta = 0;
int LeadEq = -1;
var LeadTh = 0;
var CycPh = 0;
var CycSpd = 0;
public:
DTree() { memset(&DummyNode,0,sizeof(DummyNode)); }
void bindPool(NodePool* p){ pool = p; }
void allocDepthLUT(){
int sz = MAX_DEPTH + 1;
if(!DepthW) DepthW = (var*)malloc(sz*sizeof(var));
if(!DepthW) quit("Alpha12: OOM DepthW");
}
void freeAll(){
if(Root) freeTree(Root);
Root = 0;
if(TreeIdx) free(TreeIdx);
TreeIdx = 0; TreeN=0; TreeCap=0;
if(DepthW) free(DepthW);
DepthW=0;
if(PredNode) free(PredNode);
PredNode=0; PredLen=0; PredCap=0;
if(EqTheta) free(EqTheta);
EqTheta=0;
}
// Tree byte size
int tree_bytes(Node* u){
if(!u) return 0;
int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
int sz_node = 2*SZV + SZP + 2*SZI;
int total = sz_node;
if(u->n > 0 && u->c) total += u->n * SZP;
int i;
for(i=0;i<u->n;i++) total += tree_bytes(((Node**)u->c)[i]);
return total;
}
void refreshDepthW(){
if(!DepthW) return;
int d;
for(d=0; d<=MAX_DEPTH; d++) DepthW[d] = 1.0 / pow(d+1, DTreeExp);
DepthExpLast = DTreeExp;
}
Node* createNode(int depth){
if(!pool) quit("Alpha12: DTree pool not bound");
Node* u = pool->allocNode();
if(!u) return 0;
u->v = random();
u->r = 0.01 + 0.02*depth + random(0.005);
u->d = depth;
if(depth > 0){
u->n = 1 + (int)random(MAX_BRANCHES);
u->c = (void**)malloc(u->n*sizeof(void*));
if(!u->c){ u->n=0; u->c=0; return u; }
int i;
for(i=0;i<u->n;i++){
((Node**)u->c)[i] = createNode(depth-1);
}
} else {
u->n=0; u->c=0;
}
return u;
}
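// Recursive node update: sum the children's values, form a rhythmic
// phase sin(rate*Bar + childSum), and move the stored node value toward
// that phase with the depth weight DepthW[d] = 1/(d+1)^DTreeExp.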
var evaluateNode(Node* u){
if(!u) return 0;
var sum=0; int i;
for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);
if(DepthExpLast < 0 || abs(DTreeExp - DepthExpLast) > 1e-9) refreshDepthW();
var phase = sin(u->r * Bar + sum);
var weight = DepthW[u->d];
u->v = (1 - weight)*u->v + weight*phase;
return u->v;
}
void freeTree(Node* u){
if(!u) return;
int i;
for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]);
if(u->c) free(u->c);
pool->freeNode(u);
}
void pushTreeNode(Node* u){
if(TreeN >= TreeCap){
int newCap = TreeCap*2;
if(newCap < 64) newCap = 64;
TreeIdx = (Node**)realloc(TreeIdx, newCap*sizeof(Node*));
TreeCap = newCap;
}
TreeIdx[TreeN++] = u;
}
void indexTreeDFS(Node* u){
if(!u) return;
pushTreeNode(u);
int i;
for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]);
}
void ensurePredCache(){
if(PredCacheBar != Bar){
if(PredNode){
int i;
for(i=0;i<PredLen;i++) PredNode[i] = -2;
}
PredCacheBar = Bar;
}
}
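// Predictability heuristic in [0,1]: average a depth factor 1/(1+d)
// with a rate-suitability factor exp(-25*|r - rateBase|), then blend
// with (1 - mean child dispersion) and clamp to [0,1].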
var nodePredictability(Node* t){
if(!t) return 0.5;
var disp = 0; int n=t->n, i, cnt=0;
if(t->c){
for(i=0;i<n;i++){
Node* c = ((Node**)t->c)[i];
if(c){ disp += abs(c->v - t->v); cnt++; }
}
if(cnt>0) disp /= cnt;
}
var depthFac = 1.0/(1 + t->d);
var rateBase = 0.01 + 0.02*t->d;
var rateFac = exp(-25.0*abs(t->r - rateBase));
var p = 0.5*(depthFac + rateFac);
p = 0.5*p + 0.5*(1.0 + (-disp));
if(p<0) p=0; if(p>1) p=1;
return p;
}
var predByTid(int tid){
if(!TreeIdx || tid < 0 || tid >= TreeN || !TreeIdx[tid]) return 0.5;
ensurePredCache();
if(PredNode && tid < PredLen && PredNode[tid] > -1.5) return PredNode[tid];
Node* t = TreeIdx[tid];
var p = nodePredictability(t);
if(PredNode && tid < PredLen) PredNode[tid] = p;
return p;
}
Node* treeAt(int tid){
if(!TreeIdx || tid < 0 || tid >= TreeN || !TreeIdx[tid]) return &DummyNode;
return TreeIdx[tid];
}
int safeTreeIndexFromEq(int eqTreeId, int treeN){
int denom = ifelse(treeN>0, treeN, 1);
int tid = eqTreeId;
if(tid < 0) tid = 0;
if(denom > 0) tid = tid % denom;
if(tid < 0) tid = 0;
return tid;
}
void maybeShrinkTreeIdx(){
if(!TreeIdx) return;
if(TreeCap > 64 && TreeN < (TreeCap>>1)){
int newCap = (TreeCap>>1);
if(newCap < 64) newCap = 64;
TreeIdx = (Node**)realloc(TreeIdx, newCap*sizeof(Node*));
TreeCap = newCap;
}
}
void resizePredCacheToTree(){
PredLen = TreeN; if(PredLen <= 0) PredLen = 1;
if(PredLen > PredCap){
if(PredNode) free(PredNode);
PredNode = (var*)malloc(PredLen*sizeof(var));
PredCap = PredLen;
}
PredCacheBar = -1;
}
// equation ring angles (needs EqTreeId mapping provided externally)
void refreshEqAngles(const i16* eqTreeId, int eqN){
if(!EqTheta) EqTheta = (var*)malloc(eqN*sizeof(var));
if(!EqTheta) quit("Alpha12: OOM EqTheta");
var twoPi = 2.*3.141592653589793;
var denom = ifelse(TreeN>0,(var)TreeN,1.0);
int i;
for(i=0;i<eqN;i++){
int tid = safeTreeIndexFromEq((int)eqTreeId[i], TreeN);
var u = ((var)tid)/denom;
EqTheta[i] = twoPi*u;
}
}
static var pi(){ return 3.141592653589793; }
static var wrapPi(var a){
while(a <= -pi()) a += 2.*pi();
while(a > pi()) a -= 2.*pi();
return a;
}
static var angDiff(var a, var b){ return wrapPi(b - a); }
void updateEquationCycle(const fvar* prop, int N){
if(!EqTheta){ CycPh = wrapPi(CycPh); return; }
int i, bestI=0;
var bestP=-1;
for(i=0;i<N;i++){
var p = (var)prop[i];
if(p > bestP){ bestP=p; bestI=i; }
}
var th = EqTheta[bestI];
var d = angDiff(LeadTh, th);
CycSpd = 0.9*CycSpd + 0.1*d;
CycPh = wrapPi(CycPh + CycSpd);
LeadEq = bestI;
LeadTh = th;
}
// --- prune & grow helpers (direct lift) ---
var nodeImportance(Node* u){
if(!u) return 0;
var amp = abs(u->v); if(amp>1) amp=1;
var p = nodePredictability(u);
var depthW = 1.0/(1.0 + u->d);
var imp = (0.6*p + 0.4*amp) * depthW;
return imp;
}
Node* createLeafDepth(int d){
Node* u = pool->allocNode();
if(!u) return 0;
u->v = random();
u->r = 0.01 + 0.02*d + random(0.005);
u->n = 0;
u->c = 0;
u->d = d;
return u;
}
void growSelectiveAtDepth(Node* u, int frontierDepth, int addK){
if(!u) return;
if(u->d == frontierDepth){
int want = addK; if(want<=0) return;
int oldN=u->n, newN=oldN+want;
Node** Cnew = (Node**)malloc(newN*sizeof(void*));
if(!Cnew) return;
if(oldN>0 && u->c) memcpy(Cnew,u->c,oldN*sizeof(void*));
int i;
for(i=oldN;i<newN;i++) Cnew[i] = createLeafDepth(frontierDepth-1);
if(u->c) free(u->c);
u->c = Cnew;
u->n = newN;
return;
}
int j;
for(j=0;j<u->n;j++) growSelectiveAtDepth(((Node**)u->c)[j],frontierDepth,addK);
}
void freeChildAt(Node* parent, int idx){
if(!parent || !parent->c) return;
Node** C = (Node**)parent->c;
freeTree(C[idx]);
int i;
for(i=idx+1;i<parent->n;i++) C[i-1]=C[i];
parent->n--;
if(parent->n==0){ free(parent->c); parent->c=0; }
}
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK){
if(!u) return;
if(u->d == targetDepth-1 && u->n > 0){
int n=u->n, i, kept=0;
int mark[16]; for(i=0;i<16;i++) mark[i]=0;
int iter;
for(iter=0; iter<keepK && iter<n; iter++){
int bestI=-1; var bestImp=-1;
for(i=0;i<n;i++){
if(i<16 && mark[i]==1) continue;
var imp = nodeImportance(((Node**)u->c)[i]);
if(imp > bestImp){ bestImp=imp; bestI=i; }
}
if(bestI>=0 && bestI<16){ mark[bestI]=1; kept++; }
}
for(i=n-1;i>=0;i--) if(i<16 && mark[i]==0) freeChildAt(u,i);
return;
}
int j; for(j=0;j<u->n;j++) pruneSelectiveAtDepth(((Node**)u->c)[j],targetDepth,keepK);
}
};
// ======================================================================
// ======================== RuntimeManager (component) ===================
class RuntimeManager {
public:
int MemFixedBytes = 0;
int TreeBytesCached = 0;
int ShedStage = 0;
int LastDepthActBar = -999999;
int ChartsOff = 0;
int LogsOff = 0;
int RT_TreeMaxDepth = MAX_DEPTH;
// Accuracy sentinel
var ACC_mx=0, ACC_my=0, ACC_mx2=0, ACC_my2=0, ACC_mxy=0;
var AccCorr=0;
var AccBase=0;
int HaveBase=0;
// Elastic depth tuner
var UtilBefore=0, UtilAfter=0;
int TunePending=0;
int TuneStartBar=0;
int TuneAction=0;
var DTreeExpStep = 0.05;
int DTreeExpDir = 1;
public:
int mem_bytes_est() const { return MemFixedBytes + TreeBytesCached; }
int mem_mb_est() const { return mem_bytes_est()/(1024*1024); }
void recalcTreeBytes(DTree& dt){ TreeBytesCached = dt.tree_bytes(dt.Root); }
void computeMemFixedBytes(const NetState& net, const DTree& dt, int includeExprBytes){
int N=net.N, D=net.D, K=net.K;
int SZV=sizeof(var), SZF=sizeof(fvar), SZI16=sizeof(i16), SZI8=sizeof(i8), SZP=sizeof(void*);
int b=0;
b += N*SZV*2; // State, Prev
b += N*SZV; // StateSq
b += N*D*SZI16; // Adj
b += N*SZI16; // EqTreeId
b += N*SZI8; // Mode
b += K*N*SZF; // RP
b += K*SZF; // Z
b += N*SZF*(8); // weights
b += N*SZF*(7+7); // A1*, A2*
b += N*SZF*(2+2); // G1mean,G1E,G2P,G2lam
b += N*SZF*(2); // TAlpha,TBeta
b += N*SZF*(1); // TreeTerm
b += N*(SZI16 + SZF); // TopEq,TopW
b += N*SZF*2; // PropRaw,Prop
b += N*SZF; // HitEW
b += N*SZF; // AdvPrev
b += N*sizeof(int); // HitN
// Markov: caller adds separately if desired (we do it in strategy since there are 3 chains)
b += dt.TreeCap*SZP; // Tree index capacity
if(includeExprBytes) b += N*EXPR_MAXLEN;
MemFixedBytes = b;
}
void shed_zero_cost_once(){
if(ShedStage > 0) return;
set(PLOTNOW|OFF);
ChartsOff = 1;
LogsOff = 1;
ShedStage = 1;
}
void acc_update(var x, var y){
var a=0.01;
ACC_mx=(1-a)*ACC_mx + a*x;
ACC_my=(1-a)*ACC_my + a*y;
ACC_mx2=(1-a)*ACC_mx2 + a*(x*x);
ACC_my2=(1-a)*ACC_my2 + a*(y*y);
ACC_mxy=(1-a)*ACC_mxy + a*(x*y);
var vx = ACC_mx2 - ACC_mx*ACC_mx;
var vy = ACC_my2 - ACC_my*ACC_my;
var cv = ACC_mxy - ACC_mx*ACC_my;
if(vx>0 && vy>0) AccCorr = cv / sqrt(vx*vy);
else AccCorr = 0;
if(!HaveBase){ AccBase=AccCorr; HaveBase=1; }
}
var util_now(){
int mb = mem_mb_est();
var mem_pen = 0;
if(mb > MEM_BUDGET_MB) mem_pen = (mb - MEM_BUDGET_MB)/(var)MEM_BUDGET_MB;
return AccCorr - 0.5*mem_pen;
}
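// Memory guard: once the estimate crosses MEM_BUDGET_MB - MEM_HEADROOM_MB
// it first sheds zero-cost items (plots, heavy logging), then prunes the
// tree frontier one depth level at a time (keeping KEEP_CHILDREN_HI/LO
// children) until the estimate drops back below the trigger. The caller
// is responsible for reindexing the tree afterwards.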
void depth_manager_runtime(DTree& dt){
int trigger = MEM_BUDGET_MB - MEM_HEADROOM_MB;
int mb = mem_mb_est();
if(mb < trigger) return;
if(ShedStage == 0) shed_zero_cost_once();
if(ShedStage <= 1) ShedStage = 2;
int overBudget = (mb >= MEM_BUDGET_MB);
if(!overBudget && (Bar - LastDepthActBar < DEPTH_STEP_BARS)) return;
while(RT_TreeMaxDepth > RUNTIME_MIN_DEPTH) {
int keepK = ifelse(mem_mb_est() < MEM_BUDGET_MB + 2, KEEP_CHILDREN_HI, KEEP_CHILDREN_LO);
dt.pruneSelectiveAtDepth(dt.Root, RT_TreeMaxDepth, keepK);
RT_TreeMaxDepth--;
// reindex caller does (strategy)
mb = mem_mb_est();
printf("\n[DepthMgr] depth=%i keepK=%i est=%i MB", RT_TreeMaxDepth, keepK, mb);
if(mb < trigger) break;
}
LastDepthActBar = Bar;
}
};
// ======================================================================
// ========================= Alpha12Strategy (owner) =====================
class Alpha12Strategy {
public:
// Components
NodePool pool;
DTree dt;
NetState net;
Alpha12Logger log;
RuntimeManager rt;
// Markov: HTF, LTF, REL
MarkovChain MH;
MarkovChain ML;
MarkovChain MR;
// Strategy runtime
int ready = 0;
int Epoch = 0;
int CtxID = 0;
// Markov adaptive knobs
var FB_W = 0.70;
var MC_ACT_dyn = MC_ACT;
var MC_Alpha = 1.0;
int CandNeigh = CAND_NEIGH;
// Rewire/update cursors
int RewirePos = 0;
int RewirePasses = 0;
int UpdatePos = 0;
int UpdatePasses = 0;
// Signal & trade
var LastSig = 0;
// Advisor budget/rotation
int AdviseMax = 16;
// Advisor seed cache (moved from static)
int seedBar = -1;
int haveSeed[NET_EQNS];
var seedVal[NET_EQNS];
public:
Alpha12Strategy() {
dt.bindPool(&pool);
memset(haveSeed,0,sizeof(haveSeed));
memset(seedVal,0,sizeof(seedVal));
}
~Alpha12Strategy(){ cleanup(); }
// --------------------- utilities (direct lifts) ---------------------
static var randsign(){ return ifelse(random(1) < 0.5, -1.0, 1.0); }
static var mapUnit(var u,var lo,var hi){
if(u<-1) u=-1;
if(u>1) u=1;
var t=0.5*(u+1.0);
return lo + t*(hi-lo);
}
static var safeNum(var x){ if(invalid(x)) return 0; return clamp(x,-1e100,1e100); }
static void sanitize(var* A,int n){ int k; for(k=0;k<n;k++) A[k]=safeNum(A[k]); }
static var sat100(var x){ return clamp(x,-100.,100.); }
static var nrm_s(var x) { return sat100(100.*tanh(x)); }
static var nrm_scl(var x, var s) { return sat100(100.*tanh(s*x)); }
// --------------- Candlestick pattern builder (unchanged) ------------
int buildCDL_TA61(var* out, string* names)
{
int n = 0;
#define ADD(Name, Call) do{ var v = (Call); if(out) out[n] = v/100.; if(names) names[n] = Name; n++; }while(0)
ADD("CDL2Crows", CDL2Crows());
ADD("CDL3BlackCrows", CDL3BlackCrows());
ADD("CDL3Inside", CDL3Inside());
ADD("CDL3LineStrike", CDL3LineStrike());
ADD("CDL3Outside", CDL3Outside());
ADD("CDL3StarsInSouth", CDL3StarsInSouth());
ADD("CDL3WhiteSoldiers", CDL3WhiteSoldiers());
ADD("CDLAbandonedBaby", CDLAbandonedBaby(0.3));
ADD("CDLAdvanceBlock", CDLAdvanceBlock());
ADD("CDLBeltHold", CDLBeltHold());
ADD("CDLBreakaway", CDLBreakaway());
ADD("CDLClosingMarubozu", CDLClosingMarubozu());
ADD("CDLConcealBabysWall", CDLConcealBabysWall());
ADD("CDLCounterAttack", CDLCounterAttack());
ADD("CDLDarkCloudCover", CDLDarkCloudCover(0.3));
ADD("CDLDoji", CDLDoji());
ADD("CDLDojiStar", CDLDojiStar());
ADD("CDLDragonflyDoji", CDLDragonflyDoji());
ADD("CDLEngulfing", CDLEngulfing());
ADD("CDLEveningDojiStar", CDLEveningDojiStar(0.3));
ADD("CDLEveningStar", CDLEveningStar(0.3));
ADD("CDLGapSideSideWhite", CDLGapSideSideWhite());
ADD("CDLGravestoneDoji", CDLGravestoneDoji());
ADD("CDLHammer", CDLHammer());
ADD("CDLHangingMan", CDLHangingMan());
ADD("CDLHarami", CDLHarami());
ADD("CDLHaramiCross", CDLHaramiCross());
ADD("CDLHignWave", CDLHignWave());
ADD("CDLHikkake", CDLHikkake());
ADD("CDLHikkakeMod", CDLHikkakeMod());
ADD("CDLHomingPigeon", CDLHomingPigeon());
ADD("CDLIdentical3Crows", CDLIdentical3Crows());
ADD("CDLInNeck", CDLInNeck());
ADD("CDLInvertedHammer", CDLInvertedHammer());
ADD("CDLKicking", CDLKicking());
ADD("CDLKickingByLength", CDLKickingByLength());
ADD("CDLLadderBottom", CDLLadderBottom());
ADD("CDLLongLeggedDoji", CDLLongLeggedDoji());
ADD("CDLLongLine", CDLLongLine());
ADD("CDLMarubozu", CDLMarubozu());
ADD("CDLMatchingLow", CDLMatchingLow());
ADD("CDLMatHold", CDLMatHold(0.5));
ADD("CDLMorningDojiStar", CDLMorningDojiStar(0.3));
ADD("CDLMorningStar", CDLMorningStar(0.3));
ADD("CDLOnNeck", CDLOnNeck());
ADD("CDLPiercing", CDLPiercing());
ADD("CDLRickshawMan", CDL_RickshawMan());
ADD("CDLRiseFall3Methods", CDL_RiseFall3Methods());
ADD("CDLSeperatingLines", CDL_SeperatingLines());
ADD("CDLShootingStar", CDL_ShootingStar());
ADD("CDLShortLine", CDL_ShortLine());
ADD("CDLSpinningTop", CDL_SpinningTop());
ADD("CDLStalledPattern", CDL_StalledPattern());
ADD("CDLStickSandwhich", CDL_StickSandwhich());
ADD("CDLTakuri", CDL_Takuri());
ADD("CDLTasukiGap", CDL_TasukiGap());
ADD("CDLThrusting", CDL_Thrusting());
ADD("CDLTristar", CDL_Tristar());
ADD("CDLUnique3River", CDL_Unique3River());
ADD("CDLUpsideGap2Crows", CDLUpsideGap2Crows());
ADD("CDLXSideGap3Methods", CDL_XSideGap3Methods());
#undef ADD
return n;
}
// Some Zorro candle functions have exact names; if your original compile used
// "CDLRickshawMan()" etc. then replace above stubs with exact spelling.
// To preserve your original code, you can revert those 10 lines back to your originals.
// --- quick aliases (in case of underscore variants) ---
var CDL_RickshawMan(){ return CDLRickshawMan(); }
var CDL_RiseFall3Methods(){ return CDLRiseFall3Methods(); }
var CDL_SeperatingLines(){ return CDLSeperatingLines(); }
var CDL_ShootingStar(){ return CDLShootingStar(); }
var CDL_ShortLine(){ return CDLShortLine(); }
var CDL_SpinningTop(){ return CDLSpinningTop(); }
var CDL_StalledPattern(){ return CDLStalledPattern(); }
var CDL_StickSandwhich(){ return CDLStickSandwhich(); }
var CDL_Takuri(){ return CDLTakuri(); }
var CDL_TasukiGap(){ return CDLTasukiGap(); }
var CDL_Thrusting(){ return CDLThrusting(); }
var CDL_Tristar(){ return CDLTristar(); }
var CDL_Unique3River(){ return CDLUnique3River(); }
var CDL_XSideGap3Methods(){ return CDLXSideGap3Methods(); }
// --------------- Markov relation mapping (unchanged) ----------------
int relFromHL(int sL, int sH){
if(sL <= 0 || sH <= 0) return MC_NONE;
int idxL = (sL - 1)/2; int bullL = ((sL - 1)%2)==1;
int idxH = (sH - 1)/2; int bullH = ((sH - 1)%2)==1;
if(idxL == idxH && bullL == bullH) return sL;
return MC_NONE;
}
int is_H1_close(){ return (Bar % TF_H1) == 0; }
// ------------------ memory estimator integration --------------------
int mem_mb_est() { return rt.mem_mb_est(); }
// ----------------------- strategy init/cleanup ----------------------
void init() {
if(ready) return;
// IMPORTANT: all bar-generation settings must be set BEFORE asset()
BarPeriod = BAR_PERIOD;
LookBack = max(300, NWIN);
// Optional, but if you use them they MUST also be before asset()
// StartDate = 20210101;
// EndDate = 20251231;
set(PLOTNOW);
// asset() must come AFTER BarPeriod/LookBack/StartDate/EndDate
asset(ASSET_SYMBOL);
// effective K
net.Keff = KPROJ;
if(net.Keff < 1) net.Keff = 1;
if(net.Keff > net.K) net.Keff = net.K;
// allocate components
net.allocate();
MH.alloc(); ML.alloc(); MR.alloc();
MH.Alpha = MC_Alpha; ML.Alpha = MC_Alpha; MR.Alpha = MC_Alpha;
dt.allocDepthLUT();
dt.DTreeExp = 0;
dt.Root = dt.createNode(MAX_DEPTH);
rt.RT_TreeMaxDepth = MAX_DEPTH;
dt.refreshDepthW();
// index tree and map EqTreeId
reindexTreeAndMap();
// projection matrix
net.randomizeRP();
net.computeProjection();
// initial full rewire
rewireEpoch(0,0,0,0);
log.writeEqHeaderOnce();
RewirePos=0; RewirePasses=0;
UpdatePos=0; UpdatePasses=0;
// seed cache reset
seedBar = -1;
memset(haveSeed,0,sizeof(haveSeed));
ready = 1;
printf("\n[Alpha12-OOP] init done: N=%i D=%i K=%i (Keff=%i) Depth=%i est=%i MB",
net.N, net.D, net.K, net.Keff, rt.RT_TreeMaxDepth, mem_mb_est());
}
void cleanup() {
if(!ready) return;
MH.freeMem(); ML.freeMem(); MR.freeMem();
dt.freeAll();
pool.freeAll();
net.freeAll();
ready = 0;
}
// --------------------- core tree reindex & mapping -------------------
void reindexTreeAndMap(){
dt.TreeN = 0;
dt.indexTreeDFS(dt.Root);
if(dt.TreeN <= 0){
dt.TreeN = 1;
if(dt.TreeIdx) dt.TreeIdx[0] = dt.Root;
}
// map eq -> tree id
{ int i; for(i=0;i<net.N;i++) net.EqTreeId[i] = (i16)(i % dt.TreeN); }
dt.resizePredCacheToTree();
dt.refreshEqAngles(net.EqTreeId, net.N);
dt.maybeShrinkTreeIdx();
rt.recalcTreeBytes(dt);
// recompute fixed bytes (include markov storage and expr storage if enabled)
int includeExpr = (LOG_EXPR_TEXT && net.Sym && !net.SymFreed) ? 1 : 0;
rt.computeMemFixedBytes(net, dt, includeExpr);
// add Markov bytes (3 chains)
rt.MemFixedBytes += 3*(MC_STATES*MC_STATES*(int)sizeof(int) + MC_STATES*(int)sizeof(int));
}
// --------------------- Markov updates (OOP) --------------------------
void updateMarkov_5M(){
static var CDL_L[MC_NPAT];
buildCDL_TA61(CDL_L,0);
int s = MarkovChain::stateFromCDL(CDL_L, MC_ACT_dyn);
if(Bar > LookBack) ML.update(ML.Prev, s);
ML.Prev = s;
if(s > 0 && s < MC_STATES){
if(ML.RowSum[s] > 0) ML.rowStats(s, &ML.PBullNext, &ML.Entropy);
ML.Cur = s;
}
}
void updateMarkov_1H(){
int saveTF = TimeFrame;
TimeFrame = TF_H1;
static var CDL_H[MC_NPAT];
buildCDL_TA61(CDL_H,0);
int sH = MarkovChain::stateFromCDL(CDL_H, MC_ACT_dyn);
if(Bar > LookBack) MH.update(MH.Prev, sH);
MH.Prev = sH;
if(sH > 0 && sH < MC_STATES){
if(MH.RowSum[sH] > 0) MH.rowStats(sH, &MH.PBullNext, &MH.Entropy);
MH.Cur = sH;
}
TimeFrame = saveTF;
}
void updateMarkov_REL(){
int r = relFromHL(ML.Cur, MH.Cur);
if(Bar > LookBack) MR.update(MR.Prev, r);
MR.Prev = r;
if(r > 0 && r < MC_STATES){
if(MR.RowSum[r] > 0) MR.rowStats(r, &MR.PBullNext, &MR.Entropy);
MR.Cur = r;
}
}
void updateAllMarkov(){
// propagate Laplace alpha
MH.Alpha = MC_Alpha;
ML.Alpha = MC_Alpha;
MR.Alpha = MC_Alpha;
updateMarkov_5M();
if(is_H1_close()){
updateMarkov_1H();
updateMarkov_REL();
}
}
// --------------------- Advisor rotation (unchanged) ------------------
int allowAdvise(int i){
int groups = net.N / AdviseMax;
if(groups < 1) groups = 1;
return ((i / AdviseMax) % groups) == (Bar % groups);
}
// --------------------- DTREE advisor wrappers ------------------------
var adviseEq(int i, var lambda, var mean, var energy, var power){
if(!allowAdvise(i)) return 0;
if(is(INITRUN)) return 0;
int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
if(tight) return 0;
if(net.HitN[i] > 32){
var h = (var)net.HitEW[i];
var gate = 0.40 + 0.15*(1.0 - MH.Entropy);
if(h < gate){
if(random() >= 0.5) return 0;
}
}
int tid = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
var pred = dt.predByTid(tid);
var S[ADV_EQ_NF];
buildEqFeatures(i, lambda, mean, energy, power, pred, S);
var obj = 0;
if(Train){
obj = sat100(100.0*tanh(0.6*lambda + 0.4*mean));
var prior = 0.75 + 0.5*((var)net.HitEW[i] - 0.5);
obj *= prior;
// cycle priors
{ var th_i = (dt.EqTheta ? dt.EqTheta[i] : 0);
var dphi = DTree::angDiff(dt.CycPh, th_i);
var align = 0.90 + 0.20*(0.5*(cos(dphi)+1.0));
var spdOK = 0.90 + 0.20*clamp(abs(dt.CycSpd)/(0.15), 0., 1.);
obj *= align * spdOK;
}
}
int objI = (int)obj;
var a = adviseLong(DTREE, objI, S, ADV_EQ_NF);
return a/100.;
}
var adviseSeed(int i, var lambda, var mean, var energy, var power){
if(seedBar != Bar){
int k; for(k=0;k<net.N;k++) haveSeed[k]=0;
seedBar = Bar;
}
if(i < 0) i=0; if(i>=net.N) i=i % net.N;
if(!allowAdvise(i)) return 0;
if(!haveSeed[i]){
seedVal[i] = adviseEq(i,lambda,mean,energy,power);
haveSeed[i] = 1;
}
return seedVal[i];
}
static var mix01(var a, int salt){
var z = sin(123.456*a + 0.001*salt) + cos(98.765*a + 0.002*salt);
return tanh(0.75*z);
}
static var mapA(var a,var lo,var hi){ return mapUnit(a,lo,hi); }
// ------------------- Feature builders (unchanged logic) ---------------
void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var pred, var* S /*ADV_EQ_NF*/) {
int tid = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
Node* t = dt.treeAt(tid);
var th_i = (dt.EqTheta ? dt.EqTheta[i] : 0);
var dphi = DTree::angDiff(dt.CycPh, th_i);
var alignC = cos(dphi);
var alignS = sin(dphi);
S[0] = nrm_s(net.State[i]);
S[1] = nrm_s(mean);
S[2] = nrm_scl(power,0.05);
S[3] = nrm_scl(energy,0.01);
S[4] = nrm_s(lambda);
S[5] = sat100(200.0*(pred-0.5));
S[6] = sat100(200.0*((var)t->d/MAX_DEPTH)-100.0);
S[7] = sat100(1000.0*t->r);
S[8] = nrm_s((var)net.TreeTerm[i]);
S[9] = sat100( (200.0/3.0) * (var)((int)net.Mode[i]) - 100.0 );
// HTF
S[10] = sat100(200.0*(MH.PBullNext-0.5));
S[11] = sat100(200.0*(MH.Entropy-0.5));
S[12] = sat100(200.0*((var)net.HitEW[i] - 0.5));
S[13] = sat100(100.*alignC);
S[14] = sat100(100.*alignS);
// 5M & Relation
S[15] = sat100(200.0*(ML.PBullNext - 0.5));
S[16] = sat100(200.0*(ML.Entropy - 0.5));
S[17] = sat100(200.0*(MR.PBullNext - 0.5));
S[18] = sat100(200.0*(MR.Entropy - 0.5));
sanitize(S,ADV_EQ_NF);
}
// ---------------- adjacency scoring (heuristic only) ------------------
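// Pair score in roughly [-1,1]: 0.5*mean node predictability
// + 0.3*depth similarity 1/(1+|dI-dJ|) + 0.2*rate similarity
// 1/(1+50*|rI-rJ|), rescaled via 2*score-1.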
var scorePairSafe(int i, int j, var lambda, var mean, var energy, var power){
int ti = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
int tj = dt.safeTreeIndexFromEq((int)net.EqTreeId[j], dt.TreeN);
Node* ni = dt.treeAt(ti);
Node* nj = dt.treeAt(tj);
var simD = 1.0 / (1.0 + abs((var)ni->d - (var)nj->d));
var dr = 50.0*abs(ni->r - nj->r);
var simR = 1.0 / (1.0 + dr);
var predi = dt.predByTid(ti);
var predj = dt.predByTid(tj);
var pred = 0.5*(predi + predj);
var score = 0.5*pred + 0.3*simD + 0.2*simR;
return 2.0*score - 1.0;
}
void rewireAdjacency_DTREE_range(int i0,int i1, var lambda, var mean, var energy, var power){
int i,d,c,best,cand;
if(i0<0) i0=0; if(i1>net.N) i1=net.N;
for(i=i0;i<i1;i++){
for(d=0; d<net.D; d++){
var bestScore = -2; best=-1;
for(c=0;c<CandNeigh;c++){
cand = (int)random(net.N);
if(cand==i) continue;
int clash=0,k;
for(k=0;k<d;k++){
int prev = net.Adj[i*net.D + k];
if(prev>=0 && prev==cand){ clash=1; break; }
}
if(clash) continue;
var s = scorePairSafe(i,cand,lambda,mean,energy,power);
if(s > bestScore){ bestScore=s; best=cand; }
}
if(best<0){ do{ best=(int)random(net.N);} while(best==i); }
net.Adj[i*net.D + d] = (i16)best;
}
}
}
// ---------------- coefficient synthesis (unchanged) -------------------
void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power){
var seed = adviseSeed(i,lambda,mean,energy,power);
net.Mode[i] = (int)(abs(1000*seed)) & 3;
net.WSelf[i] = (fvar)mapA(mix01(seed, 11), 0.15, 0.85);
net.WN1[i] = (fvar)mapA(mix01(seed, 12), 0.05, 0.35);
net.WN2[i] = (fvar)mapA(mix01(seed, 13), 0.05, 0.35);
net.WGlob1[i] = (fvar)mapA(mix01(seed, 14), 0.05, 0.30);
net.WGlob2[i] = (fvar)mapA(mix01(seed, 15), 0.05, 0.30);
net.WMom[i] = (fvar)mapA(mix01(seed, 16), 0.02, 0.15);
net.WTree[i] = (fvar)mapA(mix01(seed, 17), 0.05, 0.35);
net.WAdv[i] = (fvar)mapA(mix01(seed, 18), 0.05, 0.35);
net.A1x[i] = (fvar)(randsign()*mapA(mix01(seed, 21), 0.6, 1.2));
net.A1lam[i] = (fvar)(randsign()*mapA(mix01(seed, 22), 0.05,0.35));
net.A1mean[i]= (fvar) mapA(mix01(seed, 23),-0.30,0.30);
net.A1E[i] = (fvar) mapA(mix01(seed, 24),-0.0015,0.0015);
net.A1P[i] = (fvar) mapA(mix01(seed, 25),-0.30,0.30);
net.A1i[i] = (fvar) mapA(mix01(seed, 26),-0.02,0.02);
net.A1c[i] = (fvar) mapA(mix01(seed, 27),-0.20,0.20);
net.A2x[i] = (fvar)(randsign()*mapA(mix01(seed, 31), 0.6, 1.2));
net.A2lam[i] = (fvar)(randsign()*mapA(mix01(seed, 32), 0.05,0.35));
net.A2mean[i]= (fvar) mapA(mix01(seed, 33),-0.30,0.30);
net.A2E[i] = (fvar) mapA(mix01(seed, 34),-0.0015,0.0015);
net.A2P[i] = (fvar) mapA(mix01(seed, 35),-0.30,0.30);
net.A2i[i] = (fvar) mapA(mix01(seed, 36),-0.02,0.02);
net.A2c[i] = (fvar) mapA(mix01(seed, 37),-0.20,0.20);
net.G1mean[i] = (fvar) mapA(mix01(seed, 41), 0.4, 1.6);
net.G1E[i] = (fvar) mapA(mix01(seed, 42),-0.004,0.004);
net.G2P[i] = (fvar) mapA(mix01(seed, 43), 0.1, 1.2);
net.G2lam[i] = (fvar) mapA(mix01(seed, 44), 0.05, 0.7);
net.TAlpha[i] = (fvar) mapA(mix01(seed, 51), 0.3, 1.5);
net.TBeta[i] = (fvar) mapA(mix01(seed, 52), 6.0, 50.0);
net.PropRaw[i] = (fvar)(0.01 + 0.99*(0.5*(seed+1.0)));
// reliability boost
{ var boost = 0.75 + 0.5*(var)net.HitEW[i];
net.PropRaw[i] = (fvar)((var)net.PropRaw[i] * boost);
}
}
void synthesizeEquation_range(int i0,int i1, var lambda, var mean, var energy, var power){
if(i0<0) i0=0; if(i1>net.N) i1=net.N;
int i;
for(i=i0;i<i1;i++) synthesizeEquationFromDTREE(i,lambda,mean,energy,power);
}
// ------------------- DTREE ensemble term (unchanged) ------------------
var dtreeTerm(int i, int* outTopEq, var* outTopW){
int j;
int tid_i = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
Node* ti = dt.treeAt(tid_i);
int di = ti->d; var ri=ti->r;
var predI = dt.predByTid(tid_i);
var alpha = (var)net.TAlpha[i];
var beta = (var)net.TBeta[i];
var sumw=0, acc=0, bestW=-1; int bestJ=-1;
for(j=0;j<net.N;j++){
if(j==i) continue;
int tid_j = dt.safeTreeIndexFromEq((int)net.EqTreeId[j], dt.TreeN);
Node* tj=dt.treeAt(tid_j);
int dj=tj->d; var rj=tj->r;
var predJ = dt.predByTid(tid_j);
var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
var predBoost = 0.5 + 0.5*(predI*predJ);
var propBoost = 0.5 + 0.5*( (net.Prop[i] + net.Prop[j]) );
w *= predBoost * propBoost;
var pairAdv = scorePairSafe(i,j,0,0,0,0);
var pairBoost = 0.75 + 0.25*(0.5*(pairAdv+1.0));
w *= pairBoost;
sumw += w;
acc += w*net.State[j];
if(w>bestW){ bestW=w; bestJ=j; }
}
if(outTopEq) *outTopEq = bestJ;
if(outTopW) *outTopW = ifelse(sumw>0, bestW/sumw, 0);
if(sumw>0) return acc/sumw;
return 0;
}
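// Worked example of the kernel weight above (before the boosts): with the
// initial TAlpha=0.8 and TBeta=25, a neighbor one depth level away
// (|di-dj|=1) whose rate differs by |ri-rj|=0.02 gets
//   w = exp(-0.8*1) * exp(-25*0.02) ~ 0.449 * 0.607 ~ 0.27,
// so the ensemble strongly favors trees of similar depth and similar rate.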
// ------------------- expression builder (optional) --------------------
void buildSymbolicExpr(int i, int n1, int n2){
if(!LOG_EXPR_TEXT) return;
if(!net.Sym) return;
string s = net.Sym[i];
s[0]=0;
string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
(var)net.A1x[i], n1, (var)net.A1lam[i], (var)net.A1mean[i], (var)net.A1E[i], (var)net.A1P[i], (var)net.A1i[i], (var)net.A1c[i]);
string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
(var)net.A2x[i], n2, (var)net.A2lam[i], (var)net.A2mean[i], (var)net.A2E[i], (var)net.A2P[i], (var)net.A2i[i], (var)net.A2c[i]);
Alpha12Logger::strlcat_safe(s, "x[i]_next = ", EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*x[i] + ", (var)net.WSelf[i]), EXPR_MAXLEN);
if(net.Mode[i]==1){
Alpha12Logger::strlcat_safe(s, strf("%.3f*tanh%s + ", (var)net.WN1[i], a1), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*sin%s + ", (var)net.WN2[i], a2), EXPR_MAXLEN);
} else if(net.Mode[i]==2){
Alpha12Logger::strlcat_safe(s, strf("%.3f*cos%s + ", (var)net.WN1[i], a1), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*tanh%s + ", (var)net.WN2[i], a2), EXPR_MAXLEN);
} else {
Alpha12Logger::strlcat_safe(s, strf("%.3f*sin%s + ", (var)net.WN1[i], a1), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*cos%s + ", (var)net.WN2[i], a2), EXPR_MAXLEN);
}
Alpha12Logger::strlcat_safe(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ",
(var)net.WGlob1[i], (var)net.G1mean[i], (var)net.G1E[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",
(var)net.WGlob2[i], (var)net.G2P[i], (var)net.G2lam[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*(x[i]-x_prev[i]) + ", (var)net.WMom[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("Prop[i]=%.4f; ", (var)net.Prop[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*DT(i) + ", (var)net.WTree[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*DTREE(i)", (var)net.WAdv[i]), EXPR_MAXLEN);
}
void buildSymbolicExpr_range(int i0,int i1){
if(!LOG_EXPR_TEXT) return;
if(i0<0) i0=0; if(i1>net.N) i1=net.N;
int i;
for(i=i0;i<i1;i++){
int n1 = net.adjSafe(i,0);
int n2 = ifelse(net.D>=2, net.adjSafe(i,1), n1);
buildSymbolicExpr(i,n1,n2);
}
}
// ------------------- chunked rewire orchestrator ----------------------
int rewireEpochChunk(var lambda, var mean, var energy, var power, int batch){
if(net.N <= 0) return 0;
if(batch < REWIRE_MIN_BATCH) batch = REWIRE_MIN_BATCH;
if(RewirePos >= net.N) RewirePos = 0;
int i0 = RewirePos;
int i1 = i0 + batch; if(i1 > net.N) i1 = net.N;
CandNeigh = ifelse(MH.Entropy < 0.45, CAND_NEIGH+4, CAND_NEIGH);
rewireAdjacency_DTREE_range(i0,i1, lambda,mean,energy,power);
net.sanitizeAdjacency();
synthesizeEquation_range(i0,i1, lambda,mean,energy,power);
buildSymbolicExpr_range(i0,i1);
RewirePos = i1;
if(RewirePos >= net.N){
RewirePos = 0;
RewirePasses++;
return 1;
}
return 0;
}
void rewireEpoch(var lambda, var mean, var energy, var power){
int done=0;
while(!done){
done = rewireEpochChunk(lambda,mean,energy,power, REWIRE_BATCH_EQ_H1);
}
net.normalizeProportions();
// context hash (unchanged)
{
int D = net.D, i, total = net.N*D;
unsigned int h = 2166136261u;
for(i=0;i<total;i++){
unsigned int x = (unsigned int)net.Adj[i];
h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
}
CtxID = (int)((h ^ ((unsigned int)Epoch<<8)) & 0x7fffffff);
}
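// The constants above are the 32-bit FNV offset basis (2166136261) and the
// golden-ratio mix 0x9e3779b9 from the usual hash_combine recipe; folding
// the result with Epoch gives each completed rewire pass its own CtxID.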
}
// ------------------- coarse net projection -> gamma -------------------
var projectNet(){
int i;
var sum=0,sumsq=0,cross=0;
for(i=0;i<net.N;i++){
sum += net.State[i];
sumsq += net.State[i]*net.State[i];
if(i+1<net.N) cross += net.State[i]*net.State[i+1];
}
var mean = sum/net.N;
var corr = cross/(net.N-1);
return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}
// ------------------- heavy update chunk (unchanged) -------------------
var nonlin1(int i, int n1, var lam, var mean, var E, var P){
var x = net.State[n1];
var arg = (var)net.A1x[i]*x + (var)net.A1lam[i]*lam + (var)net.A1mean[i]*mean + (var)net.A1E[i]*E + (var)net.A1P[i]*P + (var)net.A1i[i]*i + (var)net.A1c[i];
return arg;
}
var nonlin2(int i, int n2, var lam, var mean, var E, var P){
var x = net.State[n2];
var arg = (var)net.A2x[i]*x + (var)net.A2lam[i]*lam + (var)net.A2mean[i]*mean + (var)net.A2E[i]*E + (var)net.A2P[i]*P + (var)net.A2i[i]*i + (var)net.A2c[i];
return arg;
}
The Candle Oracle Lattice (cont.)
[Re: TipmyPip]
#489156
Yesterday at 10:21
TipmyPip
OP
Member
Joined: Sep 2017
Posts: 200
int heavyUpdateChunk(var lambda, var mean, var energy, var power, int batch){
if(net.N <= 0) return 0;
if(batch < UPDATE_MIN_BATCH) batch = UPDATE_MIN_BATCH;
if(UpdatePos >= net.N) UpdatePos = 0;
int i0 = UpdatePos;
int i1 = i0 + batch; if(i1 > net.N) i1 = net.N;
net.computeProjection();
int i;
for(i=i0;i<i1;i++){
int n1 = net.adjSafe(i,0);
int n2 = ifelse(net.D>=2, net.adjSafe(i,1), n1);
int topEq=-1; var topW=0;
var treeT = dtreeTerm(i, &topEq, &topW);
net.TreeTerm[i] = (fvar)treeT;
net.TopEq[i] = (i16)topEq;
net.TopW[i] = (fvar)topW;
var adv = adviseEq(i, lambda, mean, energy, power);
var a1 = nonlin1(i,n1,lambda,mean,energy,power);
var a2 = nonlin2(i,n2,lambda,mean,energy,power);
var t1,t2;
if(net.Mode[i]==1){ t1=tanh(a1); t2=sin(a2); }
else if(net.Mode[i]==2){ t1=cos(a1); t2=tanh(a2); }
else { t1=sin(a1); t2=cos(a2); }
var glob1 = tanh((var)net.G1mean[i]*mean + (var)net.G1E[i]*energy);
var glob2 = sin ((var)net.G2P[i]*power + (var)net.G2lam[i]*lambda);
var mom = (net.State[i] - net.Prev[i]);
var xnext =
(var)net.WSelf[i]*net.State[i]
+ (var)net.WN1[i]*t1
+ (var)net.WN2[i]*t2
+ (var)net.WGlob1[i]*glob1
+ (var)net.WGlob2[i]*glob2
+ (var)net.WMom[i]*mom
+ (var)net.WTree[i]*treeT
+ (var)net.WAdv[i]*adv;
xnext = clamp(xnext, -10., 10.);
net.Prev[i] = net.State[i];
net.State[i] = xnext;
net.StateSq[i] = xnext*xnext;
net.AdvPrev[i] = (fvar)adv;
// meta logging (sampled)
if(!rt.LogsOff && (Bar % LOG_EVERY)==0 && (i < LOG_EQ_SAMPLE)){
int tid = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
Node* tnode = dt.treeAt(tid);
int nodeDepth = (tnode ? tnode->d : 0);
var rate = (var)net.TBeta[i];
var pred = dt.predByTid(tid);
string expr = 0;
if(LOG_EXPR_TEXT && net.Sym) expr = net.Sym[i];
log.appendEqMetaLine(Bar, Epoch, CtxID,
i, n1, n2, tid, nodeDepth, rate, pred, adv, net.Prop[i], (int)net.Mode[i],
(var)net.WAdv[i], (var)net.WTree[i], MH.PBullNext, MH.Entropy, (int)MH.Cur,
expr);
}
}
UpdatePos = i1;
if(UpdatePos >= net.N){
UpdatePos = 0;
UpdatePasses++;
return 1;
}
return 0;
}
// ------------------- hit-rate scorer (unchanged) ----------------------
void updateHitRates(){
if(is(INITRUN)) return;
if(Bar <= LookBack) return;
var r = net.Ret1;
var sgnR = sign(r);
int i;
for(i=0;i<net.N;i++){
var a = (var)net.AdvPrev[i];
var sgnA = ifelse(a > 0.0001, 1, ifelse(a < -0.0001, -1, 0));
var hit = ifelse(sgnR == 0, 0.5, ifelse(sgnA == sgnR, 1.0, 0.0));
net.HitEW[i] = (fvar)((1.0 - 0.02)*(var)net.HitEW[i] + 0.02*hit);
net.HitN[i] += 1;
}
}
// ------------------- blend lambda/gamma & accuracy --------------------
var blendLambdaGamma(var lambda_raw, var gamma_raw){
var w = clamp(FB_W + 0.15*(0.5 - MH.Entropy), 0.4, 0.9);
var x = w*lambda_raw + (1.0 - w)*gamma_raw;
rt.acc_update(lambda_raw, gamma_raw);
return x;
}
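// Worked example: with FB_W=0.70 and MH.Entropy=0.30,
//   w = clamp(0.70 + 0.15*(0.5-0.30), 0.4, 0.9) = 0.73,
// so x = 0.73*lambda_raw + 0.27*gamma_raw; a noisier HTF chain (higher
// entropy) shifts weight toward the coarse net projection gamma.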
// ------------------- rewire scheduler (unchanged logic) ----------------
void maybeRewireNow(var lambda, var mean, var energy, var power){
int mb = mem_mb_est();
if(mb >= UPDATE_MEM_HARD) return;
int batch = ifelse(is_H1_close(), REWIRE_BATCH_EQ_H1, REWIRE_BATCH_EQ_5M);
if(mb >= REWIRE_MEM_SOFT) batch = (batch>>1);
if(batch < REWIRE_MIN_BATCH) batch = REWIRE_MIN_BATCH;
int finished = rewireEpochChunk(lambda,mean,energy,power,batch);
if(finished && (RewirePasses % REWIRE_NORM_EVERY) == 0){
net.normalizeProportions();
log.writeEqHeaderOnce();
if((RewirePasses % META_EVERY) == 0){
int D = net.D, i, total = net.N*D;
unsigned int h = 2166136261u;
for(i=0;i<total;i++){
unsigned int x = (unsigned int)net.Adj[i];
h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
}
CtxID = (int)((h ^ ((unsigned int)Epoch<<8)) & 0x7fffffff);
}
}
}
// ------------------- heavy update scheduler (unchanged) ---------------
void runHeavyUpdates(var lambda, var mean, var energy, var power){
int mb = mem_mb_est();
if(mb >= UPDATE_MEM_HARD) return;
int batch = ifelse(is_H1_close(), UPDATE_BATCH_EQ_H1, UPDATE_BATCH_EQ_5M);
if(mb >= UPDATE_MEM_SOFT) batch = (batch>>1);
if(batch < UPDATE_MIN_BATCH) batch = UPDATE_MIN_BATCH;
heavyUpdateChunk(lambda,mean,energy,power,batch);
}
// ------------------- main engine step (unchanged) ---------------------
void alpha12_step(var ret1_now){
if(!ready) return;
updateAllMarkov();
if(Bar < LookBack){
net.computeProjection();
net.Ret1 = ret1_now;
// adapt MC threshold slowly
var h=0; int i;
for(i=0;i<net.N;i++) h += (var)net.HitEW[i];
if(net.N > 0) h /= (var)net.N; else h=0.5;
var target = MC_ACT + 0.15*(0.55 - h) + 0.10*(MH.Entropy - 0.5);
target = clamp(target, 0.20, 0.50);
MC_ACT_dyn = 0.95*MC_ACT_dyn + 0.05*target;
return;
}
// lambda from projection
net.computeProjection();
int Keff = net.keffClamped();
int k;
var e=0, pwr=0;
for(k=0;k<Keff;k++){ var z=(var)net.Z[k]; e+=z; pwr+=z*z; }
var mean=0, power=0;
if(Keff > 0){ mean = e/(var)Keff; power = pwr/(var)Keff; }
var energy = pwr;
var lambda = 0.7*tanh(mean) + 0.3*tanh(0.05*power);
maybeRewireNow(lambda,mean,energy,power);
runHeavyUpdates(lambda,mean,energy,power);
var gamma = projectNet();
var x = blendLambdaGamma(lambda,gamma);
(void)x;
// cycle tracker
dt.updateEquationCycle(net.Prop, net.N);
// score previous advisors
net.Ret1 = ret1_now;
updateHitRates();
// depth manager (pruning) – note: reindex after prune if depth reduced
int beforeDepth = rt.RT_TreeMaxDepth;
rt.depth_manager_runtime(dt);
if(rt.RT_TreeMaxDepth != beforeDepth){
reindexTreeAndMap();
}
// adapt MC acceptance threshold
{
var h=0; int i;
for(i=0;i<net.N;i++) h += (var)net.HitEW[i];
if(net.N > 0) h /= (var)net.N; else h=0.5;
var target = MC_ACT + 0.15*(0.55 - h) + 0.10*(MH.Entropy - 0.5);
target = clamp(target, 0.20, 0.50);
MC_ACT_dyn = 0.9*MC_ACT_dyn + 0.1*target;
}
}
// ------------------- realized 1-bar return (unchanged) ----------------
var realizedRet1(){
vars C = series(priceClose(0));
if(Bar <= LookBack) return 0;
return C[0] - C[1];
}
// ------------------- trading signal (unchanged) -----------------------
var tradeSignal(){
if(!ready) return 0;
if(!net.RP || !net.Z || !net.StateSq) return 0;
net.computeProjection();
int Keff = net.keffClamped();
if(Keff <= 0) return 0;
int k;
var e=0, pwr=0;
for(k=0;k<Keff;k++){ var z=(var)net.Z[k]; e+=z; pwr+=z*z; }
var mean=0, power=0;
if(Keff > 0){ mean = e/(var)Keff; power = pwr/(var)Keff; }
var lambda = 0.7*tanh(mean) + 0.3*tanh(0.05*power);
var gamma = projectNet();
var x = blendLambdaGamma(lambda,gamma);
LastSig = x;
var gLong=0, gShort=0;
if(MH.PBullNext >= PBULL_LONG_TH) gLong = 1.0;
if(MH.PBullNext <= PBULL_SHORT_TH) gShort= 1.0;
var s=0;
if(x > 0) s = x*gLong;
else s = x*gShort;
var conf = 1.0 - 0.5*(MR.Entropy);
s *= conf;
return clamp(s,-1.,1.);
}
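// Worked example of the gating above: suppose the blended signal is x=0.5,
// MH.PBullNext=0.70 and MR.Entropy=0.40. Then gLong=1 (0.70 >= 0.60),
// s = 0.5*1 = 0.5, conf = 1 - 0.5*0.40 = 0.8, and the returned signal is
// clamp(0.4,-1,1) = 0.4. With MH.PBullNext=0.50 both gates stay 0 and the
// signal is suppressed entirely.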
// ------------------- position sizing & orders (unchanged) -------------
var posSizeFromSignal(var s){
var base = 1;
var scale = 2.0*abs(s);
return base * (0.5 + 0.5*scale);
}
void placeOrders(var s){
if(s > 0){
if(NumOpenShort) exitShort();
if(!NumOpenLong){
Lots = posSizeFromSignal(s);
enterLong();
}
} else if(s < 0){
if(NumOpenLong) exitLong();
if(!NumOpenShort){
Lots = posSizeFromSignal(s);
enterShort();
}
}
}
// ------------------- plotting guard -------------------
void plotSafe(string name, var v){
if(ENABLE_PLOTS && !rt.ChartsOff) plot(name, v, NEW|LINE, 0);
}
// ------------------- per-bar wrapper -------------------
void onBar(){
var r1 = realizedRet1();
alpha12_step(r1);
var s = tradeSignal();
placeOrders(s);
plotSafe("PBull(1H)", 100*(MH.PBullNext-0.5));
plotSafe("PBull(5M)", 100*(ML.PBullNext-0.5));
plotSafe("PBull(Rel)", 100*(MR.PBullNext-0.5));
plotSafe("Entropy(1H)", 100*(MH.Entropy));
plotSafe("Sig", 100*LastSig);
}
};
// ======================================================================
// ========================= Zorro DLL entry (bridge) ====================
DLLFUNC void run()
{
if(is(INITRUN)) {
if(!gAlpha12) gAlpha12 = new Alpha12Strategy();
gAlpha12->init();
}
if(!gAlpha12 || !gAlpha12->ready)
return;
// warmup behavior: keep Markov updated and projection alive
if(is(LOOKBACK) || Bar < LookBack) {
gAlpha12->updateAllMarkov();
gAlpha12->net.computeProjection();
return;
}
gAlpha12->onBar();
if(is(EXITRUN)) {
gAlpha12->cleanup();
delete gAlpha12;
gAlpha12 = nullptr;
}
}
The Candle Oracle Lattice (CUDA version)
[Re: TipmyPip]
#489160
Yesterday at 12:58
TipmyPip
OP
Member
Joined: Sep 2017
Posts: 200
// Zorro64 C++ Strategy DLL - Alpha12 (FULL OOP Refactor)
// Compile as x64 DLL, include zorro.h
// ======================================================================
//
// Refactor goals achieved:
// - ALL prior “globals” moved into Alpha12Strategy (and its components).
// - Components: NodePool, DTree, MarkovChain, NetState, Logger, RuntimeManager.
// - No more static state inside functions (seedBar/haveSeed/seedVal moved into members).
// - run() remains a thin C bridge.
//
// Notes:
// - Logic is preserved: this is primarily an encapsulation refactor.
// - Memory management stays malloc/free, as in your original (a safe incremental step).
// ======================================================================
#define _CRT_SECURE_NO_WARNINGS
#include <zorro.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <windows.h>
#include <stddef.h> // for size_t (or use #include <cstddef>)
// ======================================================================
// ================= USER CONFIG =================
#define ASSET_SYMBOL "EUR/USD"
#define BAR_PERIOD 5
#define TF_H1 12
#define MC_ACT 0.30 // initial threshold on |CDL| in [-1..1] to accept a pattern
#define PBULL_LONG_TH 0.60 // Markov gate for long
#define PBULL_SHORT_TH 0.40 // Markov gate for short
// Debug toggles
#define ENABLE_PLOTS 0
#define ENABLE_WATCH 0
// ================= ENGINE PARAMETERS =================
#define MAX_BRANCHES 3
#define MAX_DEPTH 4
#define NWIN 256
#define NET_EQNS 100
#define DEGREE 4
#define KPROJ 16
#define REWIRE_EVERY 127
#define CAND_NEIGH 8
// ===== LOGGING CONTROLS =====
#define LOG_EQ_TO_ONE_FILE 1
#define LOG_EXPR_TEXT 0
#define META_EVERY 4
#define LOG_EQ_SAMPLE NET_EQNS
#define EXPR_MAXLEN 512
#define LOG_EVERY 16
#define MC_EVERY 1
// ---- DTREE feature sizes ----
#define ADV_EQ_NF 19
#define ADV_PAIR_NF 12
// ================= Candles -> 122-state Markov =================
#define MC_NPAT 61
#define MC_STATES 123
#define MC_NONE 0
// ================= Runtime Memory / Accuracy Manager =================
#define MEM_BUDGET_MB 50
#define MEM_HEADROOM_MB 5
#define DEPTH_STEP_BARS 16
#define KEEP_CHILDREN_HI 2
#define KEEP_CHILDREN_LO 1
#define RUNTIME_MIN_DEPTH 2
// ===== Chunked rewire settings =====
#define REWIRE_BATCH_EQ_5M 24
#define REWIRE_BATCH_EQ_H1 64
#define REWIRE_MIN_BATCH 8
#define REWIRE_NORM_EVERY 1
#define REWIRE_MEM_SOFT (MEM_BUDGET_MB - 4)
#define REWIRE_MEM_HARD (MEM_BUDGET_MB - 1)
// ===== Chunked update settings =====
#define UPDATE_BATCH_EQ_5M 32
#define UPDATE_BATCH_EQ_H1 96
#define UPDATE_MIN_BATCH 8
#define UPDATE_MEM_SOFT (MEM_BUDGET_MB - 4)
#define UPDATE_MEM_HARD (MEM_BUDGET_MB - 1)
// ======================= Tight-memory switches =======================
#define TIGHT_MEM 1
#ifdef TIGHT_MEM
typedef float fvar;
typedef short i16;
typedef char i8;
#else
typedef var fvar;
typedef int i16;
typedef int i8;
#endif
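// With TIGHT_MEM set, per-equation storage roughly halves: var (an 8-byte
// double in Zorro) shrinks to a 4-byte float, and the index/mode arrays
// shrink from 4-byte ints to 2-byte shorts and 1-byte chars. That matters
// because the net keeps dozens of fvar arrays of length NET_EQNS plus the
// NET_EQNS*DEGREE adjacency table.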
// ---------- Candle wrapper name mapping ----------
// Map Alpha12 "canonical" names -> whatever YOUR zorro.h actually provides.
#ifndef A12_CDL_HIGH_WAVE
#define A12_CDL_HIGH_WAVE() CDLHignWave()
#endif
#ifndef A12_CDL_STICKSANDWICH
#define A12_CDL_STICKSANDWICH() CDLStickSandwhich()
#endif
#ifndef A12_CDL_SEPARATING_LINES
#define A12_CDL_SEPARATING_LINES() CDLSeperatingLines()
#endif
#ifndef A12_CDL_CONCEALING_BABY_SWALLOW
#define A12_CDL_CONCEALING_BABY_SWALLOW() CDLConcealBabysWall()
#endif
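// Hedged usage note: if your zorro.h spells one of these patterns
// differently, define the wrapper yourself before this block, e.g.
// (hypothetical spelling shown)
//   #define A12_CDL_HIGH_WAVE() CDLHighWave()
// and the #ifndef guards above will leave your definition in place.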
// ======================================================================
// Forward declarations
class Alpha12Strategy;
static Alpha12Strategy* gAlpha12 = nullptr;
// ======================================================================
// ========================= NodePool (component) ========================
struct Node {
var v;
var r;
void* c;
int n;
int d;
};
struct NodeChunk {
NodeChunk* next;
int used;
int _pad;
Node nodes[256];
};
class NodePool {
NodeChunk* head = 0;
Node* freeList = 0;
public:
~NodePool() { freeAll(); }
Node* allocNode() {
if(freeList) {
Node* n = freeList;
freeList = (Node*)n->c;
n->c = 0; n->n = 0; n->d = 0; n->v = 0; n->r = 0;
return n;
}
if(!head || head->used >= 256) {
NodeChunk* ch = (NodeChunk*)malloc(sizeof(NodeChunk));
if(!ch) quit("Alpha12: OOM allocating NodeChunk");
memset(ch, 0, sizeof(NodeChunk));
ch->next = head;
head = ch;
}
if(head->used < 0 || head->used >= 256) quit("Alpha12: Corrupt node pool state");
return &head->nodes[head->used++];
}
void freeNode(Node* u) {
if(!u) return;
u->c = (void*)freeList;
freeList = u;
}
void freeAll() {
NodeChunk* ch = head;
while(ch) {
NodeChunk* nx = ch->next;
free(ch);
ch = nx;
}
head = 0;
freeList = 0;
}
};
// ======================================================================
// ========================= MarkovChain (component) =====================
class MarkovChain {
public:
int* Count = 0; // [MC_STATES*MC_STATES]
int* RowSum = 0; // [MC_STATES]
int Prev = -1;
int Cur = 0;
var PBullNext = 0.5;
var Entropy = 1.0;
var Alpha = 1.0; // Laplace smoothing
public:
void alloc() {
int NN = MC_STATES*MC_STATES;
int bytesMat = NN*sizeof(int);
int bytesRow = MC_STATES*sizeof(int);
Count = (int*)malloc(bytesMat);
RowSum = (int*)malloc(bytesRow);
if(!Count || !RowSum) quit("Alpha12: OOM in MarkovChain::alloc");
memset(Count,0,bytesMat);
memset(RowSum,0,bytesRow);
Prev = -1; Cur = 0; PBullNext = 0.5; Entropy = 1.0;
}
void freeMem() {
if(Count) free(Count);
if(RowSum) free(RowSum);
Count = RowSum = 0;
Prev = -1; Cur = 0; PBullNext = 0.5; Entropy = 1.0;
}
static int isBull(int s){
if(s<=0) return 0;
return ((s-1)%2)==1;
}
static int stateFromCDL(var* cdl /*len=61*/, var thr) {
int i, best=-1;
var besta=0;
for(i=0;i<MC_NPAT;i++){
var a = abs(cdl[i]);
if(a>besta){ besta=a; best=i; }
}
if(best<0) return MC_NONE;
if(besta < thr) return MC_NONE;
int bull = (cdl[best] > 0);
return 1 + 2*best + bull; // 1..122
}
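// Encoding example: if the strongest pattern is index 5 (0-based) and
// bullish, the state is 1 + 2*5 + 1 = 12; the same pattern bearish maps to
// 11, and MC_NONE (0) means no pattern cleared the threshold thr.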
int idx(int fr,int to) const { return fr*MC_STATES + to; }
void update(int sPrev,int sCur){
if(sPrev<0) return;
Count[idx(sPrev,sCur)]++;
RowSum[sPrev]++;
}
var prob(int s,int t) const {
var num = (var)Count[idx(s,t)] + Alpha;
var den = (var)RowSum[s] + Alpha*MC_STATES;
if(den<=0) return 1.0/MC_STATES;
return num/den;
}
// robust row stats
void rowStats(int s, var* outPBull, var* outEntropy) {
if(outPBull) *outPBull=0.5;
if(outEntropy) *outEntropy=1.0;
if(!Count || !RowSum) return;
if(!(Alpha > 0)) Alpha = 1.0;
if(s <= 0 || s >= MC_STATES) return;
if(RowSum[s] <= 0) return;
var den = (var)RowSum[s] + Alpha*(var)MC_STATES;
if(!(den > 0)) return;
var Z=0, pBull=0;
int t;
for(t=1;t<MC_STATES;t++){
var p = ((var)Count[idx(s,t)] + Alpha) / den;
Z += p;
if(isBull(t)) pBull += p;
}
if(!(Z>0)) return;
var H=0;
var Hmax = log((var)(MC_STATES-1));
if(!(Hmax > 0)) Hmax = 1.0;
for(t=1;t<MC_STATES;t++){
var p = (((var)Count[idx(s,t)] + Alpha) / den) / Z;
if(p>0) H += -p*log(p);
}
if(outPBull) *outPBull = pBull / Z;
if(outEntropy) *outEntropy = H / Hmax;
}
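// The entropy returned above is normalized: H = -sum_t p_t*ln(p_t) over the
// renormalized row, divided by Hmax = ln(MC_STATES-1) = ln(122) ~ 4.80, so
// 0 means a deterministic row and 1 means a near-uniform one.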
};
// ======================================================================
// =========================== Logger (component) ========================
class Alpha12Logger {
int wroteHeader = 0;
public:
void writeEqHeaderOnce(){
if(wroteHeader) return;
wroteHeader = 1;
file_append("Log\\Alpha12_eq_all.csv",
"Bar,Epoch,Ctx,EqCount,i,n1,n2,TreeId,Depth,Rate,Pred,Adv,Prop,Mode,WAdv,WTree,PBull,Entropy,MCState,ExprLen,ExprHash,tanhN,sinN,cosN\n");
}
static void strlcat_safe(string dst, string src, int cap) {
if(!dst || !src || cap <= 0) return;
int dl = (int)strlen(dst);
int sl = (int)strlen(src);
int room = cap - 1 - dl;
if(room <= 0){ if(cap > 0) dst[cap-1] = 0; return; }
int i;
for(i = 0; i < room && i < sl; i++) dst[dl + i] = src[i];
dst[dl + i] = 0;
}
static int countSubStr(string s, string sub){
if(!s || !sub) return 0;
int n=0; string p=s; int sublen = (int)strlen(sub); if(sublen<=0) return 0;
while((p=strstr(p,sub))){ n++; p += sublen; }
return n;
}
static int djb2_hash(string s){
int h = 5381, c, i = 0;
if(!s) return h;
while((c = s[i++])) h = ((h<<5)+h) ^ c;
return h & 0x7fffffff;
}
void appendEqMetaLine(
int bar, int epoch, int ctx,
int i, int n1, int n2, int tid, int depth, var rate, var pred, var adv, var prop, int mode,
var wadv, var wtree, var pbull, var ent, int mcstate, string expr)
{
if(i >= LOG_EQ_SAMPLE) return;
int eLen = 0, eHash = 0, cT = 0, cS = 0, cC = 0;
if(expr){
eLen = (int)strlen(expr);
eHash = (int)djb2_hash(expr);
cT = countSubStr(expr,"tanh(");
cS = countSubStr(expr,"sin(");
cC = countSubStr(expr,"cos(");
} else {
eHash = (int)djb2_hash("");
}
file_append("Log\\Alpha12_eq_all.csv",
strf("%i,%i,%i,%i,%i,%i,%i,%i,%i,%.4f,%.4f,%.4f,%.4f,%i,%.3f,%.3f,%.4f,%.4f,%i,%i,%i,%i,%i,%i\n",
bar, epoch, ctx, NET_EQNS, i, n1, n2, tid, depth,
rate, pred, adv, prop, mode, wadv, wtree, pbull, ent,
mcstate, eLen, eHash, cT, cS, cC));
}
};
// ======================================================================
// =========================== NetState (component) ======================
class NetState {
public:
int N = NET_EQNS;
int D = DEGREE;
int K = KPROJ;
// core
var* State = 0;
var* Prev = 0;
var* StateSq = 0;
// adjacency & projection
i16* Adj = 0;
fvar* RP = 0;
fvar* Z = 0;
i8* Mode= 0;
// weights & params
fvar* WSelf=0; fvar* WN1=0; fvar* WN2=0; fvar* WGlob1=0; fvar* WGlob2=0; fvar* WMom=0; fvar* WTree=0; fvar* WAdv=0;
fvar *A1x=0,*A1lam=0,*A1mean=0,*A1E=0,*A1P=0,*A1i=0,*A1c=0;
fvar *A2x=0,*A2lam=0,*A2mean=0,*A2E=0,*A2P=0,*A2i=0,*A2c=0;
fvar *G1mean=0,*G1E=0,*G2P=0,*G2lam=0;
fvar* TreeTerm = 0;
i16* TopEq = 0;
fvar* TopW = 0;
i16* EqTreeId = 0;
fvar* TAlpha=0;
fvar* TBeta =0;
fvar* PropRaw=0;
fvar* Prop =0;
// expression buffers (optional)
string* Sym = 0;
int SymFreed = 0;
// Hit-rate
fvar* HitEW = 0;
int* HitN = 0;
fvar* AdvPrev = 0;
var Ret1 = 0;
// Projection cache guards
int ProjBar = -1;
int ProjK = -1;
// effective projection dim
int Keff = KPROJ;
public:
void allocate() {
int n=N, d=D, k=K;
State = (var*)malloc(n*sizeof(var));
Prev = (var*)malloc(n*sizeof(var));
StateSq = (var*)malloc(n*sizeof(var));
Adj = (i16*)malloc(n*d*sizeof(i16));
RP = (fvar*)malloc(k*n*sizeof(fvar));
Z = (fvar*)malloc(k*sizeof(fvar));
Mode = (i8*)malloc(n*sizeof(i8));
WSelf=(fvar*)malloc(n*sizeof(fvar));
WN1=(fvar*)malloc(n*sizeof(fvar));
WN2=(fvar*)malloc(n*sizeof(fvar));
WGlob1=(fvar*)malloc(n*sizeof(fvar));
WGlob2=(fvar*)malloc(n*sizeof(fvar));
WMom=(fvar*)malloc(n*sizeof(fvar));
WTree=(fvar*)malloc(n*sizeof(fvar));
WAdv=(fvar*)malloc(n*sizeof(fvar));
A1x=(fvar*)malloc(n*sizeof(fvar)); A1lam=(fvar*)malloc(n*sizeof(fvar)); A1mean=(fvar*)malloc(n*sizeof(fvar));
A1E=(fvar*)malloc(n*sizeof(fvar)); A1P=(fvar*)malloc(n*sizeof(fvar)); A1i=(fvar*)malloc(n*sizeof(fvar)); A1c=(fvar*)malloc(n*sizeof(fvar));
A2x=(fvar*)malloc(n*sizeof(fvar)); A2lam=(fvar*)malloc(n*sizeof(fvar)); A2mean=(fvar*)malloc(n*sizeof(fvar));
A2E=(fvar*)malloc(n*sizeof(fvar)); A2P=(fvar*)malloc(n*sizeof(fvar)); A2i=(fvar*)malloc(n*sizeof(fvar)); A2c=(fvar*)malloc(n*sizeof(fvar));
G1mean=(fvar*)malloc(n*sizeof(fvar)); G1E=(fvar*)malloc(n*sizeof(fvar));
G2P=(fvar*)malloc(n*sizeof(fvar)); G2lam=(fvar*)malloc(n*sizeof(fvar));
TAlpha=(fvar*)malloc(n*sizeof(fvar));
TBeta =(fvar*)malloc(n*sizeof(fvar));
TreeTerm=(fvar*)malloc(n*sizeof(fvar));
TopEq=(i16*)malloc(n*sizeof(i16));
TopW =(fvar*)malloc(n*sizeof(fvar));
PropRaw=(fvar*)malloc(n*sizeof(fvar));
Prop =(fvar*)malloc(n*sizeof(fvar));
EqTreeId=(i16*)malloc(n*sizeof(i16));
if(LOG_EXPR_TEXT) Sym = (string*)malloc(n*sizeof(char*)); else Sym = 0;
// init adjacency
{ int t; for(t=0;t<n*d;t++) Adj[t] = -1; }
// init core arrays
{
int i;
for(i=0;i<n;i++){
State[i]=random(); Prev[i]=State[i]; StateSq[i]=State[i]*State[i];
Mode[i]=0;
WSelf[i]=0.5f; WN1[i]=0.2f; WN2[i]=0.2f;
WGlob1[i]=0.1f; WGlob2[i]=0.1f; WMom[i]=0.05f;
WTree[i]=0.15f; WAdv[i]=0.15f;
A1x[i]=1; A1lam[i]=0.1f; A1mean[i]=0; A1E[i]=0; A1P[i]=0; A1i[i]=0; A1c[i]=0;
A2x[i]=1; A2lam[i]=0.1f; A2mean[i]=0; A2E[i]=0; A2P[i]=0; A2i[i]=0; A2c[i]=0;
G1mean[i]=1.0f; G1E[i]=0.001f;
G2P[i]=0.6f; G2lam[i]=0.3f;
TAlpha[i]=0.8f; TBeta[i]=25.0f;
TreeTerm[i]=0;
TopEq[i]=-1; TopW[i]=0;
PropRaw[i]=1; Prop[i]=(fvar)(1.0/n);
if(LOG_EXPR_TEXT){
Sym[i] = (char*)malloc(EXPR_MAXLEN);
if(Sym[i]) strcpy(Sym[i],"");
}
}
}
// hit-rate arrays
HitEW = (fvar*)malloc(n*sizeof(fvar));
HitN = (int*)malloc(n*sizeof(int));
AdvPrev = (fvar*)malloc(n*sizeof(fvar));
{ int i; for(i=0;i<n;i++){ HitEW[i]=0.5f; HitN[i]=0; AdvPrev[i]=0; } }
// projection guards
ProjBar = -1; ProjK = -1;
}
void freeAll() {
int i;
if(State)free(State);
if(Prev)free(Prev);
if(StateSq)free(StateSq);
if(Adj)free(Adj);
if(RP)free(RP);
if(Z)free(Z);
if(Mode)free(Mode);
if(WSelf)free(WSelf);
if(WN1)free(WN1);
if(WN2)free(WN2);
if(WGlob1)free(WGlob1);
if(WGlob2)free(WGlob2);
if(WMom)free(WMom);
if(WTree)free(WTree);
if(WAdv)free(WAdv);
if(A1x)free(A1x); if(A1lam)free(A1lam); if(A1mean)free(A1mean);
if(A1E)free(A1E); if(A1P)free(A1P); if(A1i)free(A1i); if(A1c)free(A1c);
if(A2x)free(A2x); if(A2lam)free(A2lam); if(A2mean)free(A2mean);
if(A2E)free(A2E); if(A2P)free(A2P); if(A2i)free(A2i); if(A2c)free(A2c);
if(G1mean)free(G1mean); if(G1E)free(G1E);
if(G2P)free(G2P); if(G2lam)free(G2lam);
if(TAlpha)free(TAlpha); if(TBeta)free(TBeta);
if(TreeTerm)free(TreeTerm);
if(TopEq)free(TopEq);
if(TopW)free(TopW);
if(EqTreeId)free(EqTreeId);
if(PropRaw)free(PropRaw);
if(Prop)free(Prop);
if(Sym){
for(i=0;i<N;i++) if(Sym[i]) free(Sym[i]);
free(Sym);
}
Sym = 0;
if(HitEW) free(HitEW);
if(HitN) free(HitN);
if(AdvPrev) free(AdvPrev);
// null everything
State=Prev=StateSq=0; Adj=0; RP=0; Z=0; Mode=0;
WSelf=WN1=WN2=WGlob1=WGlob2=WMom=WTree=WAdv=0;
A1x=A1lam=A1mean=A1E=A1P=A1i=A1c=0;
A2x=A2lam=A2mean=A2E=A2P=A2i=A2c=0;
G1mean=G1E=G2P=G2lam=0;
TAlpha=TBeta=0;
TreeTerm=0; TopEq=0; TopW=0; EqTreeId=0;
PropRaw=0; Prop=0;
HitEW=0; HitN=0; AdvPrev=0;
}
void randomizeRP(){
int k=K, n=N;
int kk,j;
for(kk=0;kk<k;kk++)
for(j=0;j<n;j++)
RP[kk*n+j] = ifelse(random(1) < 0.5, -1.0, 1.0);
}
int keffClamped() const {
int kk = Keff;
if(kk < 0) kk = 0;
if(kk > K) kk = K;
return kk;
}
void computeProjection(){
if(!RP || !Z || !StateSq) return;
int kk = keffClamped();
if(ProjBar == Bar && ProjK == kk) return;
int k, j;
for(k=0;k<kk;k++){
var acc=0;
for(j=0;j<N;j++) acc += (var)RP[k*N + j] * StateSq[j];
Z[k] = (fvar)acc;
}
ProjBar = Bar;
ProjK = kk;
}
void sanitizeAdjacency(){
if(!Adj) return;
int i,d;
for(i=0;i<N;i++){
for(d=0;d<D;d++){
i16* p = &Adj[i*D + d];
if(*p < 0 || *p >= N || *p == i){
int r = (int)random(N);
if(r==i) r = (r+1)%N;
*p = (i16)r;
}
}
if(D >= 2 && Adj[i*D+0] == Adj[i*D+1]){
int r2 = (Adj[i*D+1] + 1) % N;
if(r2 == i) r2 = (r2+1)%N;
Adj[i*D+1] = (i16)r2;
}
}
}
int adjSafe(int i, int d) const {
if(!Adj || N<=1 || D<=0) return 0;
if(d<0) d=0;
if(d>=D) d = d % D;
int v = Adj[i*D + d];
if(v<0 || v>=N || v==i) v = (i+1)%N;
return v;
}
void normalizeProportions(){
int i;
var s=0;
for(i=0;i<N;i++) s += PropRaw[i];
if(s<=0){
for(i=0;i<N;i++) Prop[i] = (fvar)(1.0/N);
return;
}
for(i=0;i<N;i++) Prop[i] = (fvar)(PropRaw[i]/s);
}
};
// ======================================================================
// ============================= DTree (component) =======================
class DTree {
public:
NodePool* pool = 0;
Node* Root = 0;
Node** TreeIdx = 0;
int TreeN = 0;
int TreeCap = 0;
var DTreeExp = 0;
var* DepthW = 0;
var DepthExpLast = -1.0;
Node DummyNode;
// Predictability cache
var* PredNode = 0;
int PredLen = 0;
int PredCap = 0;
int PredCacheBar = -1;
// equation-cycle angles
var* EqTheta = 0;
int LeadEq = -1;
var LeadTh = 0;
var CycPh = 0;
var CycSpd = 0;
public:
DTree() { memset(&DummyNode,0,sizeof(DummyNode)); }
void bindPool(NodePool* p){ pool = p; }
void allocDepthLUT(){
int sz = MAX_DEPTH + 1;
if(!DepthW) DepthW = (var*)malloc(sz*sizeof(var));
if(!DepthW) quit("Alpha12: OOM DepthW");
}
void freeAll(){
if(Root) freeTree(Root);
Root = 0;
if(TreeIdx) free(TreeIdx);
TreeIdx = 0; TreeN=0; TreeCap=0;
if(DepthW) free(DepthW);
DepthW=0;
if(PredNode) free(PredNode);
PredNode=0; PredLen=0; PredCap=0;
if(EqTheta) free(EqTheta);
EqTheta=0;
}
// Tree byte size
int tree_bytes(Node* u){
if(!u) return 0;
int SZV = sizeof(var), SZI = sizeof(int), SZP = sizeof(void*);
int sz_node = 2*SZV + SZP + 2*SZI;
int total = sz_node;
if(u->n > 0 && u->c) total += u->n * SZP;
int i;
for(i=0;i<u->n;i++) total += tree_bytes(((Node**)u->c)[i]);
return total;
}
void refreshDepthW(){
if(!DepthW) return;
int d;
for(d=0; d<=MAX_DEPTH; d++) DepthW[d] = 1.0 / pow(d+1, DTreeExp);
DepthExpLast = DTreeExp;
}
Node* createNode(int depth){
if(!pool) quit("Alpha12: DTree pool not bound");
Node* u = pool->allocNode();
if(!u) return 0;
u->v = random();
u->r = 0.01 + 0.02*depth + random(0.005);
u->d = depth;
if(depth > 0){
u->n = 1 + (int)random(MAX_BRANCHES);
u->c = (void**)malloc(u->n*sizeof(void*));
if(!u->c){ u->n=0; u->c=0; return u; }
int i;
for(i=0;i<u->n;i++){
((Node**)u->c)[i] = createNode(depth-1);
}
} else {
u->n=0; u->c=0;
}
return u;
}
var evaluateNode(Node* u){
if(!u) return 0;
var sum=0; int i;
for(i=0;i<u->n;i++) sum += evaluateNode(((Node**)u->c)[i]);
if(DepthExpLast < 0 || abs(DTreeExp - DepthExpLast) > 1e-9) refreshDepthW();
var phase = sin(u->r * Bar + sum);
var weight = DepthW[u->d];
u->v = (1 - weight)*u->v + weight*phase;
return u->v;
}
void freeTree(Node* u){
if(!u) return;
int i;
for(i=0;i<u->n;i++) freeTree(((Node**)u->c)[i]);
if(u->c) free(u->c);
pool->freeNode(u);
}
void pushTreeNode(Node* u){
if(TreeN >= TreeCap){
int newCap = TreeCap*2;
if(newCap < 64) newCap = 64;
TreeIdx = (Node**)realloc(TreeIdx, newCap*sizeof(Node*));
TreeCap = newCap;
}
TreeIdx[TreeN++] = u;
}
void indexTreeDFS(Node* u){
if(!u) return;
pushTreeNode(u);
int i;
for(i=0;i<u->n;i++) indexTreeDFS(((Node**)u->c)[i]);
}
void ensurePredCache(){
if(PredCacheBar != Bar){
if(PredNode){
int i;
for(i=0;i<PredLen;i++) PredNode[i] = -2;
}
PredCacheBar = Bar;
}
}
var nodePredictability(Node* t){
if(!t) return 0.5;
var disp = 0; int n=t->n, i, cnt=0;
if(t->c){
for(i=0;i<n;i++){
Node* c = ((Node**)t->c)[i];
if(c){ disp += abs(c->v - t->v); cnt++; }
}
if(cnt>0) disp /= cnt;
}
var depthFac = 1.0/(1 + t->d);
var rateBase = 0.01 + 0.02*t->d;
var rateFac = exp(-25.0*abs(t->r - rateBase));
var p = 0.5*(depthFac + rateFac);
p = 0.5*p + 0.5*(1.0 + (-disp));
if(p<0) p=0; if(p>1) p=1;
return p;
}
var predByTid(int tid){
if(!TreeIdx || tid < 0 || tid >= TreeN || !TreeIdx[tid]) return 0.5;
ensurePredCache();
if(PredNode && tid < PredLen && PredNode[tid] > -1.5) return PredNode[tid];
Node* t = TreeIdx[tid];
var p = nodePredictability(t);
if(PredNode && tid < PredLen) PredNode[tid] = p;
return p;
}
Node* treeAt(int tid){
if(!TreeIdx || tid < 0 || tid >= TreeN || !TreeIdx[tid]) return &DummyNode;
return TreeIdx[tid];
}
int safeTreeIndexFromEq(int eqTreeId, int treeN){
int denom = ifelse(treeN>0, treeN, 1);
int tid = eqTreeId;
if(tid < 0) tid = 0;
if(denom > 0) tid = tid % denom;
if(tid < 0) tid = 0;
return tid;
}
void maybeShrinkTreeIdx(){
if(!TreeIdx) return;
if(TreeCap > 64 && TreeN < (TreeCap>>1)){
int newCap = (TreeCap>>1);
if(newCap < 64) newCap = 64;
TreeIdx = (Node**)realloc(TreeIdx, newCap*sizeof(Node*));
TreeCap = newCap;
}
}
void resizePredCacheToTree(){
PredLen = TreeN; if(PredLen <= 0) PredLen = 1;
if(PredLen > PredCap){
if(PredNode) free(PredNode);
PredNode = (var*)malloc(PredLen*sizeof(var));
PredCap = PredLen;
}
PredCacheBar = -1;
}
// equation ring angles (needs EqTreeId mapping provided externally)
void refreshEqAngles(const i16* eqTreeId, int eqN){
if(!EqTheta) EqTheta = (var*)malloc(eqN*sizeof(var));
if(!EqTheta) quit("Alpha12: OOM EqTheta");
var twoPi = 2.*3.141592653589793;
var denom = ifelse(TreeN>0,(var)TreeN,1.0);
int i;
for(i=0;i<eqN;i++){
int tid = safeTreeIndexFromEq((int)eqTreeId[i], TreeN);
var u = ((var)tid)/denom;
EqTheta[i] = twoPi*u;
}
}
static var pi(){ return 3.141592653589793; }
static var wrapPi(var a){
while(a <= -pi()) a += 2.*pi();
while(a > pi()) a -= 2.*pi();
return a;
}
static var angDiff(var a, var b){ return wrapPi(b - a); }
void updateEquationCycle(const fvar* prop, int N){
if(!EqTheta){ CycPh = wrapPi(CycPh); return; }
int i, bestI=0;
var bestP=-1;
for(i=0;i<N;i++){
var p = (var)prop[i];
if(p > bestP){ bestP=p; bestI=i; }
}
var th = EqTheta[bestI];
var d = angDiff(LeadTh, th);
CycSpd = 0.9*CycSpd + 0.1*d;
CycPh = wrapPi(CycPh + CycSpd);
LeadEq = bestI;
LeadTh = th;
}
// --- prune & grow helpers (direct lift) ---
var nodeImportance(Node* u){
if(!u) return 0;
var amp = abs(u->v); if(amp>1) amp=1;
var p = nodePredictability(u);
var depthW = 1.0/(1.0 + u->d);
var imp = (0.6*p + 0.4*amp) * depthW;
return imp;
}
Node* createLeafDepth(int d){
Node* u = pool->allocNode();
if(!u) return 0;
u->v = random();
u->r = 0.01 + 0.02*d + random(0.005);
u->n = 0;
u->c = 0;
u->d = d;
return u;
}
void growSelectiveAtDepth(Node* u, int frontierDepth, int addK){
if(!u) return;
if(u->d == frontierDepth){
int want = addK; if(want<=0) return;
int oldN=u->n, newN=oldN+want;
Node** Cnew = (Node**)malloc(newN*sizeof(void*));
if(!Cnew) return;
if(oldN>0 && u->c) memcpy(Cnew,u->c,oldN*sizeof(void*));
int i;
for(i=oldN;i<newN;i++) Cnew[i] = createLeafDepth(frontierDepth-1);
if(u->c) free(u->c);
u->c = Cnew;
u->n = newN;
return;
}
int j;
for(j=0;j<u->n;j++) growSelectiveAtDepth(((Node**)u->c)[j],frontierDepth,addK);
}
void freeChildAt(Node* parent, int idx){
if(!parent || !parent->c) return;
Node** C = (Node**)parent->c;
freeTree(C[idx]);
int i;
for(i=idx+1;i<parent->n;i++) C[i-1]=C[i];
parent->n--;
if(parent->n==0){ free(parent->c); parent->c=0; }
}
void pruneSelectiveAtDepth(Node* u, int targetDepth, int keepK){
if(!u) return;
if(u->d == targetDepth-1 && u->n > 0){
int n=u->n, i, kept=0;
int mark[16]; for(i=0;i<16;i++) mark[i]=0;
int iter;
for(iter=0; iter<keepK && iter<n; iter++){
int bestI=-1; var bestImp=-1;
for(i=0;i<n;i++){
if(i<16 && mark[i]==1) continue;
var imp = nodeImportance(((Node**)u->c)[i]);
if(imp > bestImp){ bestImp=imp; bestI=i; }
}
if(bestI>=0 && bestI<16){ mark[bestI]=1; kept++; }
}
for(i=n-1;i>=0;i--) if(i<16 && mark[i]==0) freeChildAt(u,i);
return;
}
int j; for(j=0;j<u->n;j++) pruneSelectiveAtDepth(((Node**)u->c)[j],targetDepth,keepK);
}
};
// ======================================================================
// ======================== RuntimeManager (component) ===================
class RuntimeManager {
public:
int MemFixedBytes = 0;
int TreeBytesCached = 0;
int ShedStage = 0;
int LastDepthActBar = -999999;
int ChartsOff = 0;
int LogsOff = 0;
int RT_TreeMaxDepth = MAX_DEPTH;
// Accuracy sentinel
var ACC_mx=0, ACC_my=0, ACC_mx2=0, ACC_my2=0, ACC_mxy=0;
var AccCorr=0;
var AccBase=0;
int HaveBase=0;
// Elastic depth tuner
var UtilBefore=0, UtilAfter=0;
int TunePending=0;
int TuneStartBar=0;
int TuneAction=0;
var DTreeExpStep = 0.05;
int DTreeExpDir = 1;
public:
int mem_bytes_est() const { return MemFixedBytes + TreeBytesCached; }
int mem_mb_est() const { return mem_bytes_est()/(1024*1024); }
void recalcTreeBytes(DTree& dt){ TreeBytesCached = dt.tree_bytes(dt.Root); }
void computeMemFixedBytes(const NetState& net, const DTree& dt, int includeExprBytes){
int N=net.N, D=net.D, K=net.K;
int SZV=sizeof(var), SZF=sizeof(fvar), SZI16=sizeof(i16), SZI8=sizeof(i8), SZP=sizeof(void*);
int b=0;
b += N*SZV*2; // State, Prev
b += N*SZV; // StateSq
b += N*D*SZI16; // Adj
b += N*SZI16; // EqTreeId
b += N*SZI8; // Mode
b += K*N*SZF; // RP
b += K*SZF; // Z
b += N*SZF*(8); // weights
b += N*SZF*(7+7); // A1*, A2*
b += N*SZF*(2+2); // G1mean,G1E,G2P,G2lam
b += N*SZF*(2); // TAlpha,TBeta
b += N*SZF*(1); // TreeTerm
b += N*(SZI16 + SZF); // TopEq,TopW
b += N*SZF*2; // PropRaw,Prop
b += N*SZF; // HitEW
b += N*SZF; // AdvPrev
b += N*sizeof(int); // HitN
// Markov: caller adds separately if desired (we do it in strategy since there are 3 chains)
b += dt.TreeCap*SZP; // Tree index capacity
if(includeExprBytes) b += N*EXPR_MAXLEN;
MemFixedBytes = b;
}
void shed_zero_cost_once(){
if(ShedStage > 0) return;
set(PLOTNOW|OFF);
ChartsOff = 1;
LogsOff = 1;
ShedStage = 1;
}
void acc_update(var x, var y){
var a=0.01;
ACC_mx=(1-a)*ACC_mx + a*x;
ACC_my=(1-a)*ACC_my + a*y;
ACC_mx2=(1-a)*ACC_mx2 + a*(x*x);
ACC_my2=(1-a)*ACC_my2 + a*(y*y);
ACC_mxy=(1-a)*ACC_mxy + a*(x*y);
var vx = ACC_mx2 - ACC_mx*ACC_mx;
var vy = ACC_my2 - ACC_my*ACC_my;
var cv = ACC_mxy - ACC_mx*ACC_my;
if(vx>0 && vy>0) AccCorr = cv / sqrt(vx*vy);
else AccCorr = 0;
if(!HaveBase){ AccBase=AccCorr; HaveBase=1; }
}
var util_now(){
int mb = mem_mb_est();
var mem_pen = 0;
if(mb > MEM_BUDGET_MB) mem_pen = (mb - MEM_BUDGET_MB)/(var)MEM_BUDGET_MB;
return AccCorr - 0.5*mem_pen;
}
void depth_manager_runtime(DTree& dt){
int trigger = MEM_BUDGET_MB - MEM_HEADROOM_MB;
int mb = mem_mb_est();
if(mb < trigger) return;
if(ShedStage == 0) shed_zero_cost_once();
if(ShedStage <= 1) ShedStage = 2;
int overBudget = (mb >= MEM_BUDGET_MB);
if(!overBudget && (Bar - LastDepthActBar < DEPTH_STEP_BARS)) return;
while(RT_TreeMaxDepth > RUNTIME_MIN_DEPTH) {
int keepK = ifelse(mem_mb_est() < MEM_BUDGET_MB + 2, KEEP_CHILDREN_HI, KEEP_CHILDREN_LO);
dt.pruneSelectiveAtDepth(dt.Root, RT_TreeMaxDepth, keepK);
RT_TreeMaxDepth--;
// reindex caller does (strategy)
mb = mem_mb_est();
printf("\n[DepthMgr] depth=%i keepK=%i est=%i MB", RT_TreeMaxDepth, keepK, mb);
if(mb < trigger) break;
}
LastDepthActBar = Bar;
}
};
// ===========================================================
// CUDA Advisor (Driver API + NVRTC)
// - Batch inference for ADV_EQ_NF features
// - Simple default model: tanh(dot(W,x)+b) in [-1..1]
// - Loads weights from Data\Alpha12_cuda_w.bin (float32)
// ===========================================================
class CudaAdvisor {
public:
// --- minimal CUDA driver types ---
typedef int CUresult;
typedef int CUdevice;
typedef struct CUctx_st* CUcontext;
typedef struct CUmod_st* CUmodule;
typedef struct CUfunc_st* CUfunction;
typedef unsigned long long CUdeviceptr;
static const int CUDA_SUCCESS = 0;
// --- minimal NVRTC types ---
typedef int nvrtcResult;
typedef struct _nvrtcProgram* nvrtcProgram;
static const int NVRTC_SUCCESS = 0;
// ---- function pointer types (nvcuda.dll) ----
typedef CUresult (__stdcall *PFN_cuInit)(unsigned int);
typedef CUresult (__stdcall *PFN_cuDeviceGetCount)(int*);
typedef CUresult (__stdcall *PFN_cuDeviceGet)(CUdevice*, int);
typedef CUresult (__stdcall *PFN_cuDeviceComputeCapability)(int*, int*, CUdevice);
typedef CUresult (__stdcall *PFN_cuCtxCreate_v2)(CUcontext*, unsigned int, CUdevice);
typedef CUresult (__stdcall *PFN_cuCtxDestroy_v2)(CUcontext);
typedef CUresult (__stdcall *PFN_cuModuleLoadDataEx)(CUmodule*, const void*, unsigned int, void*, void*);
typedef CUresult (__stdcall *PFN_cuModuleGetFunction)(CUfunction*, CUmodule, const char*);
typedef CUresult (__stdcall *PFN_cuModuleUnload)(CUmodule);
typedef CUresult (__stdcall *PFN_cuMemAlloc_v2)(CUdeviceptr*, size_t);
typedef CUresult (__stdcall *PFN_cuMemFree_v2)(CUdeviceptr);
typedef CUresult (__stdcall *PFN_cuMemcpyHtoD_v2)(CUdeviceptr, const void*, size_t);
typedef CUresult (__stdcall *PFN_cuMemcpyDtoH_v2)(void*, CUdeviceptr, size_t);
typedef CUresult (__stdcall *PFN_cuLaunchKernel)(
CUfunction,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int,
void*,
void**,
void**);
typedef CUresult (__stdcall *PFN_cuCtxSynchronize)(void);
// ---- NVRTC pointers ----
typedef nvrtcResult (__stdcall *PFN_nvrtcCreateProgram)(
nvrtcProgram*,
const char*,
const char*,
int,
const char* const*,
const char* const*);
typedef nvrtcResult (__stdcall *PFN_nvrtcCompileProgram)(nvrtcProgram, int, const char* const*);
typedef nvrtcResult (__stdcall *PFN_nvrtcGetProgramLogSize)(nvrtcProgram, size_t*);
typedef nvrtcResult (__stdcall *PFN_nvrtcGetProgramLog)(nvrtcProgram, char*);
typedef nvrtcResult (__stdcall *PFN_nvrtcGetPTXSize)(nvrtcProgram, size_t*);
typedef nvrtcResult (__stdcall *PFN_nvrtcGetPTX)(nvrtcProgram, char*);
typedef nvrtcResult (__stdcall *PFN_nvrtcDestroyProgram)(nvrtcProgram*);
private:
// DLLs
HMODULE hCU = 0;
HMODULE hNVRTC= 0;
// CUDA function pointers
PFN_cuInit p_cuInit = 0;
PFN_cuDeviceGetCount p_cuDeviceGetCount = 0;
PFN_cuDeviceGet p_cuDeviceGet = 0;
PFN_cuDeviceComputeCapability p_cuDeviceComputeCapability = 0;
PFN_cuCtxCreate_v2 p_cuCtxCreate = 0;
PFN_cuCtxDestroy_v2 p_cuCtxDestroy = 0;
PFN_cuModuleLoadDataEx p_cuModuleLoadDataEx = 0;
PFN_cuModuleGetFunction p_cuModuleGetFunction = 0;
PFN_cuModuleUnload p_cuModuleUnload = 0;
PFN_cuMemAlloc_v2 p_cuMemAlloc = 0;
PFN_cuMemFree_v2 p_cuMemFree = 0;
PFN_cuMemcpyHtoD_v2 p_cuMemcpyHtoD = 0;
PFN_cuMemcpyDtoH_v2 p_cuMemcpyDtoH = 0;
PFN_cuLaunchKernel p_cuLaunchKernel = 0;
PFN_cuCtxSynchronize p_cuCtxSynchronize = 0;
// NVRTC function pointers
PFN_nvrtcCreateProgram p_nvrtcCreateProgram = 0;
PFN_nvrtcCompileProgram p_nvrtcCompileProgram = 0;
PFN_nvrtcGetProgramLogSize p_nvrtcGetProgramLogSize = 0;
PFN_nvrtcGetProgramLog p_nvrtcGetProgramLog = 0;
PFN_nvrtcGetPTXSize p_nvrtcGetPTXSize = 0;
PFN_nvrtcGetPTX p_nvrtcGetPTX = 0;
PFN_nvrtcDestroyProgram p_nvrtcDestroyProgram = 0;
// CUDA state
int ready = 0;
CUdevice dev = 0;
CUcontext ctx = 0;
CUmodule mod = 0;
CUfunction func = 0;
// device buffers
CUdeviceptr dX = 0; // float[maxBatch * nf]
CUdeviceptr dOut = 0; // float[maxBatch]
CUdeviceptr dW = 0; // float[nf]
CUdeviceptr dB = 0; // float[1]
int maxBatch = 0;
int nf = 0;
// host staging (to avoid realloc)
float* hX = 0; // float[maxBatch*nf]
float* hOut = 0; // float[maxBatch]
// simple weights (host)
float* hW = 0; // float[nf]
float hB = 0;
// kernel source (NVRTC)
const char* kSrc =
"extern \"C\" __device__ __forceinline__ float tanh_approx(float x){\n"
" // rational approx, good enough for gating/advice in [-5..5]\n"
" if(x > 5.0f) return 1.0f;\n"
" if(x < -5.0f) return -1.0f;\n"
" float x2 = x*x;\n"
" return x*(27.0f + x2)/(27.0f + 9.0f*x2);\n"
"}\n"
"extern \"C\" __global__ void advise_kernel(const float* X, const float* W, const float* B, float* out, int nf, int batch)\n"
"{\n"
" int i = (int)(blockIdx.x * blockDim.x + threadIdx.x);\n"
" if(i >= batch) return;\n"
" const float* x = X + i*nf;\n"
" float acc = 0.0f;\n"
" for(int k=0;k<nf;k++) acc += x[k]*W[k];\n"
" acc += B[0];\n"
" out[i] = tanh_approx(acc);\n"
"}\n";
private:
static FARPROC sym(HMODULE h, const char* name) {
if(!h) return 0;
return GetProcAddress(h, name);
}
static HMODULE load_nvrtc_any() {
const char* names[] = {
"nvrtc64_123_0.dll","nvrtc64_122_0.dll","nvrtc64_121_0.dll","nvrtc64_120_0.dll",
"nvrtc64_118_0.dll","nvrtc64_117_0.dll","nvrtc64_116_0.dll","nvrtc64_115_0.dll",
"nvrtc64_114_0.dll","nvrtc64_113_0.dll","nvrtc64_112_0.dll","nvrtc64_111_0.dll",
"nvrtc64_110_0.dll","nvrtc64_102_0.dll","nvrtc64_101_0.dll","nvrtc64_100_0.dll",
"nvrtc64_92.dll"
};
for(int i=0;i<(int)(sizeof(names)/sizeof(names[0])); i++) {
HMODULE h = LoadLibraryA(names[i]);
if(h) return h;
}
return LoadLibraryA("nvrtc64.dll");
}
void release_all() {
if(dB && p_cuMemFree) { p_cuMemFree(dB); dB=0; }
if(dW && p_cuMemFree) { p_cuMemFree(dW); dW=0; }
if(dOut && p_cuMemFree) { p_cuMemFree(dOut); dOut=0; }
if(dX && p_cuMemFree) { p_cuMemFree(dX); dX=0; }
if(mod && p_cuModuleUnload) { p_cuModuleUnload(mod); mod=0; }
if(ctx && p_cuCtxDestroy) { p_cuCtxDestroy(ctx); ctx=0; }
if(hOut) { free(hOut); hOut=0; }
if(hX) { free(hX); hX=0; }
if(hW) { free(hW); hW=0; }
ready = 0;
if(hNVRTC) { FreeLibrary(hNVRTC); hNVRTC=0; }
if(hCU) { FreeLibrary(hCU); hCU=0; }
}
int load_symbols() {
hCU = LoadLibraryA("nvcuda.dll");
if(!hCU) return 0;
p_cuInit = (PFN_cuInit)sym(hCU, "cuInit");
p_cuDeviceGetCount = (PFN_cuDeviceGetCount)sym(hCU, "cuDeviceGetCount");
p_cuDeviceGet = (PFN_cuDeviceGet)sym(hCU, "cuDeviceGet");
p_cuDeviceComputeCapability = (PFN_cuDeviceComputeCapability)sym(hCU, "cuDeviceComputeCapability");
p_cuCtxCreate = (PFN_cuCtxCreate_v2)sym(hCU, "cuCtxCreate_v2");
p_cuCtxDestroy= (PFN_cuCtxDestroy_v2)sym(hCU, "cuCtxDestroy_v2");
p_cuModuleLoadDataEx = (PFN_cuModuleLoadDataEx)sym(hCU, "cuModuleLoadDataEx");
p_cuModuleGetFunction= (PFN_cuModuleGetFunction)sym(hCU, "cuModuleGetFunction");
p_cuModuleUnload = (PFN_cuModuleUnload)sym(hCU, "cuModuleUnload");
p_cuMemAlloc = (PFN_cuMemAlloc_v2) sym(hCU, "cuMemAlloc_v2");
p_cuMemFree = (PFN_cuMemFree_v2) sym(hCU, "cuMemFree_v2");
p_cuMemcpyHtoD = (PFN_cuMemcpyHtoD_v2) sym(hCU, "cuMemcpyHtoD_v2");
p_cuMemcpyDtoH = (PFN_cuMemcpyDtoH_v2) sym(hCU, "cuMemcpyDtoH_v2");
p_cuLaunchKernel = (PFN_cuLaunchKernel)sym(hCU, "cuLaunchKernel");
p_cuCtxSynchronize= (PFN_cuCtxSynchronize)sym(hCU, "cuCtxSynchronize");
if(!p_cuInit || !p_cuDeviceGetCount || !p_cuDeviceGet || !p_cuCtxCreate || !p_cuCtxDestroy ||
!p_cuModuleLoadDataEx || !p_cuModuleGetFunction || !p_cuModuleUnload ||
!p_cuMemAlloc || !p_cuMemFree || !p_cuMemcpyHtoD || !p_cuMemcpyDtoH ||
!p_cuLaunchKernel || !p_cuCtxSynchronize)
{
return 0;
}
hNVRTC = load_nvrtc_any();
if(!hNVRTC) return 0;
p_nvrtcCreateProgram = (PFN_nvrtcCreateProgram)sym(hNVRTC, "nvrtcCreateProgram");
p_nvrtcCompileProgram= (PFN_nvrtcCompileProgram)sym(hNVRTC, "nvrtcCompileProgram");
p_nvrtcGetProgramLogSize=(PFN_nvrtcGetProgramLogSize)sym(hNVRTC, "nvrtcGetProgramLogSize");
p_nvrtcGetProgramLog=(PFN_nvrtcGetProgramLog)sym(hNVRTC, "nvrtcGetProgramLog");
p_nvrtcGetPTXSize=(PFN_nvrtcGetPTXSize)sym(hNVRTC, "nvrtcGetPTXSize");
p_nvrtcGetPTX=(PFN_nvrtcGetPTX)sym(hNVRTC, "nvrtcGetPTX");
p_nvrtcDestroyProgram=(PFN_nvrtcDestroyProgram)sym(hNVRTC, "nvrtcDestroyProgram");
if(!p_nvrtcCreateProgram || !p_nvrtcCompileProgram || !p_nvrtcGetProgramLogSize ||
!p_nvrtcGetProgramLog || !p_nvrtcGetPTXSize || !p_nvrtcGetPTX || !p_nvrtcDestroyProgram)
{
return 0;
}
return 1;
}
int load_weights_from_file(const char* path) {
// expects nf floats + 1 bias float (all float32)
if(!hW) return 0;
FILE* f = fopen(path, "rb");
if(!f) {
// default zero weights
int i; for(i=0;i<nf;i++) hW[i] = 0.0f;
hB = 0.0f;
return 0;
}
size_t want = (size_t)(nf + 1);
float* tmp = (float*)malloc(want*sizeof(float));
if(!tmp) { fclose(f); return 0; }
size_t got = fread(tmp, sizeof(float), want, f);
fclose(f);
if(got >= (size_t)nf) {
int i; for(i=0;i<nf;i++) hW[i] = tmp[i];
if(got >= (size_t)(nf+1)) hB = tmp[nf];
else hB = 0.0f;
free(tmp);
return 1;
}
free(tmp);
return 0;
}
public:
int isReady() const { return ready; }
int init(int nfIn, int maxBatchIn) {
shutdown(); // idempotent
nf = nfIn;
maxBatch = maxBatchIn;
if(nf <= 0 || maxBatch <= 0) return 0;
if(!load_symbols()) { release_all(); return 0; }
CUresult e = p_cuInit(0);
if(e != CUDA_SUCCESS) { release_all(); return 0; }
int count = 0;
e = p_cuDeviceGetCount(&count);
if(e != CUDA_SUCCESS || count <= 0) { release_all(); return 0; }
e = p_cuDeviceGet(&dev, 0);
if(e != CUDA_SUCCESS) { release_all(); return 0; }
int maj=0,min=0;
if(p_cuDeviceComputeCapability) p_cuDeviceComputeCapability(&maj,&min,dev);
if(maj <= 0) { maj=5; min=2; }
e = p_cuCtxCreate(&ctx, 0, dev);
if(e != CUDA_SUCCESS || !ctx) { release_all(); return 0; }
// ---- NVRTC compile ----
nvrtcProgram prog = 0;
nvrtcResult r = p_nvrtcCreateProgram(&prog, kSrc, "advise_kernel.cu", 0, 0, 0);
if(r != NVRTC_SUCCESS || !prog) { release_all(); return 0; }
char archOpt[64];
sprintf(archOpt, "--gpu-architecture=compute_%d%d", maj, min);
const char* opts[] = { archOpt, "--use_fast_math" };
r = p_nvrtcCompileProgram(prog, (int)(sizeof(opts)/sizeof(opts[0])), opts);
// Print compile log to Zorro console (optional)
size_t logSize = 0;
p_nvrtcGetProgramLogSize(prog, &logSize);
if(logSize > 1) {
char* logbuf = (char*)malloc(logSize + 1);
if(logbuf) {
logbuf[0] = 0;
p_nvrtcGetProgramLog(prog, logbuf);
printf("\n[NVRTC-advise] %s", logbuf);
free(logbuf);
}
}
if(r != NVRTC_SUCCESS) {
p_nvrtcDestroyProgram(&prog);
release_all();
return 0;
}
size_t ptxSize = 0;
r = p_nvrtcGetPTXSize(prog, &ptxSize);
if(r != NVRTC_SUCCESS || ptxSize < 8) {
p_nvrtcDestroyProgram(&prog);
release_all();
return 0;
}
char* ptx = (char*)malloc(ptxSize + 1);
if(!ptx) {
p_nvrtcDestroyProgram(&prog);
release_all();
return 0;
}
ptx[0] = 0;
r = p_nvrtcGetPTX(prog, ptx);
p_nvrtcDestroyProgram(&prog);
if(r != NVRTC_SUCCESS) {
free(ptx);
release_all();
return 0;
}
e = p_cuModuleLoadDataEx(&mod, (const void*)ptx, 0, 0, 0);
free(ptx);
if(e != CUDA_SUCCESS || !mod) { release_all(); return 0; }
e = p_cuModuleGetFunction(&func, mod, "advise_kernel");
if(e != CUDA_SUCCESS || !func) { release_all(); return 0; }
// ---- allocate device buffers ----
e = p_cuMemAlloc(&dX, sizeof(float)* (size_t)maxBatch * (size_t)nf);
if(e != CUDA_SUCCESS || !dX) { release_all(); return 0; }
e = p_cuMemAlloc(&dOut, sizeof(float)* (size_t)maxBatch);
if(e != CUDA_SUCCESS || !dOut) { release_all(); return 0; }
e = p_cuMemAlloc(&dW, sizeof(float)*(size_t)nf);
if(e != CUDA_SUCCESS || !dW) { release_all(); return 0; }
e = p_cuMemAlloc(&dB, sizeof(float));
if(e != CUDA_SUCCESS || !dB) { release_all(); return 0; }
// ---- allocate host staging ----
hX = (float*)malloc(sizeof(float)*(size_t)maxBatch*(size_t)nf);
hOut = (float*)malloc(sizeof(float)*(size_t)maxBatch);
hW = (float*)malloc(sizeof(float)*(size_t)nf);
if(!hX || !hOut || !hW) { release_all(); return 0; }
// ---- load weights (optional) ----
load_weights_from_file("Data\\Alpha12_cuda_w.bin");
// copy weights to device
e = p_cuMemcpyHtoD(dW, hW, sizeof(float)*(size_t)nf);
if(e != CUDA_SUCCESS) { release_all(); return 0; }
e = p_cuMemcpyHtoD(dB, &hB, sizeof(float));
if(e != CUDA_SUCCESS) { release_all(); return 0; }
ready = 1;
printf("\n[CUDA-advise] init OK (nf=%d maxBatch=%d cc=%d.%d)", nf, maxBatch, maj, min);
return 1;
}
void shutdown() {
release_all();
}
// Batch inference:
// - X: float[batch*nf] on host (row-major)
// - out: float[batch] on host
// returns 1 on GPU success, 0 on failure.
int inferBatch(const float* X, int batch, float* out) {
if(!ready) return 0;
if(!X || !out) return 0;
if(batch <= 0) return 1;
if(batch > maxBatch) batch = maxBatch;
size_t bytesX = sizeof(float)*(size_t)batch*(size_t)nf;
size_t bytesO = sizeof(float)*(size_t)batch;
CUresult e = p_cuMemcpyHtoD(dX, X, bytesX);
if(e != CUDA_SUCCESS) { ready = 0; return 0; }
int nfLocal = nf;
int batchLocal = batch;
void* params[] = {
(void*)&dX,
(void*)&dW,
(void*)&dB,
(void*)&dOut,
(void*)&nfLocal,
(void*)&batchLocal
};
unsigned int block = 128;
unsigned int grid = (unsigned int)((batch + (int)block - 1) / (int)block);
e = p_cuLaunchKernel(func, grid,1,1, block,1,1, 0, 0, params, 0);
if(e != CUDA_SUCCESS) { ready = 0; return 0; }
e = p_cuCtxSynchronize();
if(e != CUDA_SUCCESS) { ready = 0; return 0; }
e = p_cuMemcpyDtoH(out, dOut, bytesO);
if(e != CUDA_SUCCESS) { ready = 0; return 0; }
return 1;
}
// helper: access staging buffers
float* hostX() { return hX; }
float* hostOut() { return hOut; }
int cap() const { return maxBatch; }
int featN() const { return nf; }
};
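// --------------------------------------------------------------------
// Hedged sketch (not part of the original strategy): one way to produce
// Data\Alpha12_cuda_w.bin in the layout load_weights_from_file() expects,
// i.e. ADV_EQ_NF float32 weights followed by a single float32 bias.
// The helper name writeDefaultCudaWeights() and the neutral zero weights
// are illustrative assumptions only.
static int writeDefaultCudaWeights(const char* path, int nf, float bias)
{
FILE* f = fopen(path, "wb");
if(!f) return 0;
int i, ok = 1;
float w = 0.0f; // neutral starting weights; replace with trained values
for(i = 0; i < nf; i++)
if(fwrite(&w, sizeof(float), 1, f) != 1) { ok = 0; break; }
if(ok && fwrite(&bias, sizeof(float), 1, f) != 1) ok = 0;
fclose(f);
return ok;
}
// Example one-off call: writeDefaultCudaWeights("Data\\Alpha12_cuda_w.bin", ADV_EQ_NF, 0.0f);
// Typical per-bar use of the advisor (sketch): fill cuda.hostX() with
// batch*ADV_EQ_NF row-major features, call
// cuda.inferBatch(cuda.hostX(), batch, cuda.hostOut()), then read the
// [-1..1] advice values back from cuda.hostOut().
// --------------------------------------------------------------------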
// ========================= Alpha12Strategy (owner) =====================
// Drop-in full class with CUDA-batched advisor + per-bar cache integrated.
// Assumes CudaAdvisor + other components (NodePool, DTree, NetState, Logger, RuntimeManager, MarkovChain)
// are already defined above.
class Alpha12Strategy {
public:
// Components
NodePool pool;
DTree dt;
NetState net;
Alpha12Logger log;
RuntimeManager rt;
// Markov: HTF, LTF, REL
MarkovChain MH;
MarkovChain ML;
MarkovChain MR;
// --- Candlestick buffers (NO static locals) ---
var CDL_L[MC_NPAT]; // 5M (LTF) pattern values
var CDL_H[MC_NPAT]; // 1H (HTF) pattern values
// Strategy runtime
int ready = 0;
int Epoch = 0;
int CtxID = 0;
// Markov adaptive knobs
var FB_W = 0.70;
var MC_ACT_dyn = MC_ACT;
var MC_Alpha = 1.0;
int CandNeigh = CAND_NEIGH;
// Rewire/update cursors
int RewirePos = 0;
int RewirePasses = 0;
int UpdatePos = 0;
int UpdatePasses = 0;
// Signal & trade
var LastSig = 0;
// Advisor budget/rotation
int AdviseMax = 16;
// Advisor seed cache (moved from static)
int seedBar = -1;
int haveSeed[NET_EQNS];
var seedVal[NET_EQNS];
// ---- CUDA advisor (batched DTREE replacement) ----
CudaAdvisor cuda;
int CudaReady = 0;
// Per-bar cached advice
int AdvCacheBar = -999999;
i8 AdvHave[NET_EQNS];
fvar AdvCache[NET_EQNS];
public:
Alpha12Strategy() {
dt.bindPool(&pool);
memset(haveSeed,0,sizeof(haveSeed));
memset(seedVal,0,sizeof(seedVal));
AdvCacheBar = -999999;
memset(AdvHave,0,sizeof(AdvHave));
memset(AdvCache,0,sizeof(AdvCache));
}
~Alpha12Strategy(){ cleanup(); }
// --------------------- utilities (direct lifts) ---------------------
static var randsign(){ return ifelse(random(1) < 0.5, -1.0, 1.0); }
static var mapUnit(var u,var lo,var hi){
if(u<-1) u=-1;
if(u>1) u=1;
var t=0.5*(u+1.0);
return lo + t*(hi-lo);
}
static var safeNum(var x){ if(invalid(x)) return 0; return clamp(x,-1e100,1e100); }
static void sanitize(var* A,int n){ int k; for(k=0;k<n;k++) A[k]=safeNum(A[k]); }
static var sat100(var x){ return clamp(x,-100.,100.); }
static var nrm_s(var x) { return sat100(100.*tanh(x)); }
static var nrm_scl(var x, var s) { return sat100(100.*tanh(s*x)); }
// --------------- Candlestick pattern builder (unchanged) ------------
int buildCDL_TA61(var* out, string* names)
{
int n = 0;
#define ADD(Name, Call) do{ var v = (Call); if(out) out[n] = v/100.; if(names) names[n] = Name; n++; }while(0)
ADD("CDL2Crows", CDL2Crows());
ADD("CDL3BlackCrows", CDL3BlackCrows());
ADD("CDL3Inside", CDL3Inside());
ADD("CDL3LineStrike", CDL3LineStrike());
ADD("CDL3Outside", CDL3Outside());
ADD("CDL3StarsInSouth", CDL3StarsInSouth());
ADD("CDL3WhiteSoldiers", CDL3WhiteSoldiers());
ADD("CDLAbandonedBaby", CDLAbandonedBaby(0.3));
ADD("CDLAdvanceBlock", CDLAdvanceBlock());
ADD("CDLBeltHold", CDLBeltHold());
ADD("CDLBreakaway", CDLBreakaway());
ADD("CDLClosingMarubozu", CDLClosingMarubozu());
// was: CDLConcealBabysWall() (very likely wrong)
ADD("CDLConcealingBabySwallow", A12_CDL_CONCEALING_BABY_SWALLOW());
ADD("CDLCounterAttack", CDLCounterAttack());
ADD("CDLDarkCloudCover", CDLDarkCloudCover(0.3));
ADD("CDLDoji", CDLDoji());
ADD("CDLDojiStar", CDLDojiStar());
ADD("CDLDragonflyDoji", CDLDragonflyDoji());
ADD("CDLEngulfing", CDLEngulfing());
ADD("CDLEveningDojiStar", CDLEveningDojiStar(0.3));
ADD("CDLEveningStar", CDLEveningStar(0.3));
ADD("CDLGapSideSideWhite", CDLGapSideSideWhite());
ADD("CDLGravestoneDoji", CDLGravestoneDoji());
ADD("CDLHammer", CDLHammer());
ADD("CDLHangingMan", CDLHangingMan());
ADD("CDLHarami", CDLHarami());
ADD("CDLHaramiCross", CDLHaramiCross());
// was: CDLHignWave()
ADD("CDLHighWave", A12_CDL_HIGH_WAVE());
ADD("CDLHikkake", CDLHikkake());
ADD("CDLHikkakeMod", CDLHikkakeMod());
ADD("CDLHomingPigeon", CDLHomingPigeon());
ADD("CDLIdentical3Crows", CDLIdentical3Crows());
ADD("CDLInNeck", CDLInNeck());
ADD("CDLInvertedHammer", CDLInvertedHammer());
ADD("CDLKicking", CDLKicking());
ADD("CDLKickingByLength", CDLKickingByLength());
ADD("CDLLadderBottom", CDLLadderBottom());
ADD("CDLLongLeggedDoji", CDLLongLeggedDoji());
ADD("CDLLongLine", CDLLongLine());
ADD("CDLMarubozu", CDLMarubozu());
ADD("CDLMatchingLow", CDLMatchingLow());
ADD("CDLMatHold", CDLMatHold(0.5));
ADD("CDLMorningDojiStar", CDLMorningDojiStar(0.3));
ADD("CDLMorningStar", CDLMorningStar(0.3));
ADD("CDLOnNeck", CDLOnNeck());
ADD("CDLPiercing", CDLPiercing());
// uses the underscore alias wrappers defined below
ADD("CDLRickshawMan", CDL_RickshawMan());
ADD("CDLRiseFall3Methods", CDL_RiseFall3Methods());
// was: CDL_SeperatingLines()
ADD("CDLSeparatingLines", A12_CDL_SEPARATING_LINES());
ADD("CDLShootingStar", CDL_ShootingStar());
ADD("CDLShortLine", CDL_ShortLine());
ADD("CDLSpinningTop", CDL_SpinningTop());
ADD("CDLStalledPattern", CDL_StalledPattern());
// was: CDL_StickSandwhich()
ADD("CDLStickSandwich", A12_CDL_STICKSANDWICH());
ADD("CDLTakuri", CDL_Takuri());
ADD("CDLTasukiGap", CDL_TasukiGap());
ADD("CDLThrusting", CDL_Thrusting());
ADD("CDLTristar", CDL_Tristar());
ADD("CDLUnique3River", CDL_Unique3River());
ADD("CDLUpsideGap2Crows", CDLUpsideGap2Crows());
ADD("CDLXSideGap3Methods", CDL_XSideGap3Methods());
#undef ADD
return n;
}
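// Note: Zorro's CDL* pattern functions return values in the -100..+100
// range (typically -100, 0 or +100), so the /100 inside ADD normalizes
// each pattern to roughly [-1..1] before the Markov state builder
// consumes the buffer.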
// --- quick aliases (underscore helpers used elsewhere in this file) ---
// Keep only wrappers that map underscore-names -> real Zorro functions.
var CDL_RickshawMan() { return CDLRickshawMan(); }
var CDL_RiseFall3Methods() { return CDLRiseFall3Methods(); }
var CDL_ShootingStar() { return CDLShootingStar(); }
var CDL_ShortLine() { return CDLShortLine(); }
var CDL_SpinningTop() { return CDLSpinningTop(); }
var CDL_StalledPattern() { return CDLStalledPattern(); }
var CDL_Takuri() { return CDLTakuri(); }
var CDL_TasukiGap() { return CDLTasukiGap(); }
var CDL_Thrusting() { return CDLThrusting(); }
var CDL_Tristar() { return CDLTristar(); }
var CDL_Unique3River() { return CDLUnique3River(); }
var CDL_XSideGap3Methods() { return CDLXSideGap3Methods(); }
// --------------- Markov relation mapping (unchanged) ----------------
int relFromHL(int sL, int sH){
if(sL <= 0 || sH <= 0) return MC_NONE;
int idxL = (sL - 1)/2; int bullL = ((sL - 1)%2)==1;
int idxH = (sH - 1)/2; int bullH = ((sH - 1)%2)==1;
if(idxL == idxH && bullL == bullH) return sL;
return MC_NONE;
}
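// Note on relFromHL(): LTF and HTF states share one encoding. A state
// s>0 packs a pattern index ((s-1)/2) and a direction bit ((s-1)%2,
// 1 = bullish). The relation chain only gets a non-NONE state when both
// timeframes currently agree on pattern AND direction; in that case it
// reuses the LTF state id, otherwise it records MC_NONE.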
int is_H1_close(){ return (Bar % TF_H1) == 0; }
// ------------------ memory estimator integration --------------------
int mem_mb_est() { return rt.mem_mb_est(); }
// =================== CUDA advice cache helpers ======================
void resetAdvCacheForBar() {
if(AdvCacheBar != Bar) {
AdvCacheBar = Bar;
for(int k=0;k<net.N;k++) { AdvHave[k]=0; AdvCache[k]=0; }
}
}
// EXACT same gating behavior as old adviseEq early exits.
int adviseGate(int i) {
if(!allowAdvise(i)) return 0;
if(is(INITRUN)) return 0;
int tight = (mem_mb_est() >= MEM_BUDGET_MB - MEM_HEADROOM_MB);
if(tight) return 0;
// Hit-rate gate (unchanged)
if(net.HitN[i] > 32) {
var h = (var)net.HitEW[i];
var gate = 0.40 + 0.15*(1.0 - MH.Entropy);
if(h < gate) {
if(random() >= 0.5) return 0;
}
}
return 1;
}
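// Note on adviseGate(): advice is skipped entirely during INITRUN, when
// the equation is outside this bar's rotation group, or when the memory
// estimate is within MEM_HEADROOM_MB of the budget. Once an equation has
// more than 32 scored bars, a weak hit rate (below a gate that tightens
// as HTF entropy drops) only passes part of the time, so poor advisors
// are throttled rather than cut off outright.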
// Build a batch over [i0,i1), run GPU once, populate AdvCache for those i.
void computeAdviceBatchRange(int i0, int i1, var lambda, var mean, var energy, var power) {
resetAdvCacheForBar();
if(!CudaReady) return;
if(i0 < 0) i0 = 0;
if(i1 > net.N) i1 = net.N;
if(i0 >= i1) return;
float* X = cuda.hostX();
float* O = cuda.hostOut();
int nf = cuda.featN();
int idx[NET_EQNS];
int bsz = 0;
for(int i=i0;i<i1;i++) {
if(AdvHave[i]) continue;
if(!adviseGate(i)) {
AdvHave[i] = 1;
AdvCache[i] = (fvar)0;
continue;
}
// build features exactly like old DTREE path did
int tid = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
var pred = dt.predByTid(tid);
var S[ADV_EQ_NF];
buildEqFeatures(i, lambda, mean, energy, power, pred, S);
for(int k=0;k<ADV_EQ_NF;k++)
X[bsz*nf + k] = (float)S[k];
idx[bsz] = i;
bsz++;
if(bsz >= cuda.cap()) break;
}
if(bsz <= 0) return;
if(!cuda.inferBatch(X, bsz, O)) {
CudaReady = 0; // fail closed; avoid repeated driver errors
return;
}
for(int j=0;j<bsz;j++) {
int eq = idx[j];
var p = (var)O[j]; // expected in [-1..1]
p = clamp(p, -1.0, 1.0);
AdvCache[eq] = (fvar)p;
AdvHave[eq] = 1;
}
}
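// Note on computeAdviceBatchRange(): equations that fail the gate are
// cached as neutral (0) immediately; the rest are packed row-major into
// the advisor's host buffer and scored with ONE inferBatch() call, then
// clamped to [-1..1] and written to AdvCache. On any GPU failure,
// CudaReady is dropped so later bars fall back to the neutral path
// instead of hammering a broken driver context.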
// ----------------------- strategy init/cleanup ----------------------
void init() {
if(ready) return;
BarPeriod = BAR_PERIOD;
LookBack = max(300, NWIN);
set(PLOTNOW);
asset(ASSET_SYMBOL);
// effective K
net.Keff = KPROJ;
if(net.Keff < 1) net.Keff = 1;
if(net.Keff > net.K) net.Keff = net.K;
// allocate components
net.allocate();
MH.alloc(); ML.alloc(); MR.alloc();
MH.Alpha = MC_Alpha; ML.Alpha = MC_Alpha; MR.Alpha = MC_Alpha;
dt.allocDepthLUT();
dt.DTreeExp = 0;
dt.Root = dt.createNode(MAX_DEPTH);
rt.RT_TreeMaxDepth = MAX_DEPTH;
dt.refreshDepthW();
// index tree and map EqTreeId
reindexTreeAndMap();
// projection matrix
net.randomizeRP();
net.computeProjection();
// initial full rewire
rewireEpoch(0,0,0,0);
log.writeEqHeaderOnce();
RewirePos=0; RewirePasses=0;
UpdatePos=0; UpdatePasses=0;
// seed cache reset
seedBar = -1;
memset(haveSeed,0,sizeof(haveSeed));
// ---- CUDA advisor init ----
CudaReady = 0;
if(cuda.init(ADV_EQ_NF, NET_EQNS)) {
CudaReady = 1;
}
AdvCacheBar = -999999;
memset(AdvHave,0,sizeof(AdvHave));
memset(AdvCache,0,sizeof(AdvCache));
ready = 1;
printf("\n[Alpha12] init done: N=%i D=%i K=%i (Keff=%i) Depth=%i est=%i MB CUDA=%i",
net.N, net.D, net.K, net.Keff, rt.RT_TreeMaxDepth, mem_mb_est(), CudaReady);
}
void cleanup() {
if(!ready) return;
// CUDA shutdown first
if(CudaReady) {
cuda.shutdown();
CudaReady = 0;
}
MH.freeMem(); ML.freeMem(); MR.freeMem();
dt.freeAll();
pool.freeAll();
net.freeAll();
ready = 0;
}
// --------------------- core tree reindex & mapping -------------------
void reindexTreeAndMap(){
dt.TreeN = 0;
dt.indexTreeDFS(dt.Root);
if(dt.TreeN <= 0){
dt.TreeN = 1;
if(dt.TreeIdx) dt.TreeIdx[0] = dt.Root;
}
{ int i; for(i=0;i<net.N;i++) net.EqTreeId[i] = (i16)(i % dt.TreeN); }
dt.resizePredCacheToTree();
dt.refreshEqAngles(net.EqTreeId, net.N);
dt.maybeShrinkTreeIdx();
rt.recalcTreeBytes(dt);
int includeExpr = (LOG_EXPR_TEXT && net.Sym && !net.SymFreed) ? 1 : 0;
rt.computeMemFixedBytes(net, dt, includeExpr);
rt.MemFixedBytes += 3*(MC_STATES*MC_STATES*(int)sizeof(int) + MC_STATES*(int)sizeof(int));
}
// --------------------- Markov updates (OOP) --------------------------
void updateMarkov_5M(){
// fill member buffer (no static locals)
buildCDL_TA61(CDL_L, 0);
int s = MarkovChain::stateFromCDL(CDL_L, MC_ACT_dyn);
if(Bar > LookBack) ML.update(ML.Prev, s);
ML.Prev = s;
if(s > 0 && s < MC_STATES){
if(ML.RowSum[s] > 0) ML.rowStats(s, &ML.PBullNext, &ML.Entropy);
ML.Cur = s;
}
}
void updateMarkov_1H(){
int saveTF = TimeFrame;
TimeFrame = TF_H1;
// fill member buffer (no static locals)
buildCDL_TA61(CDL_H, 0);
int sH = MarkovChain::stateFromCDL(CDL_H, MC_ACT_dyn);
if(Bar > LookBack) MH.update(MH.Prev, sH);
MH.Prev = sH;
if(sH > 0 && sH < MC_STATES){
if(MH.RowSum[sH] > 0) MH.rowStats(sH, &MH.PBullNext, &MH.Entropy);
MH.Cur = sH;
}
TimeFrame = saveTF;
}
void updateMarkov_REL(){
int r = relFromHL(ML.Cur, MH.Cur);
if(Bar > LookBack) MR.update(MR.Prev, r);
MR.Prev = r;
if(r > 0 && r < MC_STATES){
if(MR.RowSum[r] > 0) MR.rowStats(r, &MR.PBullNext, &MR.Entropy);
MR.Cur = r;
}
}
void updateAllMarkov(){
MH.Alpha = MC_Alpha;
ML.Alpha = MC_Alpha;
MR.Alpha = MC_Alpha;
updateMarkov_5M();
if(is_H1_close()){
updateMarkov_1H();
updateMarkov_REL();
}
}
// --------------------- Advisor rotation (unchanged) ------------------
int allowAdvise(int i){
int groups = net.N / AdviseMax;
if(groups < 1) groups = 1;
return ((i / AdviseMax) % groups) == (Bar % groups);
}
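// Worked example (hypothetical numbers): with AdviseMax=16 and net.N=128
// there are 8 groups of 16 equations; on bar B only the group with index
// (B % 8) may request advice, so every equation is serviced once every
// 8 bars and the per-bar advisor load stays bounded.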
// --------------------- DTREE advisor wrappers ------------------------
// NOTE: CUDA-backed (batched + cached) with identical gating.
var adviseEq(int i, var lambda, var mean, var energy, var power) {
// keep the exact same gating behavior
if(!adviseGate(i)) return 0;
resetAdvCacheForBar();
if(AdvHave[i]) return (var)AdvCache[i];
// If we reach here, this equation wasn’t pre-batched by heavyUpdateChunk()
// (or adviseEq was called elsewhere). We do a tiny batch fallback.
if(CudaReady) {
computeAdviceBatchRange(i, i+1, lambda, mean, energy, power);
if(AdvHave[i]) return (var)AdvCache[i];
}
// CPU fallback: neutral
AdvHave[i] = 1;
AdvCache[i] = (fvar)0;
return 0;
}
var adviseSeed(int i, var lambda, var mean, var energy, var power) {
if(seedBar != Bar) {
for(int k=0;k<net.N;k++) haveSeed[k]=0;
seedBar = Bar;
}
if(i < 0) i = 0;
if(i >= net.N) i = i % net.N;
if(!allowAdvise(i)) return 0;
if(!haveSeed[i]) {
seedVal[i] = adviseEq(i, lambda, mean, energy, power);
haveSeed[i] = 1;
}
return seedVal[i];
}
static var mix01(var a, int salt){
var z = sin(123.456*a + 0.001*salt) + cos(98.765*a + 0.002*salt);
return tanh(0.75*z);
}
static var mapA(var a,var lo,var hi){ return mapUnit(a,lo,hi); }
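// mix01() is a cheap deterministic pseudo-random mixer: the same seed
// and salt always produce the same value in (-1,1), so a single advisor
// seed can fan out into many independent-looking coefficients in
// synthesizeEquationFromDTREE() without storing extra state.
// mapA()/mapUnit() then stretch that unit value onto a [lo,hi] range.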
// ------------------- Feature builders (unchanged logic) ---------------
void buildEqFeatures(int i, var lambda, var mean, var energy, var power, var pred, var* S /*ADV_EQ_NF*/) {
int tid = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
Node* t = dt.treeAt(tid);
var th_i = (dt.EqTheta ? dt.EqTheta[i] : 0);
var dphi = DTree::angDiff(dt.CycPh, th_i);
var alignC = cos(dphi);
var alignS = sin(dphi);
S[0] = nrm_s(net.State[i]);
S[1] = nrm_s(mean);
S[2] = nrm_scl(power,0.05);
S[3] = nrm_scl(energy,0.01);
S[4] = nrm_s(lambda);
S[5] = sat100(200.0*(pred-0.5));
S[6] = sat100(200.0*((var)t->d/MAX_DEPTH)-100.0);
S[7] = sat100(1000.0*t->r);
S[8] = nrm_s((var)net.TreeTerm[i]);
S[9] = sat100( (200.0/3.0) * (var)((int)net.Mode[i]) - 100.0 );
S[10] = sat100(200.0*(MH.PBullNext-0.5));
S[11] = sat100(200.0*(MH.Entropy-0.5));
S[12] = sat100(200.0*((var)net.HitEW[i] - 0.5));
S[13] = sat100(100.*alignC);
S[14] = sat100(100.*alignS);
S[15] = sat100(200.0*(ML.PBullNext - 0.5));
S[16] = sat100(200.0*(ML.Entropy - 0.5));
S[17] = sat100(200.0*(MR.PBullNext - 0.5));
S[18] = sat100(200.0*(MR.Entropy - 0.5));
sanitize(S,ADV_EQ_NF);
}
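// All ADV_EQ_NF features above are deliberately squashed into the
// [-100,100] band (sat100/nrm_s/nrm_scl) and sanitized, so the GPU
// kernel sees a bounded, sanitized feature row regardless of how large
// the raw state, energy or power values get.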
// ---------------- adjacency scoring (heuristic only) ------------------
var scorePairSafe(int i, int j, var lambda, var mean, var energy, var power){
int ti = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
int tj = dt.safeTreeIndexFromEq((int)net.EqTreeId[j], dt.TreeN);
Node* ni = dt.treeAt(ti);
Node* nj = dt.treeAt(tj);
var simD = 1.0 / (1.0 + abs((var)ni->d - (var)nj->d));
var dr = 50.0*abs(ni->r - nj->r);
var simR = 1.0 / (1.0 + dr);
var predi = dt.predByTid(ti);
var predj = dt.predByTid(tj);
var pred = 0.5*(predi + predj);
var score = 0.5*pred + 0.3*simD + 0.2*simR;
return 2.0*score - 1.0;
}
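// Note on scorePairSafe(): the pair score blends the two nodes' average
// tree prediction (weight 0.5), depth similarity (0.3) and radius
// similarity (0.2), then rescales the [0..1] blend to [-1..1]. It is a
// pure heuristic ranking; the lambda/mean/energy/power arguments are
// currently unused.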
|
|
|
The Candle Oracle Lattice (CUDA version cont.)
[Re: TipmyPip]
#489161
Yesterday at 13:02
|
TipmyPip
OP
Member
Joined: Sep 2017
Posts: 200
|
void rewireAdjacency_DTREE_range(int i0,int i1, var lambda, var mean, var energy, var power){
int i,d,c,best,cand;
if(i0<0) i0=0; if(i1>net.N) i1=net.N;
for(i=i0;i<i1;i++){
for(d=0; d<net.D; d++){
var bestScore = -2; best=-1;
for(c=0;c<CandNeigh;c++){
cand = (int)random(net.N);
if(cand==i) continue;
int clash=0,k;
for(k=0;k<d;k++){
int prev = net.Adj[i*net.D + k];
if(prev>=0 && prev==cand){ clash=1; break; }
}
if(clash) continue;
var s = scorePairSafe(i,cand,lambda,mean,energy,power);
if(s > bestScore){ bestScore=s; best=cand; }
}
if(best<0){ do{ best=(int)random(net.N);} while(best==i); }
net.Adj[i*net.D + d] = (i16)best;
}
}
}
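// Note on the rewiring loop: each of the D neighbour slots of equation i
// is filled by sampling CandNeigh random candidates, rejecting i itself
// and anything already chosen for an earlier slot, and keeping the
// best-scoring candidate; if every candidate clashes, a random non-self
// neighbour is used so the slot is never left dangling.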
// ---------------- coefficient synthesis (unchanged) -------------------
void synthesizeEquationFromDTREE(int i, var lambda, var mean, var energy, var power){
var seed = adviseSeed(i,lambda,mean,energy,power);
net.Mode[i] = (int)(abs(1000*seed)) & 3;
net.WSelf[i] = (fvar)mapA(mix01(seed, 11), 0.15, 0.85);
net.WN1[i] = (fvar)mapA(mix01(seed, 12), 0.05, 0.35);
net.WN2[i] = (fvar)mapA(mix01(seed, 13), 0.05, 0.35);
net.WGlob1[i] = (fvar)mapA(mix01(seed, 14), 0.05, 0.30);
net.WGlob2[i] = (fvar)mapA(mix01(seed, 15), 0.05, 0.30);
net.WMom[i] = (fvar)mapA(mix01(seed, 16), 0.02, 0.15);
net.WTree[i] = (fvar)mapA(mix01(seed, 17), 0.05, 0.35);
net.WAdv[i] = (fvar)mapA(mix01(seed, 18), 0.05, 0.35);
net.A1x[i] = (fvar)(randsign()*mapA(mix01(seed, 21), 0.6, 1.2));
net.A1lam[i] = (fvar)(randsign()*mapA(mix01(seed, 22), 0.05,0.35));
net.A1mean[i]= (fvar) mapA(mix01(seed, 23),-0.30,0.30);
net.A1E[i] = (fvar) mapA(mix01(seed, 24),-0.0015,0.0015);
net.A1P[i] = (fvar) mapA(mix01(seed, 25),-0.30,0.30);
net.A1i[i] = (fvar) mapA(mix01(seed, 26),-0.02,0.02);
net.A1c[i] = (fvar) mapA(mix01(seed, 27),-0.20,0.20);
net.A2x[i] = (fvar)(randsign()*mapA(mix01(seed, 31), 0.6, 1.2));
net.A2lam[i] = (fvar)(randsign()*mapA(mix01(seed, 32), 0.05,0.35));
net.A2mean[i]= (fvar) mapA(mix01(seed, 33),-0.30,0.30);
net.A2E[i] = (fvar) mapA(mix01(seed, 34),-0.0015,0.0015);
net.A2P[i] = (fvar) mapA(mix01(seed, 35),-0.30,0.30);
net.A2i[i] = (fvar) mapA(mix01(seed, 36),-0.02,0.02);
net.A2c[i] = (fvar) mapA(mix01(seed, 37),-0.20,0.20);
net.G1mean[i] = (fvar) mapA(mix01(seed, 41), 0.4, 1.6);
net.G1E[i] = (fvar) mapA(mix01(seed, 42),-0.004,0.004);
net.G2P[i] = (fvar) mapA(mix01(seed, 43), 0.1, 1.2);
net.G2lam[i] = (fvar) mapA(mix01(seed, 44), 0.05, 0.7);
net.TAlpha[i] = (fvar) mapA(mix01(seed, 51), 0.3, 1.5);
net.TBeta[i] = (fvar) mapA(mix01(seed, 52), 6.0, 50.0);
net.PropRaw[i] = (fvar)(0.01 + 0.99*(0.5*(seed+1.0)));
{ var boost = 0.75 + 0.5*(var)net.HitEW[i];
net.PropRaw[i] = (fvar)((var)net.PropRaw[i] * boost);
}
}
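// Note on synthesizeEquationFromDTREE(): one advisor seed per equation
// drives every coefficient through mix01(seed, salt) with a distinct
// salt, so the whole parameter set is reproducible from that single
// seed. PropRaw additionally gets a 0.75..1.25 boost from the equation's
// exponentially weighted hit rate, biasing proportion mass toward
// equations whose past advice matched realized returns.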
void synthesizeEquation_range(int i0,int i1, var lambda, var mean, var energy, var power){
if(i0<0) i0=0; if(i1>net.N) i1=net.N;
int i;
for(i=i0;i<i1;i++) synthesizeEquationFromDTREE(i,lambda,mean,energy,power);
}
// ------------------- DTREE ensemble term (unchanged) ------------------
var dtreeTerm(int i, int* outTopEq, var* outTopW){
int j;
int tid_i = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
Node* ti = dt.treeAt(tid_i);
int di = ti->d; var ri=ti->r;
var predI = dt.predByTid(tid_i);
var alpha = (var)net.TAlpha[i];
var beta = (var)net.TBeta[i];
var sumw=0, acc=0, bestW=-1; int bestJ=-1;
for(j=0;j<net.N;j++){
if(j==i) continue;
int tid_j = dt.safeTreeIndexFromEq((int)net.EqTreeId[j], dt.TreeN);
Node* tj=dt.treeAt(tid_j);
int dj=tj->d; var rj=tj->r;
var predJ = dt.predByTid(tid_j);
var w = exp(-alpha*abs(di-dj)) * exp(-beta*abs(ri-rj));
var predBoost = 0.5 + 0.5*(predI*predJ);
var propBoost = 0.5 + 0.5*( (net.Prop[i] + net.Prop[j]) );
w *= predBoost * propBoost;
var pairAdv = scorePairSafe(i,j,0,0,0,0);
var pairBoost = 0.75 + 0.25*(0.5*(pairAdv+1.0));
w *= pairBoost;
sumw += w;
acc += w*net.State[j];
if(w>bestW){ bestW=w; bestJ=j; }
}
if(outTopEq) *outTopEq = bestJ;
if(outTopW) *outTopW = ifelse(sumw>0, bestW/sumw, 0);
if(sumw>0) return acc/sumw;
return 0;
}
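// Note on dtreeTerm(): neighbour j contributes with weight
//   w = exp(-TAlpha*|d_i - d_j|) * exp(-TBeta*|r_i - r_j|)
// further boosted by prediction agreement, combined propensity and the
// pairwise heuristic score. The return value is the w-weighted average
// of the neighbours' states; the strongest single contributor and its
// normalized weight are reported back through outTopEq/outTopW.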
// ------------------- expression builder (optional) --------------------
void buildSymbolicExpr(int i, int n1, int n2){
if(!LOG_EXPR_TEXT) return;
if(!net.Sym) return;
string s = net.Sym[i];
s[0]=0;
string a1 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
(var)net.A1x[i], n1, (var)net.A1lam[i], (var)net.A1mean[i], (var)net.A1E[i], (var)net.A1P[i], (var)net.A1i[i], (var)net.A1c[i]);
string a2 = strf("(%.3f*x[%i] + %.3f*lam + %.3f*mean + %.5f*E + %.3f*P + %.3f*i + %.3f)",
(var)net.A2x[i], n2, (var)net.A2lam[i], (var)net.A2mean[i], (var)net.A2E[i], (var)net.A2P[i], (var)net.A2i[i], (var)net.A2c[i]);
Alpha12Logger::strlcat_safe(s, "x[i]_next = ", EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*x[i] + ", (var)net.WSelf[i]), EXPR_MAXLEN);
if(net.Mode[i]==1){
Alpha12Logger::strlcat_safe(s, strf("%.3f*tanh%s + ", (var)net.WN1[i], a1), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*sin%s + ", (var)net.WN2[i], a2), EXPR_MAXLEN);
} else if(net.Mode[i]==2){
Alpha12Logger::strlcat_safe(s, strf("%.3f*cos%s + ", (var)net.WN1[i], a1), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*tanh%s + ", (var)net.WN2[i], a2), EXPR_MAXLEN);
} else {
Alpha12Logger::strlcat_safe(s, strf("%.3f*sin%s + ", (var)net.WN1[i], a1), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*cos%s + ", (var)net.WN2[i], a2), EXPR_MAXLEN);
}
Alpha12Logger::strlcat_safe(s, strf("%.3f*tanh(%.3f*mean + %.5f*E) + ",
(var)net.WGlob1[i], (var)net.G1mean[i], (var)net.G1E[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*sin(%.3f*P + %.3f*lam) + ",
(var)net.WGlob2[i], (var)net.G2P[i], (var)net.G2lam[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*(x[i]-x_prev[i]) + ", (var)net.WMom[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("Prop[i]=%.4f; ", (var)net.Prop[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*DT(i) + ", (var)net.WTree[i]), EXPR_MAXLEN);
Alpha12Logger::strlcat_safe(s, strf("%.3f*DTREE(i)", (var)net.WAdv[i]), EXPR_MAXLEN);
}
void buildSymbolicExpr_range(int i0,int i1){
if(!LOG_EXPR_TEXT) return;
if(i0<0) i0=0; if(i1>net.N) i1=net.N;
int i;
for(i=i0;i<i1;i++){
int n1 = net.adjSafe(i,0);
int n2 = ifelse(net.D>=2, net.adjSafe(i,1), n1);
buildSymbolicExpr(i,n1,n2);
}
}
// ------------------- chunked rewire orchestrator ----------------------
int rewireEpochChunk(var lambda, var mean, var energy, var power, int batch){
if(net.N <= 0) return 0;
if(batch < REWIRE_MIN_BATCH) batch = REWIRE_MIN_BATCH;
if(RewirePos >= net.N) RewirePos = 0;
int i0 = RewirePos;
int i1 = i0 + batch; if(i1 > net.N) i1 = net.N;
CandNeigh = ifelse(MH.Entropy < 0.45, CAND_NEIGH+4, CAND_NEIGH);
rewireAdjacency_DTREE_range(i0,i1, lambda,mean,energy,power);
net.sanitizeAdjacency();
synthesizeEquation_range(i0,i1, lambda,mean,energy,power);
buildSymbolicExpr_range(i0,i1);
RewirePos = i1;
if(RewirePos >= net.N){
RewirePos = 0;
RewirePasses++;
return 1;
}
return 0;
}
void rewireEpoch(var lambda, var mean, var energy, var power){
int done=0;
while(!done){
done = rewireEpochChunk(lambda,mean,energy,power, REWIRE_BATCH_EQ_H1);
}
net.normalizeProportions();
{
int D = net.D, i, total = net.N*D;
unsigned int h = 2166136261u;
for(i=0;i<total;i++){
unsigned int x = (unsigned int)net.Adj[i];
h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
}
CtxID = (int)((h ^ ((unsigned int)Epoch<<8)) & 0x7fffffff);
}
}
// ------------------- coarse net projection -> gamma -------------------
var projectNet(){
int i;
var sum=0,sumsq=0,cross=0;
for(i=0;i<net.N;i++){
sum += net.State[i];
sumsq += net.State[i]*net.State[i];
if(i+1<net.N) cross += net.State[i]*net.State[i+1];
}
var mean = sum/net.N;
var corr = cross/(net.N-1);
return 0.6*tanh(mean + 0.001*sumsq) + 0.4*sin(corr);
}
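// Note on projectNet(): gamma is a coarse whole-network observable,
//   0.6*tanh(mean + 0.001*sum(x^2)) + 0.4*sin(cross/(N-1))
// where cross sums products of adjacent states (by index), so it reacts
// both to the average activation level and to how strongly neighbouring
// equations co-move.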
// ------------------- heavy update chunk (CUDA-batched) ----------------
var nonlin1(int i, int n1, var lam, var mean, var E, var P){
var x = net.State[n1];
var arg = (var)net.A1x[i]*x + (var)net.A1lam[i]*lam + (var)net.A1mean[i]*mean + (var)net.A1E[i]*E + (var)net.A1P[i]*P + (var)net.A1i[i]*i + (var)net.A1c[i];
return arg;
}
var nonlin2(int i, int n2, var lam, var mean, var E, var P){
var x = net.State[n2];
var arg = (var)net.A2x[i]*x + (var)net.A2lam[i]*lam + (var)net.A2mean[i]*mean + (var)net.A2E[i]*E + (var)net.A2P[i]*P + (var)net.A2i[i]*i + (var)net.A2c[i];
return arg;
}
int heavyUpdateChunk(var lambda, var mean, var energy, var power, int batch){
if(net.N <= 0) return 0;
if(batch < UPDATE_MIN_BATCH) batch = UPDATE_MIN_BATCH;
if(UpdatePos >= net.N) UpdatePos = 0;
int i0 = UpdatePos;
int i1 = i0 + batch; if(i1 > net.N) i1 = net.N;
net.computeProjection();
// ---- NEW: precompute advice for this chunk in ONE GPU call ----
if(CudaReady) {
computeAdviceBatchRange(i0, i1, lambda, mean, energy, power);
} else {
resetAdvCacheForBar();
}
int i;
for(i=i0;i<i1;i++){
int n1 = net.adjSafe(i,0);
int n2 = ifelse(net.D>=2, net.adjSafe(i,1), n1);
int topEq=-1; var topW=0;
var treeT = dtreeTerm(i, &topEq, &topW);
net.TreeTerm[i] = (fvar)treeT;
net.TopEq[i] = (i16)topEq;
net.TopW[i] = (fvar)topW;
// adviseEq now returns from cache (no GPU launch here)
var adv = adviseEq(i, lambda, mean, energy, power);
var a1 = nonlin1(i,n1,lambda,mean,energy,power);
var a2 = nonlin2(i,n2,lambda,mean,energy,power);
var t1,t2;
if(net.Mode[i]==1){ t1=tanh(a1); t2=sin(a2); }
else if(net.Mode[i]==2){ t1=cos(a1); t2=tanh(a2); }
else { t1=sin(a1); t2=cos(a2); }
var glob1 = tanh((var)net.G1mean[i]*mean + (var)net.G1E[i]*energy);
var glob2 = sin ((var)net.G2P[i]*power + (var)net.G2lam[i]*lambda);
var mom = (net.State[i] - net.Prev[i]);
var xnext =
(var)net.WSelf[i]*net.State[i]
+ (var)net.WN1[i]*t1
+ (var)net.WN2[i]*t2
+ (var)net.WGlob1[i]*glob1
+ (var)net.WGlob2[i]*glob2
+ (var)net.WMom[i]*mom
+ (var)net.WTree[i]*treeT
+ (var)net.WAdv[i]*adv;
xnext = clamp(xnext, -10., 10.);
net.Prev[i] = net.State[i];
net.State[i] = xnext;
net.StateSq[i] = xnext*xnext;
net.AdvPrev[i] = (fvar)adv;
// ---- logging block (unchanged) ----
if(!rt.LogsOff && (Bar % LOG_EVERY)==0 && (i < LOG_EQ_SAMPLE)){
int tid = dt.safeTreeIndexFromEq((int)net.EqTreeId[i], dt.TreeN);
Node* tnode = dt.treeAt(tid);
int nodeDepth = (tnode ? tnode->d : 0);
var rate = (var)net.TBeta[i];
var pred = dt.predByTid(tid);
string expr = 0;
if(LOG_EXPR_TEXT && net.Sym) expr = net.Sym[i];
log.appendEqMetaLine(Bar, Epoch, CtxID,
i, n1, n2, tid, nodeDepth, rate, pred, adv, net.Prop[i], (int)net.Mode[i],
(var)net.WAdv[i], (var)net.WTree[i], MH.PBullNext, MH.Entropy, (int)MH.Cur,
expr);
}
}
UpdatePos = i1;
if(UpdatePos >= net.N){
UpdatePos = 0;
UpdatePasses++;
return 1;
}
return 0;
}
// ------------------- hit-rate scorer (unchanged) ----------------------
void updateHitRates(){
if(is(INITRUN)) return;
if(Bar <= LookBack) return;
var r = net.Ret1;
var sgnR = sign(r);
int i;
for(i=0;i<net.N;i++){
var a = (var)net.AdvPrev[i];
var sgnA = ifelse(a > 0.0001, 1, ifelse(a < -0.0001, -1, 0));
var hit = ifelse(sgnR == 0, 0.5, ifelse(sgnA == sgnR, 1.0, 0.0));
net.HitEW[i] = (fvar)((1.0 - 0.02)*(var)net.HitEW[i] + 0.02*hit);
net.HitN[i] += 1;
}
}
// ------------------- blend lambda/gamma & accuracy --------------------
var blendLambdaGamma(var lambda_raw, var gamma_raw){
var w = clamp(FB_W + 0.15*(0.5 - MH.Entropy), 0.4, 0.9);
var x = w*lambda_raw + (1.0 - w)*gamma_raw;
rt.acc_update(lambda_raw, gamma_raw);
return x;
}
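// Note on blendLambdaGamma(): the lambda weight w floats around FB_W
// (0.70), is nudged up when HTF entropy is below 0.5 (more predictable
// regime) and down when it is above, and is clamped to [0.4,0.9]; gamma
// gets the remaining 1-w. The raw pair is also passed to rt.acc_update()
// for accuracy tracking.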
// ------------------- rewire scheduler (unchanged logic) ----------------
void maybeRewireNow(var lambda, var mean, var energy, var power){
int mb = mem_mb_est();
if(mb >= UPDATE_MEM_HARD) return;
int batch = ifelse(is_H1_close(), REWIRE_BATCH_EQ_H1, REWIRE_BATCH_EQ_5M);
if(mb >= REWIRE_MEM_SOFT) batch = (batch>>1);
if(batch < REWIRE_MIN_BATCH) batch = REWIRE_MIN_BATCH;
int finished = rewireEpochChunk(lambda,mean,energy,power,batch);
if(finished && (RewirePasses % REWIRE_NORM_EVERY) == 0){
net.normalizeProportions();
log.writeEqHeaderOnce();
if((RewirePasses % META_EVERY) == 0){
int D = net.D, i, total = net.N*D;
unsigned int h = 2166136261u;
for(i=0;i<total;i++){
unsigned int x = (unsigned int)net.Adj[i];
h ^= x + 0x9e3779b9u + (h<<6) + (h>>2);
}
CtxID = (int)((h ^ ((unsigned int)Epoch<<8)) & 0x7fffffff);
}
}
}
// ------------------- heavy update scheduler (unchanged) ---------------
void runHeavyUpdates(var lambda, var mean, var energy, var power){
int mb = mem_mb_est();
if(mb >= UPDATE_MEM_HARD) return;
int batch = ifelse(is_H1_close(), UPDATE_BATCH_EQ_H1, UPDATE_BATCH_EQ_5M);
if(mb >= UPDATE_MEM_SOFT) batch = (batch>>1);
if(batch < UPDATE_MIN_BATCH) batch = UPDATE_MIN_BATCH;
heavyUpdateChunk(lambda,mean,energy,power,batch);
}
// ------------------- main engine step (unchanged) ---------------------
void alpha12_step(var ret1_now){
if(!ready) return;
updateAllMarkov();
if(Bar < LookBack){
net.computeProjection();
net.Ret1 = ret1_now;
var h=0; int i;
for(i=0;i<net.N;i++) h += (var)net.HitEW[i];
if(net.N > 0) h /= (var)net.N; else h=0.5;
var target = MC_ACT + 0.15*(0.55 - h) + 0.10*(MH.Entropy - 0.5);
target = clamp(target, 0.20, 0.50);
MC_ACT_dyn = 0.95*MC_ACT_dyn + 0.05*target;
return;
}
net.computeProjection();
int Keff = net.keffClamped();
int k;
var e=0, pwr=0;
for(k=0;k<Keff;k++){ var z=(var)net.Z[k]; e+=z; pwr+=z*z; }
var mean=0, power=0;
if(Keff > 0){ mean = e/(var)Keff; power = pwr/(var)Keff; }
var energy = pwr;
var lambda = 0.7*tanh(mean) + 0.3*tanh(0.05*power);
maybeRewireNow(lambda,mean,energy,power);
runHeavyUpdates(lambda,mean,energy,power);
var gamma = projectNet();
var x = blendLambdaGamma(lambda,gamma);
(void)x;
dt.updateEquationCycle(net.Prop, net.N);
net.Ret1 = ret1_now;
updateHitRates();
int beforeDepth = rt.RT_TreeMaxDepth;
rt.depth_manager_runtime(dt);
if(rt.RT_TreeMaxDepth != beforeDepth){
reindexTreeAndMap();
}
{
var h=0; int i;
for(i=0;i<net.N;i++) h += (var)net.HitEW[i];
if(net.N > 0) h /= (var)net.N; else h=0.5;
var target = MC_ACT + 0.15*(0.55 - h) + 0.10*(MH.Entropy - 0.5);
target = clamp(target, 0.20, 0.50);
MC_ACT_dyn = 0.9*MC_ACT_dyn + 0.1*target;
}
}
// ------------------- realized 1-bar return (unchanged) ----------------
var realizedRet1(){
vars C = series(priceClose(0));
if(Bar <= LookBack) return 0;
return C[0] - C[1];
}
// ------------------- trading signal (unchanged) -----------------------
var tradeSignal(){
if(!ready) return 0;
if(!net.RP || !net.Z || !net.StateSq) return 0;
net.computeProjection();
int Keff = net.keffClamped();
if(Keff <= 0) return 0;
int k;
var e=0, pwr=0;
for(k=0;k<Keff;k++){ var z=(var)net.Z[k]; e+=z; pwr+=z*z; }
var mean=0, power=0;
if(Keff > 0){ mean = e/(var)Keff; power = pwr/(var)Keff; }
var lambda = 0.7*tanh(mean) + 0.3*tanh(0.05*power);
var gamma = projectNet();
var x = blendLambdaGamma(lambda,gamma);
LastSig = x;
var gLong=0, gShort=0;
if(MH.PBullNext >= PBULL_LONG_TH) gLong = 1.0;
if(MH.PBullNext <= PBULL_SHORT_TH) gShort= 1.0;
var s=0;
if(x > 0) s = x*gLong;
else s = x*gShort;
var conf = 1.0 - 0.5*(MR.Entropy);
s *= conf;
return clamp(s,-1.,1.);
}
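// Note on tradeSignal(): the blended signal x may only act in the
// direction the 1H Markov chain favours (PBullNext >= PBULL_LONG_TH
// enables longs, <= PBULL_SHORT_TH enables shorts; otherwise that side
// is zeroed). The surviving signal is then shrunk by up to 50% as the
// relation-chain entropy rises, and finally clamped to [-1,1].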
// ------------------- position sizing & orders (unchanged) -------------
var posSizeFromSignal(var s){
var base = 1;
var scale = 2.0*abs(s);
return base * (0.5 + 0.5*scale);
}
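// Sizing example: with base = 1, |s| = 0 gives 0.5 lots and |s| = 1
// gives 1.5 lots, i.e. Lots = 0.5 + |s| on this base.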
void placeOrders(var s){
if(s > 0){
if(NumOpenShort) exitShort();
if(!NumOpenLong){
Lots = posSizeFromSignal(s);
enterLong();
}
} else if(s < 0){
if(NumOpenLong) exitLong();
if(!NumOpenShort){
Lots = posSizeFromSignal(s);
enterShort();
}
}
}
// ------------------- plotting guard -------------------
void plotSafe(string name, var v){
if(ENABLE_PLOTS && !rt.ChartsOff) plot(name, v, NEW|LINE, 0);
}
// ------------------- per-bar wrapper -------------------
void onBar(){
var r1 = realizedRet1();
alpha12_step(r1);
var s = tradeSignal();
placeOrders(s);
plotSafe("PBull(1H)", 100*(MH.PBullNext-0.5));
plotSafe("PBull(5M)", 100*(ML.PBullNext-0.5));
plotSafe("PBull(Rel)", 100*(MR.PBullNext-0.5));
plotSafe("Entropy(1H)", 100*(MH.Entropy));
plotSafe("Sig", 100*LastSig);
}
};
// ======================================================================
// ========================= Zorro DLL entry (bridge) ====================
DLLFUNC void run()
{
if(is(INITRUN)) {
if(!gAlpha12) gAlpha12 = new Alpha12Strategy();
gAlpha12->init();
}
if(!gAlpha12 || !gAlpha12->ready)
return;
// warmup behavior: keep Markov updated and projection alive
if(is(LOOKBACK) || Bar < LookBack) {
gAlpha12->updateAllMarkov();
gAlpha12->net.computeProjection();
return;
}
gAlpha12->onBar();
if(is(EXITRUN)) {
gAlpha12->cleanup();
delete gAlpha12;
gAlpha12 = nullptr;
}
}
|
|
|
|